/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int  dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);

	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;

	return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

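/*
 * Illustrative note (not from the original source): the two transitions
 * above bracket the lifetime of a usable device. dasd_state_new_to_known
 * takes a reference that dasd_state_known_to_new gives up again, so the
 * reference count stays > 0 for as long as the device is beyond
 * DASD_STATE_NEW:
 *
 *	dasd_get_device(device);	NEW   -> KNOWN	(reference taken)
 *	...				KNOWN -> ... -> ONLINE and back
 *	dasd_put_device(device);	KNOWN -> NEW	(reference dropped)
 */
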
/*
 * Register the gendisk and the device debug area
 * (state change KNOWN -> BASIC).
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Free the gendisk, terminate any running i/o and
 * unregister the device debug area (state change BASIC -> KNOWN).
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;
	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	if (device->block) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

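/*
 * Illustrative note (not from the original source): the cascaded if
 * blocks above chain the individual transitions, so one call can move
 * the device several steps toward device->target, e.g. for a startup
 * from scratch with target DASD_STATE_ONLINE:
 *
 *	NEW -> KNOWN -> BASIC -> READY -> ONLINE
 *
 * A step that returns -EAGAIN (see dasd_state_basic_to_ready) aborts
 * the sequence with the state unchanged; it is resumed by a later call
 * to dasd_change_state, e.g. via dasd_kick_device.
 */
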
/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_restore_device to the kernel event daemon. */
	schedule_work(&device->restore_device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	block->profile.counter[index]++; \
}

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */

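/*
 * Illustrative note (not from the original source): dasd_profile_counter
 * above maintains a logarithmic histogram. The loop stops at the first
 * index with value >> (2+index) == 0, i.e. value < 2^(2+index), e.g.
 *
 *	value   3  -> index 0
 *	value 100  -> index 5
 *	larger values are capped at index 31
 *
 * The chosen bucket is counted in both the global and the per-block
 * profile.
 */
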
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	/* Round the cqr header up to a multiple of 8 bytes so that the
	 * channel program and the data area start doubleword aligned. */
	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}

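/*
 * Illustrative usage sketch (not from the original source; MAGIC stands
 * for a discipline's magic value): a discipline typically builds a
 * channel program from the per-device chunk pool and releases it the
 * same way:
 *
 *	cqr = dasd_smalloc_request(MAGIC, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	cqr->startdev = device;
 *	... set up cqr->cpaddr[] and cqr->data ...
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, device);
 */
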
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc */
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO ran out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (cqr->cpmode == 1) {
		/* transport mode: start the channel program as a tcw */
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		/* command mode: start the channel program as a ccw chain */
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
		     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		/* log sense for every failed I/O to s390 debugfeature */
		dasd_log_sense_dbf(cqr, irb);
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}

		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == LPM_ANYPATH)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue requests that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process requests with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status */
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
				 cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}

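/*
 * Illustrative note (not from the original source): cqr->expires is a
 * relative timeout in jiffies and cqr->starttime is the jiffies value
 * recorded by dasd_start_IO, so __dasd_device_check_expire fires once
 *
 *	jiffies >= cqr->starttime + cqr->expires
 *
 * e.g. a request started with cqr->expires = 30 * HZ is terminated
 * roughly 30 seconds after it was handed to the channel subsystem.
 */
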
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_NEED_ERP ||
	       cqr->status == DASD_CQR_TERMINATED) &&
	      list_empty(&cqr->devlist));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

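/*
 * Illustrative note (not from the original source): device->stopped is
 * a bit mask, so independent stop reasons can be set and cleared
 * without interfering with each other, e.g.
 *
 *	dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
 *	...
 *	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 *
 * Waiters on generic_waitq are only woken up once the last bit is
 * cleared, which is what _dasd_sleep_on below relies on when it waits
 * for !device->stopped.
 */
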
/*
 * Checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}

static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->refers) /* erp is not done yet */
			return 1;
		return ((cqr->status != DASD_CQR_DONE) &&
			(cqr->status != DASD_CQR_FAILED));
	} else
		return (cqr->status == DASD_CQR_FILLED);
}

static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;

		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			continue;
		}

		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = (void *) &generic_waitq;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_clock();
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);
}

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful for timing out requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 0 if the request was cancelled (or did not need to be
 * cancelled) and a negative error code if the termination failed.
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}

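/*
 * Illustrative usage sketch (not from the original source): a caller
 * that needs a synchronous request with its own deadline could combine
 * the sleep_on machinery with dasd_cancel_req roughly like this:
 *
 *	cqr->callback = dasd_wakeup_cb;
 *	cqr->callback_data = (void *) &generic_waitq;
 *	dasd_add_request_tail(cqr);
 *	if (!wait_event_timeout(generic_waitq, _wait_for_wakeup(cqr), HZ)) {
 *		dasd_cancel_req(cqr);
 *		wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *	}
 *
 * The second wait is required because cancellation is asynchronous and
 * the request is only final once it was returned via its callback.
 */
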
/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}

/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;
	queue = block->request_queue;
	basedev = block->base;
	/* No queue? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue requests from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY) {
		while ((req = blk_fetch_request(block->request_queue)))
			__blk_end_request_all(req, -EIO);
		return;
	}
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_device_set_stop_bits(basedev,
							  DASD_STOPPED_PENDING);
				spin_unlock_irqrestore(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	__blk_end_request_all(req, error);
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			erp_fn(cqr);
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			erp_fn(cqr);
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first request on the queue, as some
	 * previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}

/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}
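/*
 * Sketch (illustrative, compiled out): the flush code below replaces a
 * request's callback with _dasd_wake_block_flush_cb() and then waits on
 * dasd_flush_wq until the dasd_device layer has returned the request,
 * i.e. its status has dropped below DASD_CQR_QUEUED. Condensed wait
 * side of that handshake; the helper name is hypothetical.
 */
#if 0
static void dasd_example_wait_flushed(struct dasd_ccw_req *cqr)
{
	/* every completion wakes dasd_flush_wq via the callback above */
	wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
}
#endif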
/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request is currently owned by a dasd_device,
		 * cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}

/*
 * Schedule a run of the block tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}


/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}
2111 */ 2112 static int dasd_alloc_queue(struct dasd_block *block) 2113 { 2114 int rc; 2115 2116 block->request_queue = blk_init_queue(do_dasd_request, 2117 &block->request_queue_lock); 2118 if (block->request_queue == NULL) 2119 return -ENOMEM; 2120 2121 block->request_queue->queuedata = block; 2122 2123 elevator_exit(block->request_queue->elevator); 2124 block->request_queue->elevator = NULL; 2125 rc = elevator_init(block->request_queue, "deadline"); 2126 if (rc) { 2127 blk_cleanup_queue(block->request_queue); 2128 return rc; 2129 } 2130 return 0; 2131 } 2132 2133 /* 2134 * Allocate and initialize request queue. 2135 */ 2136 static void dasd_setup_queue(struct dasd_block *block) 2137 { 2138 int max; 2139 2140 blk_queue_logical_block_size(block->request_queue, block->bp_block); 2141 max = block->base->discipline->max_blocks << block->s2b_shift; 2142 blk_queue_max_hw_sectors(block->request_queue, max); 2143 blk_queue_max_segments(block->request_queue, -1L); 2144 /* with page sized segments we can translate each segement into 2145 * one idaw/tidaw 2146 */ 2147 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 2148 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 2149 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL); 2150 } 2151 2152 /* 2153 * Deactivate and free request queue. 2154 */ 2155 static void dasd_free_queue(struct dasd_block *block) 2156 { 2157 if (block->request_queue) { 2158 blk_cleanup_queue(block->request_queue); 2159 block->request_queue = NULL; 2160 } 2161 } 2162 2163 /* 2164 * Flush request on the request queue. 2165 */ 2166 static void dasd_flush_request_queue(struct dasd_block *block) 2167 { 2168 struct request *req; 2169 2170 if (!block->request_queue) 2171 return; 2172 2173 spin_lock_irq(&block->request_queue_lock); 2174 while ((req = blk_fetch_request(block->request_queue))) 2175 __blk_end_request_all(req, -EIO); 2176 spin_unlock_irq(&block->request_queue_lock); 2177 } 2178 2179 static int dasd_open(struct block_device *bdev, fmode_t mode) 2180 { 2181 struct dasd_block *block = bdev->bd_disk->private_data; 2182 struct dasd_device *base; 2183 int rc; 2184 2185 if (!block) 2186 return -ENODEV; 2187 2188 base = block->base; 2189 atomic_inc(&block->open_count); 2190 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 2191 rc = -ENODEV; 2192 goto unlock; 2193 } 2194 2195 if (!try_module_get(base->discipline->owner)) { 2196 rc = -EINVAL; 2197 goto unlock; 2198 } 2199 2200 if (dasd_probeonly) { 2201 dev_info(&base->cdev->dev, 2202 "Accessing the DASD failed because it is in " 2203 "probeonly mode\n"); 2204 rc = -EPERM; 2205 goto out; 2206 } 2207 2208 if (base->state <= DASD_STATE_BASIC) { 2209 DBF_DEV_EVENT(DBF_ERR, base, " %s", 2210 " Cannot open unrecognized device"); 2211 rc = -ENODEV; 2212 goto out; 2213 } 2214 2215 return 0; 2216 2217 out: 2218 module_put(base->discipline->owner); 2219 unlock: 2220 atomic_dec(&block->open_count); 2221 return rc; 2222 } 2223 2224 static int dasd_release(struct gendisk *disk, fmode_t mode) 2225 { 2226 struct dasd_block *block = disk->private_data; 2227 2228 atomic_dec(&block->open_count); 2229 module_put(block->base->discipline->owner); 2230 return 0; 2231 } 2232 2233 /* 2234 * Return disk geometry. 
2235 */ 2236 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 2237 { 2238 struct dasd_block *block; 2239 struct dasd_device *base; 2240 2241 block = bdev->bd_disk->private_data; 2242 if (!block) 2243 return -ENODEV; 2244 base = block->base; 2245 2246 if (!base->discipline || 2247 !base->discipline->fill_geometry) 2248 return -EINVAL; 2249 2250 base->discipline->fill_geometry(block, geo); 2251 geo->start = get_start_sect(bdev) >> block->s2b_shift; 2252 return 0; 2253 } 2254 2255 const struct block_device_operations 2256 dasd_device_operations = { 2257 .owner = THIS_MODULE, 2258 .open = dasd_open, 2259 .release = dasd_release, 2260 .ioctl = dasd_ioctl, 2261 .compat_ioctl = dasd_ioctl, 2262 .getgeo = dasd_getgeo, 2263 }; 2264 2265 /******************************************************************************* 2266 * end of block device operations 2267 */ 2268 2269 static void 2270 dasd_exit(void) 2271 { 2272 #ifdef CONFIG_PROC_FS 2273 dasd_proc_exit(); 2274 #endif 2275 dasd_eer_exit(); 2276 if (dasd_page_cache != NULL) { 2277 kmem_cache_destroy(dasd_page_cache); 2278 dasd_page_cache = NULL; 2279 } 2280 dasd_gendisk_exit(); 2281 dasd_devmap_exit(); 2282 if (dasd_debug_area != NULL) { 2283 debug_unregister(dasd_debug_area); 2284 dasd_debug_area = NULL; 2285 } 2286 } 2287 2288 /* 2289 * SECTION: common functions for ccw_driver use 2290 */ 2291 2292 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 2293 { 2294 struct ccw_device *cdev = data; 2295 int ret; 2296 2297 ret = ccw_device_set_online(cdev); 2298 if (ret) 2299 pr_warning("%s: Setting the DASD online failed with rc=%d\n", 2300 dev_name(&cdev->dev), ret); 2301 } 2302 2303 /* 2304 * Initial attempt at a probe function. this can be simplified once 2305 * the other detection code is gone. 2306 */ 2307 int dasd_generic_probe(struct ccw_device *cdev, 2308 struct dasd_discipline *discipline) 2309 { 2310 int ret; 2311 2312 ret = dasd_add_sysfs_files(cdev); 2313 if (ret) { 2314 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 2315 "dasd_generic_probe: could not add " 2316 "sysfs entries"); 2317 return ret; 2318 } 2319 cdev->handler = &dasd_int_handler; 2320 2321 /* 2322 * Automatically online either all dasd devices (dasd_autodetect) 2323 * or all devices specified with dasd= parameters during 2324 * initial probe. 2325 */ 2326 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 2327 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 2328 async_schedule(dasd_generic_auto_online, cdev); 2329 return 0; 2330 } 2331 2332 /* 2333 * This will one day be called from a global not_oper handler. 2334 * It is also used by driver_unregister during module unload. 2335 */ 2336 void dasd_generic_remove(struct ccw_device *cdev) 2337 { 2338 struct dasd_device *device; 2339 struct dasd_block *block; 2340 2341 cdev->handler = NULL; 2342 2343 dasd_remove_sysfs_files(cdev); 2344 device = dasd_device_from_cdev(cdev); 2345 if (IS_ERR(device)) 2346 return; 2347 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2348 /* Already doing offline processing */ 2349 dasd_put_device(device); 2350 return; 2351 } 2352 /* 2353 * This device is removed unconditionally. Set offline 2354 * flag to prevent dasd_open from opening it while it is 2355 * no quite down yet. 2356 */ 2357 dasd_set_target_state(device, DASD_STATE_NEW); 2358 /* dasd_delete_device destroys the device reference. 
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}

/*
 * SECTION: common functions for ccw_driver use
 */

static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
			   dev_name(&cdev->dev), ret);
}

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_generic_probe: could not add "
				"sysfs entries");
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			pr_warning("%s Setting the DASD online failed because "
				   "of missing DIAG discipline\n",
				   dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warning("%s Setting the DASD online with discipline %s "
			   "failed with rc=%i\n",
			   dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warning("%s Setting the DASD online failed because of a "
			   "missing discipline\n", dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener; that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warning("%s: The DASD cannot be set offline "
					   "with open count %i\n",
					   dev_name(&cdev->dev), open_count);
			else
				pr_warning("%s: The DASD cannot be set offline "
					   "while it is in use\n",
					   dev_name(&cdev->dev));
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		list_for_each_entry(cqr, &device->ccw_queue, devlist)
			if (cqr->status == DASD_CQR_IN_IO) {
				cqr->status = DASD_CQR_QUEUED;
				cqr->retries++;
			}
		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
		if (device->stopped & DASD_UNRESUMED_PM) {
			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
			dasd_restore_device(device);
			ret = 1;
			break;
		}
		dasd_schedule_device_bh(device);
		if (device->block)
			dasd_schedule_block_bh(device->block);
		ret = 1;
		break;
	}
	dasd_put_device(device);
	return ret;
}
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head freeze_queue;
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);
	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
	/* clear active requests */
	INIT_LIST_HEAD(&freeze_queue);
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &freeze_queue);
	}

	spin_unlock_irq(get_ccwdev_lock(cdev));

	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
		if (cqr->status == DASD_CQR_CLEARED)
			cqr->status = DASD_CQR_QUEUED;
	}
	/* move freeze_queue to start of the ccw_queue */
	spin_lock_irq(get_ccwdev_lock(cdev));
	list_splice_tail(&freeze_queue, &device->ccw_queue);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	if (device->discipline->freeze)
		rc = device->discipline->freeze(device);

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * call discipline restore function
	 * if device is stopped do nothing, e.g. for disconnected devices
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
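/*
 * Sketch (illustrative, compiled out): how a discipline driver would
 * typically hook the dasd_generic_* helpers above into its struct
 * ccw_driver, modeled on the ECKD/FBA disciplines; the "xyz" names are
 * hypothetical.
 */
#if 0
static struct dasd_discipline dasd_xyz_discipline;	/* defined elsewhere */

static int dasd_xyz_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &dasd_xyz_discipline);
}

static int dasd_xyz_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_xyz_discipline);
}

static struct ccw_driver dasd_xyz_driver = {
	.name		= "dasd-xyz",
	.probe		= dasd_xyz_probe,
	.remove		= dasd_generic_remove,
	.set_online	= dasd_xyz_set_online,
	.set_offline	= dasd_generic_set_offline,
	.notify		= dasd_generic_notify,
	.freeze		= dasd_generic_pm_freeze,
	.restore	= dasd_generic_restore_device,
};
#endif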
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}

	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}


int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
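/*
 * Sketch (illustrative, compiled out): how a discipline might read the
 * device characteristics during device checking, modeled on the ECKD
 * discipline; the private structure, magic value and 64-byte buffer
 * size are assumptions for the example.
 */
#if 0
struct dasd_xyz_private {
	char rdc_data[64];	/* read device characteristics buffer */
};

static int dasd_xyz_read_rdc(struct dasd_device *device, int magic)
{
	struct dasd_xyz_private *private = device->private;

	return dasd_generic_read_dev_chars(device, magic,
					   &private->rdc_data, 64);
}
#endif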
2694 */ 2695 char *dasd_get_sense(struct irb *irb) 2696 { 2697 struct tsb *tsb = NULL; 2698 char *sense = NULL; 2699 2700 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { 2701 if (irb->scsw.tm.tcw) 2702 tsb = tcw_get_tsb((struct tcw *)(unsigned long) 2703 irb->scsw.tm.tcw); 2704 if (tsb && tsb->length == 64 && tsb->flags) 2705 switch (tsb->flags & 0x07) { 2706 case 1: /* tsa_iostat */ 2707 sense = tsb->tsa.iostat.sense; 2708 break; 2709 case 2: /* tsa_ddpc */ 2710 sense = tsb->tsa.ddpc.sense; 2711 break; 2712 default: 2713 /* currently we don't use interrogate data */ 2714 break; 2715 } 2716 } else if (irb->esw.esw0.erw.cons) { 2717 sense = irb->ecw; 2718 } 2719 return sense; 2720 } 2721 EXPORT_SYMBOL_GPL(dasd_get_sense); 2722 2723 static int __init dasd_init(void) 2724 { 2725 int rc; 2726 2727 init_waitqueue_head(&dasd_init_waitq); 2728 init_waitqueue_head(&dasd_flush_wq); 2729 init_waitqueue_head(&generic_waitq); 2730 2731 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 2732 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); 2733 if (dasd_debug_area == NULL) { 2734 rc = -ENOMEM; 2735 goto failed; 2736 } 2737 debug_register_view(dasd_debug_area, &debug_sprintf_view); 2738 debug_set_level(dasd_debug_area, DBF_WARNING); 2739 2740 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 2741 2742 dasd_diag_discipline_pointer = NULL; 2743 2744 rc = dasd_devmap_init(); 2745 if (rc) 2746 goto failed; 2747 rc = dasd_gendisk_init(); 2748 if (rc) 2749 goto failed; 2750 rc = dasd_parse(); 2751 if (rc) 2752 goto failed; 2753 rc = dasd_eer_init(); 2754 if (rc) 2755 goto failed; 2756 #ifdef CONFIG_PROC_FS 2757 rc = dasd_proc_init(); 2758 if (rc) 2759 goto failed; 2760 #endif 2761 2762 return 0; 2763 failed: 2764 pr_info("The DASD device driver could not be initialized\n"); 2765 dasd_exit(); 2766 return rc; 2767 } 2768 2769 module_init(dasd_init); 2770 module_exit(dasd_exit); 2771 2772 EXPORT_SYMBOL(dasd_debug_area); 2773 EXPORT_SYMBOL(dasd_diag_discipline_pointer); 2774 2775 EXPORT_SYMBOL(dasd_add_request_head); 2776 EXPORT_SYMBOL(dasd_add_request_tail); 2777 EXPORT_SYMBOL(dasd_cancel_req); 2778 EXPORT_SYMBOL(dasd_device_clear_timer); 2779 EXPORT_SYMBOL(dasd_block_clear_timer); 2780 EXPORT_SYMBOL(dasd_enable_device); 2781 EXPORT_SYMBOL(dasd_int_handler); 2782 EXPORT_SYMBOL(dasd_kfree_request); 2783 EXPORT_SYMBOL(dasd_kick_device); 2784 EXPORT_SYMBOL(dasd_kmalloc_request); 2785 EXPORT_SYMBOL(dasd_schedule_device_bh); 2786 EXPORT_SYMBOL(dasd_schedule_block_bh); 2787 EXPORT_SYMBOL(dasd_set_target_state); 2788 EXPORT_SYMBOL(dasd_device_set_timer); 2789 EXPORT_SYMBOL(dasd_block_set_timer); 2790 EXPORT_SYMBOL(dasd_sfree_request); 2791 EXPORT_SYMBOL(dasd_sleep_on); 2792 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2793 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2794 EXPORT_SYMBOL(dasd_smalloc_request); 2795 EXPORT_SYMBOL(dasd_start_IO); 2796 EXPORT_SYMBOL(dasd_term_IO); 2797 2798 EXPORT_SYMBOL_GPL(dasd_generic_probe); 2799 EXPORT_SYMBOL_GPL(dasd_generic_remove); 2800 EXPORT_SYMBOL_GPL(dasd_generic_notify); 2801 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 2802 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 2803 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 2804 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2805 EXPORT_SYMBOL_GPL(dasd_alloc_block); 2806 EXPORT_SYMBOL_GPL(dasd_free_block); 2807