/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}
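
/*
 * Note (editorial, for orientation): the two pages behind ccw_mem and
 * the page behind erp_mem are carved into chunk lists in
 * dasd_alloc_device above. They back the per-device allocators used by
 * dasd_smalloc_request and by the error recovery code, so channel
 * program memory is available even when kmalloc cannot be used.
 */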

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;

	return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up the reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 1, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		dasd_flush_request_queue(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	if (device->block) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}
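
/*
 * For reference (editorial summary of the two functions above), the
 * state ladder is
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *			    \-> UNFMT (analysis failed; only BASIC is
 *				reachable again from here)
 *
 * dasd_increase_state walks left to right and dasd_decrease_state right
 * to left, taking as many transitions as possible in one call until
 * device->state == device->target or a transition returns an error.
 */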

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device on the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_work);
	dasd_change_state(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	block->profile.counter[index]++; \
}
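
/*
 * Editorial note: the loop above selects a logarithmic histogram
 * bucket; index is the smallest i (capped at 31) for which
 * value >> (2+i) == 0, so bucket i covers values below 2^(2+i).
 * Illustrative only: value 3 -> index 0, value 10 -> index 2,
 * value 100 -> index 5.
 */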

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
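
/*
 * Illustrative sketch (not part of the original source): callers pair
 * each allocator with its matching free routine. The magic string and
 * sizes below are hypothetical:
 *
 *	cqr = dasd_smalloc_request("ECKD", 2, 64, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... fill cqr->cpaddr[] and cqr->data ...
 *	dasd_sfree_request(cqr, device);
 *
 * A request from dasd_kmalloc_request must instead be released with
 * dasd_kfree_request, which also clears idal lists set up by
 * dasd_set_cda.
 */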

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			dev_name(&cdev->dev), cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (device == NULL ||
	    device != dasd_device_from_cdev_locked(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			dev_name(&cdev->dev));
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	device->stopped &= ~DASD_STOPPED_PENDING;
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING "%s(%s): request timed out\n",
			       __func__, dev_name(&cdev->dev));
			break;
		default:
			printk(KERN_WARNING "%s(%s): unknown error %ld\n",
			       __func__, dev_name(&cdev->dev), PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  dev_name(&cdev->dev), ((irb->scsw.cmd.cstat << 8) |
		  irb->scsw.cmd.dstat), (unsigned int) intparm);

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((irb->scsw.cmd.cc == 1) &&
		     (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
		     (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) {
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			dev_name(&cdev->dev));
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			dev_name(&cdev->dev), cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
	next = NULL;
	expires = 0;
	if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			DEV_MESSAGE(KERN_DEBUG, device,
				    "default ERP in fastpath (%i retries left)",
				    cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
		else
			DEV_MESSAGE(KERN_DEBUG, device, "%s",
				    "Interrupt fastpath "
				    "failed!");
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue requests that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * The cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function.
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "wrong cqr status in __dasd_process_final_queue "
				    "for cqr %p, status %x",
				    cqr, cqr->status);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p, termination failed, "
				    "retrying in 5s",
				    (cqr->expires/HZ), cqr);
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p (%i retries left)",
				    (cqr->expires/HZ), cqr, cqr->retries);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called. In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				DEV_MESSAGE(KERN_ERR, device,
					    "dasd flush ccw_queue is unable "
					    " to terminate request %p",
					    cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_device_tasklet over the device's tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_NEED_ERP ||
	       cqr->status == DASD_CQR_TERMINATED) &&
	      list_empty(&cqr->devlist));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}
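
/*
 * Illustrative sketch (not part of the original source): synchronous
 * I/O combines the allocator, dasd_sleep_on and the matching free
 * routine. The magic string and sizes below are hypothetical:
 *
 *	cqr = dasd_smalloc_request("ECKD", 1, 32, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... build the channel program in cqr->cpaddr / cqr->data ...
 *	cqr->startdev = device;
 *	cqr->status = DASD_CQR_FILLED;
 *	rc = dasd_sleep_on(cqr);	(0 on DASD_CQR_DONE, else -EIO)
 *	dasd_sfree_request(cqr, device);
 */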

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
	if (rc == -ERESTARTSYS) {
		dasd_cancel_req(cqr);
		/* wait (non-interruptible) for final status */
		wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}

/*
 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}

/*
 * Cancels a request that was started with dasd_sleep_on.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 *	   0 if there was no need to terminate the request (not started yet)
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			DEV_MESSAGE(KERN_ERR, device,
				    "dasd_cancel_req is unable "
				    " to terminate request %p, rc = %d",
				    cqr, rc);
		} else {
			cqr->stopclk = get_clock();
			rc = 1;
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}


/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	block->base->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}

/*
 * Notify the block layer about a finished request.
 */
static inline void dasd_end_request(struct request *req, int error)
{
	if (__blk_end_request(req, error, blk_rq_bytes(req)))
		BUG();
}

/*
 * Process finished error recovery ccw.
 */
static inline void __dasd_block_process_erp(struct dasd_block *block,
					    struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;
	struct dasd_device *device = block->base;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue requests from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that.
	 * State DASD_STATE_ONLINE is normal block device operation.
	 */
	if (basedev->state < DASD_STATE_READY)
		return;
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue)) {

		req = elv_next_request(queue);

		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
				basedev->stopped |= DASD_STOPPED_PENDING;
				spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	dasd_end_request(req, error);
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			erp_fn(cqr);
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			base->stopped |= DASD_STOPPED_QUIESCE;
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_block_process_erp(block, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first requests on the queue, as some
	 * of previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
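
/*
 * Editorial summary of the request life cycle implemented here: a block
 * layer request travels
 *
 *	request queue -> build_cp -> block->ccw_queue (DASD_CQR_FILLED)
 *	-> __dasd_block_start_head -> device->ccw_queue (DASD_CQR_QUEUED)
 *	-> start_IO (DASD_CQR_IN_IO) -> interrupt (SUCCESS or ERROR)
 *	-> device tasklet final queue -> dasd_return_cqr_cb
 *	-> block tasklet -> __dasd_cleanup_cqr -> dasd_end_request.
 */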

/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_block_process_erp(block, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}

/*
 * Schedules a call to dasd_block_tasklet over the block's tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}


/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	/* Replace the default elevator with the deadline I/O scheduler. */
	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}

/*
 * Set up the request queue parameters (block size and request limits).
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	blk_queue_hardsect_size(block->request_queue, block->bp_block);
	max = block->base->discipline->max_blocks << block->s2b_shift;
	blk_queue_max_sectors(block->request_queue, max);
	blk_queue_max_phys_segments(block->request_queue, -1L);
	blk_queue_max_hw_segments(block->request_queue, -1L);
	blk_queue_max_segment_size(block->request_queue, -1L);
	blk_queue_segment_boundary(block->request_queue, -1L);
	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}

/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		block->request_queue = NULL;
	}
}

/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
	struct request *req;

	if (!block->request_queue)
		return;

	spin_lock_irq(&block->request_queue_lock);
	while ((req = elv_next_request(block->request_queue))) {
		blkdev_dequeue_request(req);
		dasd_end_request(req, -EIO);
	}
	spin_unlock_irq(&block->request_queue_lock);
}

static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_block *block = bdev->bd_disk->private_data;
	struct dasd_device *base = block->base;
	int rc;

	atomic_inc(&block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, base, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&block->open_count);
	return rc;
}

static int dasd_release(struct gendisk *disk, fmode_t mode)
{
	struct dasd_block *block = disk->private_data;

	atomic_dec(&block->open_count);
	module_put(block->base->discipline->owner);
	return 0;
}
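/*
 * Illustrative sketch (not part of the driver): dasd_open above uses the
 * classic acquire-in-order / release-in-reverse goto unwind. Resources
 * are taken top to bottom; each later failure jumps to the label that
 * releases exactly what has been acquired so far:
 *
 *	atomic_inc(&open_count);		// 1st resource
 *	if (!try_module_get(owner)) {		// 2nd resource
 *		rc = -EINVAL;
 *		goto unlock;			// undo the 1st only
 *	}
 *	if (some_check_fails) {			// hypothetical check
 *		rc = -ENODEV;
 *		goto out;			// undo the 2nd, then the 1st
 *	}
 *	return 0;
 * out:
 *	module_put(owner);
 * unlock:
 *	atomic_dec(&open_count);
 *	return rc;
 */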
2079 */ 2080 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 2081 { 2082 struct dasd_block *block; 2083 struct dasd_device *base; 2084 2085 block = bdev->bd_disk->private_data; 2086 base = block->base; 2087 if (!block) 2088 return -ENODEV; 2089 2090 if (!base->discipline || 2091 !base->discipline->fill_geometry) 2092 return -EINVAL; 2093 2094 base->discipline->fill_geometry(block, geo); 2095 geo->start = get_start_sect(bdev) >> block->s2b_shift; 2096 return 0; 2097 } 2098 2099 struct block_device_operations 2100 dasd_device_operations = { 2101 .owner = THIS_MODULE, 2102 .open = dasd_open, 2103 .release = dasd_release, 2104 .locked_ioctl = dasd_ioctl, 2105 .getgeo = dasd_getgeo, 2106 }; 2107 2108 /******************************************************************************* 2109 * end of block device operations 2110 */ 2111 2112 static void 2113 dasd_exit(void) 2114 { 2115 #ifdef CONFIG_PROC_FS 2116 dasd_proc_exit(); 2117 #endif 2118 dasd_eer_exit(); 2119 if (dasd_page_cache != NULL) { 2120 kmem_cache_destroy(dasd_page_cache); 2121 dasd_page_cache = NULL; 2122 } 2123 dasd_gendisk_exit(); 2124 dasd_devmap_exit(); 2125 if (dasd_debug_area != NULL) { 2126 debug_unregister(dasd_debug_area); 2127 dasd_debug_area = NULL; 2128 } 2129 } 2130 2131 /* 2132 * SECTION: common functions for ccw_driver use 2133 */ 2134 2135 /* 2136 * Initial attempt at a probe function. this can be simplified once 2137 * the other detection code is gone. 2138 */ 2139 int dasd_generic_probe(struct ccw_device *cdev, 2140 struct dasd_discipline *discipline) 2141 { 2142 int ret; 2143 2144 ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); 2145 if (ret) { 2146 printk(KERN_WARNING 2147 "dasd_generic_probe: could not set ccw-device options " 2148 "for %s\n", dev_name(&cdev->dev)); 2149 return ret; 2150 } 2151 ret = dasd_add_sysfs_files(cdev); 2152 if (ret) { 2153 printk(KERN_WARNING 2154 "dasd_generic_probe: could not add sysfs entries " 2155 "for %s\n", dev_name(&cdev->dev)); 2156 return ret; 2157 } 2158 cdev->handler = &dasd_int_handler; 2159 2160 /* 2161 * Automatically online either all dasd devices (dasd_autodetect) 2162 * or all devices specified with dasd= parameters during 2163 * initial probe. 2164 */ 2165 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 2166 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 2167 ret = ccw_device_set_online(cdev); 2168 if (ret) 2169 printk(KERN_WARNING 2170 "dasd_generic_probe: could not initially " 2171 "online ccw-device %s; return code: %d\n", 2172 dev_name(&cdev->dev), ret); 2173 return 0; 2174 } 2175 2176 /* 2177 * This will one day be called from a global not_oper handler. 2178 * It is also used by driver_unregister during module unload. 2179 */ 2180 void dasd_generic_remove(struct ccw_device *cdev) 2181 { 2182 struct dasd_device *device; 2183 struct dasd_block *block; 2184 2185 cdev->handler = NULL; 2186 2187 dasd_remove_sysfs_files(cdev); 2188 device = dasd_device_from_cdev(cdev); 2189 if (IS_ERR(device)) 2190 return; 2191 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2192 /* Already doing offline processing */ 2193 dasd_put_device(device); 2194 return; 2195 } 2196 /* 2197 * This device is removed unconditionally. Set offline 2198 * flag to prevent dasd_open from opening it while it is 2199 * no quite down yet. 2200 */ 2201 dasd_set_target_state(device, DASD_STATE_NEW); 2202 /* dasd_delete_device destroys the device reference. 
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * The life cycle of block is bound to the device, so delete it
	 * after the device has been safely removed.
	 */
	if (block)
		dasd_free_block(block);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears the initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			printk(KERN_WARNING
			       "dasd_generic couldn't online device %s "
			       "- discipline DIAG not available\n",
			       dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate the block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		printk(KERN_WARNING
		       "dasd_generic couldn't online device %s "
		       "with discipline %s rc=%i\n",
		       dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		printk(KERN_WARNING
		       "dasd_generic discipline not found for %s\n",
		       dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}
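/*
 * Illustrative sketch (not part of the driver): the two-stage module
 * pinning in dasd_generic_set_online above. Both the base discipline and
 * the (possibly different) DIAG discipline stay pinned while the device
 * is online, and the references are dropped in reverse order on failure:
 *
 *	if (!try_module_get(base_discipline->owner))
 *		goto fail;			// nothing to undo yet
 *	if (!try_module_get(discipline->owner)) {
 *		module_put(base_discipline->owner);
 *		goto fail;			// undo the first reference
 *	}
 */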
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, which includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				printk(KERN_WARNING "Can't offline dasd "
				       "device with open count = %i.\n",
				       open_count);
			else
				printk(KERN_WARNING "%s",
				       "Can't offline dasd device due "
				       "to internal use\n");
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * The life cycle of block is bound to the device, so delete it
	 * after the device has been safely removed.
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		list_for_each_entry(cqr, &device->ccw_queue, devlist)
			if (cqr->status == DASD_CQR_IN_IO) {
				cqr->status = DASD_CQR_QUEUED;
				cqr->retries++;
			}
		device->stopped |= DASD_STOPPED_DC_WAIT;
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~DASD_STOPPED_DC_WAIT;
		dasd_schedule_device_bh(device);
		if (device->block)
			dasd_schedule_block_bh(device->block);
		ret = 1;
		break;
	}
	dasd_put_device(device);
	return ret;
}

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   char *magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate RDC request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t)rdc_buffer;
	ccw->count = rdc_buffer_size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}


int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
				void **rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
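/*
 * Illustrative caller (sketch; the real callers are the discipline
 * modules, e.g. dasd_eckd.c). The buffer type and magic string below are
 * hypothetical:
 *
 *	struct example_rdc_data *rdc;
 *	int rc;
 *
 *	rdc = kzalloc(sizeof(*rdc), GFP_KERNEL);
 *	if (!rdc)
 *		return -ENOMEM;
 *	rc = dasd_generic_read_dev_chars(device, "EXMP",
 *					 (void **) &rdc, sizeof(*rdc));
 */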
static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);
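/*
 * Illustrative sketch (hypothetical names): how a discipline module wires
 * the dasd_generic_* exports above into its ccw_driver; compare
 * dasd_eckd.c and dasd_fba.c for the real thing.
 *
 *	static struct ccw_driver example_ccw_driver = {
 *		.name        = "dasd-example",
 *		.owner       = THIS_MODULE,
 *		.ids         = example_ids,
 *		.probe       = example_probe,      // calls dasd_generic_probe()
 *		.remove      = dasd_generic_remove,
 *		.set_online  = example_set_online, // calls dasd_generic_set_online()
 *		.set_offline = dasd_generic_set_offline,
 *		.notify      = dasd_generic_notify,
 *	};
 */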