/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_device *device);
static void dasd_setup_queue(struct dasd_device *device);
static void dasd_free_queue(struct dasd_device *device);
static void dasd_flush_request_queue(struct dasd_device *);
static int dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(struct work_struct *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *
dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (device == NULL)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&device->open_count, -1);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (device->ccw_mem == NULL) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (device->erp_mem == NULL) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	spin_lock_init(&device->request_queue_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}
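/*
 * Illustrative sketch (not built, hence #if 0): the expected pairing of
 * dasd_alloc_device() and dasd_free_device(). The example_ names are
 * hypothetical; error handling and the devmap bookkeeping done by the
 * real caller (dasd_create_device) are omitted here.
 */
#if 0
static struct dasd_device *example_create(void)
{
	struct dasd_device *device;

	device = dasd_alloc_device();	/* returns ERR_PTR() on failure */
	if (IS_ERR(device))
		return device;
	/* ... attach cdev, discipline, etc. ... */
	return device;
}

static void example_destroy(struct dasd_device *device)
{
	dasd_free_device(device);	/* releases ccw/erp pages and struct */
}
#endif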
/*
 * Free memory of a device structure.
 */
void
dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Make a new device known to the system.
 */
static int
dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	rc = dasd_alloc_queue(device);
	if (rc) {
		dasd_put_device(device);
		return rc;
	}

	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int
dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline)
		module_put(device->discipline->owner);
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	dasd_free_queue(device);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

/*
 * Allocate the gendisk and register the device-specific debug area.
 */
static int
dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	rc = dasd_gendisk_alloc(device);
	if (rc)
		return rc;

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the gendisk and the debug area. Terminate any running i/o.
 */
static int
dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	dasd_gendisk_free(device);
	rc = dasd_flush_ccw_queue(device, 1);
	if (rc)
		return rc;
	dasd_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
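/*
 * Overview (informal): a device walks the state ladder
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *	                    ^
 *	                    +--- UNFMT (analysis failed; fake disk kept
 *	                         for formatting only)
 *
 * dasd_increase_state()/dasd_decrease_state() below move one step at a
 * time towards device->target; dasd_change_state() drives the loop.
 */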
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int
dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->discipline->do_analysis != NULL)
		rc = device->discipline->do_analysis(device);
	if (rc) {
		if (rc != -EAGAIN)
			device->state = DASD_STATE_UNFMT;
		return rc;
	}
	/* make disk known with correct capacity */
	dasd_setup_queue(device);
	set_capacity(device->gdp, device->blocks << device->s2b_shift);
	device->state = DASD_STATE_READY;
	rc = dasd_scan_partitions(device);
	if (rc)
		device->state = DASD_STATE_BASIC;
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int
dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	rc = dasd_flush_ccw_queue(device, 0);
	if (rc)
		return rc;
	dasd_destroy_partitions(device);
	dasd_flush_request_queue(device);
	device->blocks = 0;
	device->bp_block = 0;
	device->s2b_shift = 0;
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Back to basic.
 */
static int
dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
{
	device->state = DASD_STATE_ONLINE;
	dasd_schedule_bh(device);
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int
dasd_state_online_to_ready(struct dasd_device *device)
{
	device->state = DASD_STATE_READY;
	return 0;
}

/*
 * Device startup state changes.
 */
static int
dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}
/*
 * Device shutdown state changes.
 */
static int
dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void
dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void
do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_work);
	dasd_change_state(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}

void
dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

/*
 * Set the target state for a device and start the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Enable a device and wait until it has reached the target state.
 */
static inline int
_wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void
dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the device to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
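/*
 * Illustrative sketch (not built): how discipline or sysfs code drives
 * the state machine. The example_ name is hypothetical.
 */
#if 0
static void example_bring_online(struct dasd_device *device)
{
	/* Synchronous variant: set the target state and wait for it. */
	dasd_enable_device(device);

	/* Asynchronous variant, usable from interrupt context: let the
	 * kernel event daemon call dasd_change_state() for us. */
	dasd_kick_device(device);
}
#endif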
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, device) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	device->profile.counter[index]++; \
}

/*
 * Add profiling information for cqr before execution.
 */
static void
dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req *cqr,
		   struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	device->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req *cqr,
		 struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!device->profile.dasd_io_reqs)
		memset(&device->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	device->profile.dasd_io_reqs++;
	device->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, device);
	dasd_profile_counter(tottime, dasd_io_times, device);
	dasd_profile_counter(tottimeps, dasd_io_timps, device);
	dasd_profile_counter(strtime, dasd_io_time1, device);
	dasd_profile_counter(irqtime, dasd_io_time2, device);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
	dasd_profile_counter(endtime, dasd_io_time3, device);
}
#else
#define dasd_profile_start(device, cqr, req) do {} while (0)
#define dasd_profile_end(device, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */
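/*
 * Note on the histogram buckets used by dasd_profile_counter() above:
 * the loop stops at the first index with value < 2^(2+index), i.e.
 * index = floor(log2(value)) - 1 for value >= 4, capped at 31 buckets.
 * For example, value = 100 lands in index 5, because 100 >> 7 == 0
 * while 100 >> 6 == 1 (64 <= 100 < 128).
 */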
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *
dasd_kmalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *
dasd_smalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void
dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void
dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
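/*
 * Illustrative sketch (not built): the typical allocate/build/run/free
 * cycle as used by discipline code. "XYZ " stands in for a real
 * discipline magic, and the single-NOP channel program (command code
 * 0x03) is purely an example.
 */
#if 0
static int example_nop(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	/* one CCW, no extra data area, from the per-device chunk pool */
	cqr = dasd_smalloc_request("XYZ ", 1, 0, device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->cpaddr->cmd_code = 0x03;	/* channel NOP */
	cqr->device = device;
	cqr->expires = 10 * HZ;
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);	/* submit and wait, see below */
	dasd_sfree_request(cqr, device);	/* pairs with dasd_smalloc_request */
	return rc;
}
#endif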
/*
 * Check discipline magic in cqr.
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->device;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int
dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->device;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int
dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->device;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_FAILED;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successfully",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void
dasd_timeout_device(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_timeout_device;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}

/*
 * Clear timeout for a device.
 */
void
dasd_clear_timer(struct dasd_device *device)
{
	if (timer_pending(&device->timer))
		del_timer(&device->timer);
}

static void
dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    device != dasd_device_from_cdev_locked(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_clear_timer(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}
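/*
 * Usage note (informal): expires is a relative delay in jiffies, so
 * dasd_set_timer(device, 5 * HZ) arms (or re-arms) the retry timer for
 * five seconds from now, while dasd_set_timer(device, 0) is equivalent
 * to dasd_clear_timer(device).
 */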
static void
dasd_handle_state_change_pending(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct list_head *l, *n;

	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	device->stopped &= ~DASD_STOPPED_PENDING;

	/* restart all 'running' IO on queue */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		if (cqr->status == DASD_CQR_IN_IO) {
			cqr->status = DASD_CQR_QUEUED;
		}
	}
	dasd_clear_timer(device);
	dasd_schedule_bh(device);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		 struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;
	dasd_era_t era;
	char mask;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING "%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			//FIXME - dasd uses own timeout interface...
			break;
		default:
			printk(KERN_WARNING "%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_handle_state_change_pending(device);
			dasd_put_device(device);
		}
		return;
	}

	cqr = (struct dasd_ccw_req *) intparm;

	/* check for unsolicited interrupts */
	if (cqr == NULL) {
		MESSAGE(KERN_DEBUG,
			"unsolicited interrupt received: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_QUEUED;
		dasd_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

	/* Find out the appropriate era_action. */
	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
		era = dasd_era_fatal;
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		 irb->scsw.cstat == 0 &&
		 !irb->esw.esw0.erw.cons)
		era = dasd_era_none;
	else if (irb->esw.esw0.erw.cons)
		era = device->discipline->examine_error(cqr, irb);
	else
		era = dasd_era_recover;

	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
	expires = 0;
	if (era == dasd_era_none) {
		cqr->status = DASD_CQR_DONE;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->list.next != &device->ccw_queue) {
			next = list_entry(cqr->list.next,
					  struct dasd_ccw_req, list);
			if ((next->status == DASD_CQR_QUEUED) &&
			    (!device->stopped)) {
				if (device->discipline->start_IO(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_DEBUG, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		if (device->features & DASD_FEATURE_ERPLOG) {
			/* dump sense data */
			dasd_log_sense(cqr, irb);
		}
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = now;
			break;
		case dasd_era_recover:
			cqr->status = DASD_CQR_ERROR;
			break;
		default:
			BUG();
		}
	}
	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);
}

/*
 * Notify the block layer about a finished request.
 */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}

/*
 * Process finished error recovery ccw.
 */
static inline void
__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Process ccw request queue.
 */
static void
__dasd_process_ccw_queue(struct dasd_device *device,
			 struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Stop list processing at the first non-final request. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				if (cqr->irb.esw.esw0.erw.cons &&
				    test_bit(DASD_CQR_FLAGS_USE_ERP,
					     &cqr->flags)) {
					erp_fn = device->discipline->
						erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			goto restart;
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(device) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(device, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries = 255;
			device->stopped |= DASD_STOPPED_QUIESCE;
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}

static void
dasd_end_request_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct request *req;
	struct dasd_device *device;
	int status;

	req = (struct request *) data;
	device = cqr->device;
	dasd_profile_end(device, cqr, req);
	status = cqr->device->discipline->free_cp(cqr, req);
	spin_lock_irq(&device->request_queue_lock);
	dasd_end_request(req, status);
	spin_unlock_irq(&device->request_queue_lock);
}

/*
 * Fetch requests from the block device queue.
 */
static void
__dasd_process_blk_queue(struct dasd_device *device)
{
	request_queue_t *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Now we try to fetch requests from the request queue */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
	       nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);

		if (device->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, device,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&device->ccw_queue))
					break;
				device->stopped |= DASD_STOPPED_PENDING;
				dasd_set_timer(device, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, device,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}
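/*
 * Overview (informal): the life of a block request in this driver.
 *
 *   1. __dasd_process_blk_queue() dequeues it, has the discipline build
 *      a cqr (build_cp) and appends it to device->ccw_queue (at most
 *      DASD_CHANQ_MAX_SIZE queued cqrs at a time).
 *   2. __dasd_start_head() starts the head cqr via the discipline's
 *      start_IO and arms the expiry timer.
 *   3. dasd_int_handler() marks the cqr done/failed/error on interrupt.
 *   4. dasd_tasklet() -> __dasd_process_ccw_queue() runs ERP if needed
 *      and moves final cqrs to a local queue, whose callbacks
 *      (dasd_end_request_cb) complete the block request.
 */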
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void
__dasd_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dasd_set_timer(device, 5*HZ);
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p, termination failed, "
				    "retrying in 5s",
				    (cqr->expires/HZ), cqr);
		} else {
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p (%i retries left)",
				    (cqr->expires/HZ), cqr, cqr->retries);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void
__dasd_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* Non-temporary stop condition will trigger fail fast */
	if (device->stopped & ~DASD_STOPPED_PENDING &&
	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
	    (!dasd_eer_enabled(device))) {
		cqr->status = DASD_CQR_FAILED;
		dasd_schedule_bh(device);
		return;
	}
	/* Don't try to start requests if device is stopped */
	if (device->stopped)
		return;

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_set_timer(device, 50);
}

static inline int
_wait_for_clear(struct dasd_ccw_req *cqr)
{
	return (cqr->status == DASD_CQR_QUEUED);
}

/*
 * Remove all requests from the ccw queue (all = '1') or only block device
 * requests in case all = '0'.
 * Take care of the erp-chain (chained via cqr->refers) and remove either
 * the whole erp-chain or none of the erp-requests.
 * If a request is currently running, term_IO is called and the request
 * is re-queued. Prior to removing the terminated request we need to wait
 * for the clear-interrupt.
 * In case termination is not possible we stop processing and just finish
 * the already moved requests.
 */
static int
dasd_flush_ccw_queue(struct dasd_device *device, int all)
{
	struct dasd_ccw_req *cqr, *orig, *n;
	int rc, i;

	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
		/* get original request of erp request-chain */
		for (orig = cqr; orig->refers != NULL; orig = orig->refers);

		/* Flush all requests or only block device requests? */
		if (all == 0 && cqr->callback != dasd_end_request_cb &&
		    orig->callback != dasd_end_request_cb) {
			continue;
		}
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				DEV_MESSAGE(KERN_ERR, device,
					    "dasd flush ccw_queue is unable "
					    "to terminate request %p",
					    cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
		case DASD_CQR_ERROR:
			/* set request to FAILED */
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_FAILED;
			break;
		default:	/* do not touch the others */
			break;
		}
		/* Rechain request (including erp chain) */
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
			cqr->endclk = get_clock();
			list_move_tail(&cqr->list, &flush_queue);
		}
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}

finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, list) {
		if (cqr->status == DASD_CQR_CLEAR) {
			/* wait for clear interrupt! */
			wait_event(dasd_flush_wq, _wait_for_clear(cqr));
			cqr->status = DASD_CQR_FAILED;
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		cqr->endclk = get_clock();
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void
dasd_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		list_del_init(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	dasd_put_device(device);
}
/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void
dasd_schedule_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add_tail(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup callback.
 */
static void
dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int
_wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_FAILED) &&
	      list_empty(&cqr->list));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Attempts to start a special ccw queue and waits for its completion.
 */
int
dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
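/*
 * Note (informal): three synchronous submission variants exist.
 * dasd_sleep_on() above waits uninterruptibly,
 * dasd_sleep_on_interruptible() below lets a pending signal terminate or
 * dequeue the cqr, and dasd_sleep_on_immediatly() preempts the currently
 * running cqr first (used e.g. for steal lock). All of them install
 * dasd_wakeup_cb as cqr->callback, so the caller must not use that field.
 */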
/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request is final (done or failed) */
			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
			break;
		}
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			/* terminate running cqr */
			if (device->discipline->term_IO) {
				cqr->retries = -1;
				device->discipline->term_IO(cqr);
				/* wait (non-interruptible) for final status
				 * because signal is still pending */
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				wait_event(wait_q, _wait_for_wakeup(cqr));
				spin_lock_irq(get_ccwdev_lock(device->cdev));
				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
				finished = 1;
			}
			break;
		case DASD_CQR_QUEUED:
			/* request was not started - remove it from the queue */
			list_del_init(&cqr->list);
			rc = -EIO;
			finished = 1;
			break;
		default:
			/* cqr with 'non-interruptible' status - just wait */
			break;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	return device->discipline->term_IO(cqr);
}

int
dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* what to do if unable to terminate?
			   e.g. not _IN_IO */
			cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();

	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
	return rc;
}

/*
 * SECTION: Block device operations (request queue, partitions, open, release).
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void
do_dasd_request(request_queue_t *queue)
{
	struct dasd_device *device;

	device = (struct dasd_device *) queue->queuedata;
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int
dasd_alloc_queue(struct dasd_device *device)
{
	int rc;

	device->request_queue = blk_init_queue(do_dasd_request,
					       &device->request_queue_lock);
	if (device->request_queue == NULL)
		return -ENOMEM;

	device->request_queue->queuedata = device;

	elevator_exit(device->request_queue->elevator);
	rc = elevator_init(device->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(device->request_queue);
		return rc;
	}
	return 0;
}

/*
 * Set up the request queue parameters (block size, limits, ordering).
 */
static void
dasd_setup_queue(struct dasd_device *device)
{
	int max;

	blk_queue_hardsect_size(device->request_queue, device->bp_block);
	max = device->discipline->max_blocks << device->s2b_shift;
	blk_queue_max_sectors(device->request_queue, max);
	blk_queue_max_phys_segments(device->request_queue, -1L);
	blk_queue_max_hw_segments(device->request_queue, -1L);
	blk_queue_max_segment_size(device->request_queue, -1L);
	blk_queue_segment_boundary(device->request_queue, -1L);
	blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
}

/*
 * Deactivate and free request queue.
 */
static void
dasd_free_queue(struct dasd_device *device)
{
	if (device->request_queue) {
		blk_cleanup_queue(device->request_queue);
		device->request_queue = NULL;
	}
}
/*
 * Flush requests on the request queue.
 */
static void
dasd_flush_request_queue(struct dasd_device *device)
{
	struct request *req;

	if (!device->request_queue)
		return;

	spin_lock_irq(&device->request_queue_lock);
	while ((req = elv_next_request(device->request_queue))) {
		blkdev_dequeue_request(req);
		dasd_end_request(req, 0);
	}
	spin_unlock_irq(&device->request_queue_lock);
}

static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (device->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}

static int
dasd_release(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;

	atomic_dec(&device->open_count);
	module_put(device->discipline->owner);
	return 0;
}

/*
 * Return disk geometry.
 */
static int
dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *device;

	device = bdev->bd_disk->private_data;
	if (!device)
		return -ENODEV;

	if (!device->discipline ||
	    !device->discipline->fill_geometry)
		return -EINVAL;

	device->discipline->fill_geometry(device, geo);
	geo->start = get_start_sect(bdev) >> device->s2b_shift;
	return 0;
}

struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_compat_ioctl,
	.getgeo		= dasd_getgeo,
};

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}
/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int
dasd_generic_probe(struct ccw_device *cdev,
		   struct dasd_discipline *discipline)
{
	int ret;

	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not set ccw-device options "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not add sysfs entries "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
		ret = ccw_device_set_online(cdev);
	if (ret)
		printk(KERN_WARNING
		       "dasd_generic_probe: could not initially online "
		       "ccw-device %s\n", cdev->dev.bus_id);
	return ret;
}

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void
dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int
dasd_generic_set_online(struct ccw_device *cdev,
			struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			printk(KERN_WARNING
			       "dasd_generic couldn't online device %s "
			       "- discipline DIAG not available\n",
			       cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	rc = discipline->check_device(device);
	if (rc) {
		printk(KERN_WARNING
		       "dasd_generic couldn't online device %s "
		       "with discipline %s rc=%i\n",
		       cdev->dev.bus_id, discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		printk(KERN_WARNING
		       "dasd_generic discipline not found for %s\n",
		       cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 cdev->dev.bus_id);

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}

int
dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	open_count = (int) atomic_read(&device->open_count);
	if (open_count > max_count) {
		if (open_count > 0)
			printk(KERN_WARNING "Can't offline dasd device with "
			       "open count = %i.\n",
			       open_count);
		else
			printk(KERN_WARNING "%s",
			       "Can't offline dasd device due to internal "
			       "use\n");
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);

	return 0;
}
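
/*
 * Note: set_online/set_offline above are normally reached through the
 * common I/O layer when the ccw device's sysfs "online" attribute is
 * written, e.g. "echo 0 > /sys/bus/ccw/devices/0.0.0200/online"
 * (bus id chosen for illustration).
 */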
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
		} else {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO) {
					cqr->status = DASD_CQR_QUEUED;
					cqr->retries++;
				}
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		dasd_schedule_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}


static int __init
dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);
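
/*
 * The exports below form the interface used by the discipline modules
 * (dasd_eckd, dasd_fba, dasd_diag). A hypothetical sketch (not part of
 * the driver, channel program elided) of how a discipline runs one
 * synchronous request through the exported helpers:
 */
#if 0
static int
example_sync_request(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	/* one CCW, no separate data area; "EXAM" is a made-up magic */
	cqr = dasd_kmalloc_request("EXAM", 1, 0, device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	/* ... build the channel program in cqr->cpaddr here ... */
	cqr->device = device;
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);	/* queue the request and wait */
	dasd_kfree_request(cqr, device);
	return rc;
}
#endif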

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);