1 /* 2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Horst Hummel <Horst.Hummel@de.ibm.com> 4 * Carsten Otte <Cotte@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Copyright IBM Corp. 1999, 2009 8 */ 9 10 #define KMSG_COMPONENT "dasd" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/kmod.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/ctype.h> 17 #include <linux/major.h> 18 #include <linux/slab.h> 19 #include <linux/hdreg.h> 20 #include <linux/async.h> 21 #include <linux/mutex.h> 22 #include <linux/debugfs.h> 23 #include <linux/seq_file.h> 24 #include <linux/vmalloc.h> 25 26 #include <asm/ccwdev.h> 27 #include <asm/ebcdic.h> 28 #include <asm/idals.h> 29 #include <asm/itcw.h> 30 #include <asm/diag.h> 31 32 /* This is ugly... */ 33 #define PRINTK_HEADER "dasd:" 34 35 #include "dasd_int.h" 36 /* 37 * SECTION: Constant definitions to be used within this file 38 */ 39 #define DASD_CHANQ_MAX_SIZE 4 40 41 #define DASD_DIAG_MOD "dasd_diag_mod" 42 43 /* 44 * SECTION: exported variables of dasd.c 45 */ 46 debug_info_t *dasd_debug_area; 47 EXPORT_SYMBOL(dasd_debug_area); 48 static struct dentry *dasd_debugfs_root_entry; 49 struct dasd_discipline *dasd_diag_discipline_pointer; 50 EXPORT_SYMBOL(dasd_diag_discipline_pointer); 51 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); 52 53 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 54 MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 55 " Copyright IBM Corp. 2000"); 56 MODULE_SUPPORTED_DEVICE("dasd"); 57 MODULE_LICENSE("GPL"); 58 59 /* 60 * SECTION: prototypes for static functions of dasd.c 61 */ 62 static int dasd_alloc_queue(struct dasd_block *); 63 static void dasd_setup_queue(struct dasd_block *); 64 static void dasd_free_queue(struct dasd_block *); 65 static int dasd_flush_block_queue(struct dasd_block *); 66 static void dasd_device_tasklet(struct dasd_device *); 67 static void dasd_block_tasklet(struct dasd_block *); 68 static void do_kick_device(struct work_struct *); 69 static void do_restore_device(struct work_struct *); 70 static void do_reload_device(struct work_struct *); 71 static void do_requeue_requests(struct work_struct *); 72 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); 73 static void dasd_device_timeout(unsigned long); 74 static void dasd_block_timeout(unsigned long); 75 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *); 76 static void dasd_profile_init(struct dasd_profile *, struct dentry *); 77 static void dasd_profile_exit(struct dasd_profile *); 78 static void dasd_hosts_init(struct dentry *, struct dasd_device *); 79 static void dasd_hosts_exit(struct dasd_device *); 80 81 /* 82 * SECTION: Operations on the device structure. 83 */ 84 static wait_queue_head_t dasd_init_waitq; 85 static wait_queue_head_t dasd_flush_wq; 86 static wait_queue_head_t generic_waitq; 87 static wait_queue_head_t shutdown_waitq; 88 89 /* 90 * Allocate memory for a new device structure. 91 */ 92 struct dasd_device *dasd_alloc_device(void) 93 { 94 struct dasd_device *device; 95 96 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); 97 if (!device) 98 return ERR_PTR(-ENOMEM); 99 100 /* Get two pages for normal block device operations. */ 101 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 102 if (!device->ccw_mem) { 103 kfree(device); 104 return ERR_PTR(-ENOMEM); 105 } 106 /* Get one page for error recovery. 
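         * (This page backs the erp_chunks allocator that is initialized a few
         * lines below via dasd_init_chunklist().)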
*/ 107 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); 108 if (!device->erp_mem) { 109 free_pages((unsigned long) device->ccw_mem, 1); 110 kfree(device); 111 return ERR_PTR(-ENOMEM); 112 } 113 114 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 115 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 116 spin_lock_init(&device->mem_lock); 117 atomic_set(&device->tasklet_scheduled, 0); 118 tasklet_init(&device->tasklet, 119 (void (*)(unsigned long)) dasd_device_tasklet, 120 (unsigned long) device); 121 INIT_LIST_HEAD(&device->ccw_queue); 122 init_timer(&device->timer); 123 device->timer.function = dasd_device_timeout; 124 device->timer.data = (unsigned long) device; 125 INIT_WORK(&device->kick_work, do_kick_device); 126 INIT_WORK(&device->restore_device, do_restore_device); 127 INIT_WORK(&device->reload_device, do_reload_device); 128 INIT_WORK(&device->requeue_requests, do_requeue_requests); 129 device->state = DASD_STATE_NEW; 130 device->target = DASD_STATE_NEW; 131 mutex_init(&device->state_mutex); 132 spin_lock_init(&device->profile.lock); 133 return device; 134 } 135 136 /* 137 * Free memory of a device structure. 138 */ 139 void dasd_free_device(struct dasd_device *device) 140 { 141 kfree(device->private); 142 free_page((unsigned long) device->erp_mem); 143 free_pages((unsigned long) device->ccw_mem, 1); 144 kfree(device); 145 } 146 147 /* 148 * Allocate memory for a new device structure. 149 */ 150 struct dasd_block *dasd_alloc_block(void) 151 { 152 struct dasd_block *block; 153 154 block = kzalloc(sizeof(*block), GFP_ATOMIC); 155 if (!block) 156 return ERR_PTR(-ENOMEM); 157 /* open_count = 0 means device online but not in use */ 158 atomic_set(&block->open_count, -1); 159 160 atomic_set(&block->tasklet_scheduled, 0); 161 tasklet_init(&block->tasklet, 162 (void (*)(unsigned long)) dasd_block_tasklet, 163 (unsigned long) block); 164 INIT_LIST_HEAD(&block->ccw_queue); 165 spin_lock_init(&block->queue_lock); 166 init_timer(&block->timer); 167 block->timer.function = dasd_block_timeout; 168 block->timer.data = (unsigned long) block; 169 spin_lock_init(&block->profile.lock); 170 171 return block; 172 } 173 EXPORT_SYMBOL_GPL(dasd_alloc_block); 174 175 /* 176 * Free memory of a device structure. 177 */ 178 void dasd_free_block(struct dasd_block *block) 179 { 180 kfree(block); 181 } 182 EXPORT_SYMBOL_GPL(dasd_free_block); 183 184 /* 185 * Make a new device known to the system. 186 */ 187 static int dasd_state_new_to_known(struct dasd_device *device) 188 { 189 int rc; 190 191 /* 192 * As long as the device is not in state DASD_STATE_NEW we want to 193 * keep the reference count > 0. 194 */ 195 dasd_get_device(device); 196 197 if (device->block) { 198 rc = dasd_alloc_queue(device->block); 199 if (rc) { 200 dasd_put_device(device); 201 return rc; 202 } 203 } 204 device->state = DASD_STATE_KNOWN; 205 return 0; 206 } 207 208 /* 209 * Let the system forget about a device. 210 */ 211 static int dasd_state_known_to_new(struct dasd_device *device) 212 { 213 /* Disable extended error reporting for this device. */ 214 dasd_eer_disable(device); 215 device->state = DASD_STATE_NEW; 216 217 if (device->block) 218 dasd_free_queue(device->block); 219 220 /* Give up reference we took in dasd_state_new_to_known. 
*/ 221 dasd_put_device(device); 222 return 0; 223 } 224 225 static struct dentry *dasd_debugfs_setup(const char *name, 226 struct dentry *base_dentry) 227 { 228 struct dentry *pde; 229 230 if (!base_dentry) 231 return NULL; 232 pde = debugfs_create_dir(name, base_dentry); 233 if (!pde || IS_ERR(pde)) 234 return NULL; 235 return pde; 236 } 237 238 /* 239 * Request the irq line for the device. 240 */ 241 static int dasd_state_known_to_basic(struct dasd_device *device) 242 { 243 struct dasd_block *block = device->block; 244 int rc = 0; 245 246 /* Allocate and register gendisk structure. */ 247 if (block) { 248 rc = dasd_gendisk_alloc(block); 249 if (rc) 250 return rc; 251 block->debugfs_dentry = 252 dasd_debugfs_setup(block->gdp->disk_name, 253 dasd_debugfs_root_entry); 254 dasd_profile_init(&block->profile, block->debugfs_dentry); 255 if (dasd_global_profile_level == DASD_PROFILE_ON) 256 dasd_profile_on(&device->block->profile); 257 } 258 device->debugfs_dentry = 259 dasd_debugfs_setup(dev_name(&device->cdev->dev), 260 dasd_debugfs_root_entry); 261 dasd_profile_init(&device->profile, device->debugfs_dentry); 262 dasd_hosts_init(device->debugfs_dentry, device); 263 264 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 265 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, 266 8 * sizeof(long)); 267 debug_register_view(device->debug_area, &debug_sprintf_view); 268 debug_set_level(device->debug_area, DBF_WARNING); 269 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 270 271 device->state = DASD_STATE_BASIC; 272 273 return rc; 274 } 275 276 /* 277 * Release the irq line for the device. Terminate any running i/o. 278 */ 279 static int dasd_state_basic_to_known(struct dasd_device *device) 280 { 281 int rc; 282 283 if (device->discipline->basic_to_known) { 284 rc = device->discipline->basic_to_known(device); 285 if (rc) 286 return rc; 287 } 288 289 if (device->block) { 290 dasd_profile_exit(&device->block->profile); 291 debugfs_remove(device->block->debugfs_dentry); 292 dasd_gendisk_free(device->block); 293 dasd_block_clear_timer(device->block); 294 } 295 rc = dasd_flush_device_queue(device); 296 if (rc) 297 return rc; 298 dasd_device_clear_timer(device); 299 dasd_profile_exit(&device->profile); 300 dasd_hosts_exit(device); 301 debugfs_remove(device->debugfs_dentry); 302 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 303 if (device->debug_area != NULL) { 304 debug_unregister(device->debug_area); 305 device->debug_area = NULL; 306 } 307 device->state = DASD_STATE_KNOWN; 308 return 0; 309 } 310 311 /* 312 * Do the initial analysis. The do_analysis function may return 313 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC 314 * until the discipline decides to continue the startup sequence 315 * by calling the function dasd_change_state. The eckd disciplines 316 * uses this to start a ccw that detects the format. The completion 317 * interrupt for this detection ccw uses the kernel event daemon to 318 * trigger the call to dasd_change_state. All this is done in the 319 * discipline code, see dasd_eckd.c. 320 * After the analysis ccw is done (do_analysis returned 0) the block 321 * device is setup. 322 * In case the analysis returns an error, the device setup is stopped 323 * (a fake disk was already added to allow formatting). 
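 *
 * For orientation, the state ladder driven by dasd_increase_state() and
 * dasd_decrease_state() below is:
 *   NEW -> KNOWN -> BASIC -> READY (or UNFMT for unformatted devices) -> ONLINE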
324 */ 325 static int dasd_state_basic_to_ready(struct dasd_device *device) 326 { 327 int rc; 328 struct dasd_block *block; 329 struct gendisk *disk; 330 331 rc = 0; 332 block = device->block; 333 /* make disk known with correct capacity */ 334 if (block) { 335 if (block->base->discipline->do_analysis != NULL) 336 rc = block->base->discipline->do_analysis(block); 337 if (rc) { 338 if (rc != -EAGAIN) { 339 device->state = DASD_STATE_UNFMT; 340 disk = device->block->gdp; 341 kobject_uevent(&disk_to_dev(disk)->kobj, 342 KOBJ_CHANGE); 343 goto out; 344 } 345 return rc; 346 } 347 dasd_setup_queue(block); 348 set_capacity(block->gdp, 349 block->blocks << block->s2b_shift); 350 device->state = DASD_STATE_READY; 351 rc = dasd_scan_partitions(block); 352 if (rc) { 353 device->state = DASD_STATE_BASIC; 354 return rc; 355 } 356 } else { 357 device->state = DASD_STATE_READY; 358 } 359 out: 360 if (device->discipline->basic_to_ready) 361 rc = device->discipline->basic_to_ready(device); 362 return rc; 363 } 364 365 static inline 366 int _wait_for_empty_queues(struct dasd_device *device) 367 { 368 if (device->block) 369 return list_empty(&device->ccw_queue) && 370 list_empty(&device->block->ccw_queue); 371 else 372 return list_empty(&device->ccw_queue); 373 } 374 375 /* 376 * Remove device from block device layer. Destroy dirty buffers. 377 * Forget format information. Check if the target level is basic 378 * and if it is create fake disk for formatting. 379 */ 380 static int dasd_state_ready_to_basic(struct dasd_device *device) 381 { 382 int rc; 383 384 device->state = DASD_STATE_BASIC; 385 if (device->block) { 386 struct dasd_block *block = device->block; 387 rc = dasd_flush_block_queue(block); 388 if (rc) { 389 device->state = DASD_STATE_READY; 390 return rc; 391 } 392 dasd_destroy_partitions(block); 393 block->blocks = 0; 394 block->bp_block = 0; 395 block->s2b_shift = 0; 396 } 397 return 0; 398 } 399 400 /* 401 * Back to basic. 402 */ 403 static int dasd_state_unfmt_to_basic(struct dasd_device *device) 404 { 405 device->state = DASD_STATE_BASIC; 406 return 0; 407 } 408 409 /* 410 * Make the device online and schedule the bottom half to start 411 * the requeueing of requests from the linux request queue to the 412 * ccw queue. 413 */ 414 static int 415 dasd_state_ready_to_online(struct dasd_device * device) 416 { 417 struct gendisk *disk; 418 struct disk_part_iter piter; 419 struct hd_struct *part; 420 421 device->state = DASD_STATE_ONLINE; 422 if (device->block) { 423 dasd_schedule_block_bh(device->block); 424 if ((device->features & DASD_FEATURE_USERAW)) { 425 disk = device->block->gdp; 426 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); 427 return 0; 428 } 429 disk = device->block->bdev->bd_disk; 430 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 431 while ((part = disk_part_iter_next(&piter))) 432 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 433 disk_part_iter_exit(&piter); 434 } 435 return 0; 436 } 437 438 /* 439 * Stop the requeueing of requests again. 
440 */ 441 static int dasd_state_online_to_ready(struct dasd_device *device) 442 { 443 int rc; 444 struct gendisk *disk; 445 struct disk_part_iter piter; 446 struct hd_struct *part; 447 448 if (device->discipline->online_to_ready) { 449 rc = device->discipline->online_to_ready(device); 450 if (rc) 451 return rc; 452 } 453 454 device->state = DASD_STATE_READY; 455 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { 456 disk = device->block->bdev->bd_disk; 457 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 458 while ((part = disk_part_iter_next(&piter))) 459 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 460 disk_part_iter_exit(&piter); 461 } 462 return 0; 463 } 464 465 /* 466 * Device startup state changes. 467 */ 468 static int dasd_increase_state(struct dasd_device *device) 469 { 470 int rc; 471 472 rc = 0; 473 if (device->state == DASD_STATE_NEW && 474 device->target >= DASD_STATE_KNOWN) 475 rc = dasd_state_new_to_known(device); 476 477 if (!rc && 478 device->state == DASD_STATE_KNOWN && 479 device->target >= DASD_STATE_BASIC) 480 rc = dasd_state_known_to_basic(device); 481 482 if (!rc && 483 device->state == DASD_STATE_BASIC && 484 device->target >= DASD_STATE_READY) 485 rc = dasd_state_basic_to_ready(device); 486 487 if (!rc && 488 device->state == DASD_STATE_UNFMT && 489 device->target > DASD_STATE_UNFMT) 490 rc = -EPERM; 491 492 if (!rc && 493 device->state == DASD_STATE_READY && 494 device->target >= DASD_STATE_ONLINE) 495 rc = dasd_state_ready_to_online(device); 496 497 return rc; 498 } 499 500 /* 501 * Device shutdown state changes. 502 */ 503 static int dasd_decrease_state(struct dasd_device *device) 504 { 505 int rc; 506 507 rc = 0; 508 if (device->state == DASD_STATE_ONLINE && 509 device->target <= DASD_STATE_READY) 510 rc = dasd_state_online_to_ready(device); 511 512 if (!rc && 513 device->state == DASD_STATE_READY && 514 device->target <= DASD_STATE_BASIC) 515 rc = dasd_state_ready_to_basic(device); 516 517 if (!rc && 518 device->state == DASD_STATE_UNFMT && 519 device->target <= DASD_STATE_BASIC) 520 rc = dasd_state_unfmt_to_basic(device); 521 522 if (!rc && 523 device->state == DASD_STATE_BASIC && 524 device->target <= DASD_STATE_KNOWN) 525 rc = dasd_state_basic_to_known(device); 526 527 if (!rc && 528 device->state == DASD_STATE_KNOWN && 529 device->target <= DASD_STATE_NEW) 530 rc = dasd_state_known_to_new(device); 531 532 return rc; 533 } 534 535 /* 536 * This is the main startup/shutdown routine. 537 */ 538 static void dasd_change_state(struct dasd_device *device) 539 { 540 int rc; 541 542 if (device->state == device->target) 543 /* Already where we want to go today... */ 544 return; 545 if (device->state < device->target) 546 rc = dasd_increase_state(device); 547 else 548 rc = dasd_decrease_state(device); 549 if (rc == -EAGAIN) 550 return; 551 if (rc) 552 device->target = device->state; 553 554 /* let user-space know that the device status changed */ 555 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 556 557 if (device->state == device->target) 558 wake_up(&dasd_init_waitq); 559 } 560 561 /* 562 * Kick starter for devices that did not complete the startup/shutdown 563 * procedure or were sleeping because of a pending state. 564 * dasd_kick_device will schedule a call do do_kick_device to the kernel 565 * event daemon. 
 */
static void do_kick_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
        mutex_lock(&device->state_mutex);
        dasd_change_state(device);
        mutex_unlock(&device->state_mutex);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_kick_device to the kernel event daemon. */
        if (!schedule_work(&device->kick_work))
                dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  reload_device);
        device->discipline->reload(device);
        dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_reload_device to the kernel event daemon. */
        if (!schedule_work(&device->reload_device))
                dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  restore_device);
        device->cdev->drv->restore(device->cdev);
        dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_restore_device to the kernel event daemon. */
        if (!schedule_work(&device->restore_device))
                dasd_put_device(device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
        dasd_get_device(device);
        mutex_lock(&device->state_mutex);
        /* If we are in probeonly mode stop at DASD_STATE_READY. */
        if (dasd_probeonly && target > DASD_STATE_READY)
                target = DASD_STATE_READY;
        if (device->target != target) {
                if (device->state == target)
                        wake_up(&dasd_init_waitq);
                device->target = target;
        }
        if (device->state != device->target)
                dasd_change_state(device);
        mutex_unlock(&device->state_mutex);
        dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Wait for a device to reach its target state (used by dasd_enable_device).
 */
static inline int _wait_for_device(struct dasd_device *device)
{
        return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN)
                /* No discipline for device found. */
                dasd_set_target_state(device, DASD_STATE_NEW);
        /* Now wait for the devices to come up. */
        wait_event(dasd_init_waitq, _wait_for_device(device));

        dasd_reload_device(device);
        if (device->discipline->kick_validate)
                device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...
674 */ 675 676 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF; 677 678 #ifdef CONFIG_DASD_PROFILE 679 struct dasd_profile dasd_global_profile = { 680 .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock), 681 }; 682 static struct dentry *dasd_debugfs_global_entry; 683 684 /* 685 * Add profiling information for cqr before execution. 686 */ 687 static void dasd_profile_start(struct dasd_block *block, 688 struct dasd_ccw_req *cqr, 689 struct request *req) 690 { 691 struct list_head *l; 692 unsigned int counter; 693 struct dasd_device *device; 694 695 /* count the length of the chanq for statistics */ 696 counter = 0; 697 if (dasd_global_profile_level || block->profile.data) 698 list_for_each(l, &block->ccw_queue) 699 if (++counter >= 31) 700 break; 701 702 spin_lock(&dasd_global_profile.lock); 703 if (dasd_global_profile.data) { 704 dasd_global_profile.data->dasd_io_nr_req[counter]++; 705 if (rq_data_dir(req) == READ) 706 dasd_global_profile.data->dasd_read_nr_req[counter]++; 707 } 708 spin_unlock(&dasd_global_profile.lock); 709 710 spin_lock(&block->profile.lock); 711 if (block->profile.data) { 712 block->profile.data->dasd_io_nr_req[counter]++; 713 if (rq_data_dir(req) == READ) 714 block->profile.data->dasd_read_nr_req[counter]++; 715 } 716 spin_unlock(&block->profile.lock); 717 718 /* 719 * We count the request for the start device, even though it may run on 720 * some other device due to error recovery. This way we make sure that 721 * we count each request only once. 722 */ 723 device = cqr->startdev; 724 if (device->profile.data) { 725 counter = 1; /* request is not yet queued on the start device */ 726 list_for_each(l, &device->ccw_queue) 727 if (++counter >= 31) 728 break; 729 } 730 spin_lock(&device->profile.lock); 731 if (device->profile.data) { 732 device->profile.data->dasd_io_nr_req[counter]++; 733 if (rq_data_dir(req) == READ) 734 device->profile.data->dasd_read_nr_req[counter]++; 735 } 736 spin_unlock(&device->profile.lock); 737 } 738 739 /* 740 * Add profiling information for cqr after execution. 
741 */ 742 743 #define dasd_profile_counter(value, index) \ 744 { \ 745 for (index = 0; index < 31 && value >> (2+index); index++) \ 746 ; \ 747 } 748 749 static void dasd_profile_end_add_data(struct dasd_profile_info *data, 750 int is_alias, 751 int is_tpm, 752 int is_read, 753 long sectors, 754 int sectors_ind, 755 int tottime_ind, 756 int tottimeps_ind, 757 int strtime_ind, 758 int irqtime_ind, 759 int irqtimeps_ind, 760 int endtime_ind) 761 { 762 /* in case of an overflow, reset the whole profile */ 763 if (data->dasd_io_reqs == UINT_MAX) { 764 memset(data, 0, sizeof(*data)); 765 getnstimeofday(&data->starttod); 766 } 767 data->dasd_io_reqs++; 768 data->dasd_io_sects += sectors; 769 if (is_alias) 770 data->dasd_io_alias++; 771 if (is_tpm) 772 data->dasd_io_tpm++; 773 774 data->dasd_io_secs[sectors_ind]++; 775 data->dasd_io_times[tottime_ind]++; 776 data->dasd_io_timps[tottimeps_ind]++; 777 data->dasd_io_time1[strtime_ind]++; 778 data->dasd_io_time2[irqtime_ind]++; 779 data->dasd_io_time2ps[irqtimeps_ind]++; 780 data->dasd_io_time3[endtime_ind]++; 781 782 if (is_read) { 783 data->dasd_read_reqs++; 784 data->dasd_read_sects += sectors; 785 if (is_alias) 786 data->dasd_read_alias++; 787 if (is_tpm) 788 data->dasd_read_tpm++; 789 data->dasd_read_secs[sectors_ind]++; 790 data->dasd_read_times[tottime_ind]++; 791 data->dasd_read_time1[strtime_ind]++; 792 data->dasd_read_time2[irqtime_ind]++; 793 data->dasd_read_time3[endtime_ind]++; 794 } 795 } 796 797 static void dasd_profile_end(struct dasd_block *block, 798 struct dasd_ccw_req *cqr, 799 struct request *req) 800 { 801 unsigned long strtime, irqtime, endtime, tottime; 802 unsigned long tottimeps, sectors; 803 struct dasd_device *device; 804 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; 805 int irqtime_ind, irqtimeps_ind, endtime_ind; 806 struct dasd_profile_info *data; 807 808 device = cqr->startdev; 809 if (!(dasd_global_profile_level || 810 block->profile.data || 811 device->profile.data)) 812 return; 813 814 sectors = blk_rq_sectors(req); 815 if (!cqr->buildclk || !cqr->startclk || 816 !cqr->stopclk || !cqr->endclk || 817 !sectors) 818 return; 819 820 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 821 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 822 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 823 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 824 tottimeps = tottime / sectors; 825 826 dasd_profile_counter(sectors, sectors_ind); 827 dasd_profile_counter(tottime, tottime_ind); 828 dasd_profile_counter(tottimeps, tottimeps_ind); 829 dasd_profile_counter(strtime, strtime_ind); 830 dasd_profile_counter(irqtime, irqtime_ind); 831 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); 832 dasd_profile_counter(endtime, endtime_ind); 833 834 spin_lock(&dasd_global_profile.lock); 835 if (dasd_global_profile.data) { 836 data = dasd_global_profile.data; 837 data->dasd_sum_times += tottime; 838 data->dasd_sum_time_str += strtime; 839 data->dasd_sum_time_irq += irqtime; 840 data->dasd_sum_time_end += endtime; 841 dasd_profile_end_add_data(dasd_global_profile.data, 842 cqr->startdev != block->base, 843 cqr->cpmode == 1, 844 rq_data_dir(req) == READ, 845 sectors, sectors_ind, tottime_ind, 846 tottimeps_ind, strtime_ind, 847 irqtime_ind, irqtimeps_ind, 848 endtime_ind); 849 } 850 spin_unlock(&dasd_global_profile.lock); 851 852 spin_lock(&block->profile.lock); 853 if (block->profile.data) { 854 data = block->profile.data; 855 data->dasd_sum_times += tottime; 856 data->dasd_sum_time_str += strtime; 857 data->dasd_sum_time_irq += 
irqtime; 858 data->dasd_sum_time_end += endtime; 859 dasd_profile_end_add_data(block->profile.data, 860 cqr->startdev != block->base, 861 cqr->cpmode == 1, 862 rq_data_dir(req) == READ, 863 sectors, sectors_ind, tottime_ind, 864 tottimeps_ind, strtime_ind, 865 irqtime_ind, irqtimeps_ind, 866 endtime_ind); 867 } 868 spin_unlock(&block->profile.lock); 869 870 spin_lock(&device->profile.lock); 871 if (device->profile.data) { 872 data = device->profile.data; 873 data->dasd_sum_times += tottime; 874 data->dasd_sum_time_str += strtime; 875 data->dasd_sum_time_irq += irqtime; 876 data->dasd_sum_time_end += endtime; 877 dasd_profile_end_add_data(device->profile.data, 878 cqr->startdev != block->base, 879 cqr->cpmode == 1, 880 rq_data_dir(req) == READ, 881 sectors, sectors_ind, tottime_ind, 882 tottimeps_ind, strtime_ind, 883 irqtime_ind, irqtimeps_ind, 884 endtime_ind); 885 } 886 spin_unlock(&device->profile.lock); 887 } 888 889 void dasd_profile_reset(struct dasd_profile *profile) 890 { 891 struct dasd_profile_info *data; 892 893 spin_lock_bh(&profile->lock); 894 data = profile->data; 895 if (!data) { 896 spin_unlock_bh(&profile->lock); 897 return; 898 } 899 memset(data, 0, sizeof(*data)); 900 getnstimeofday(&data->starttod); 901 spin_unlock_bh(&profile->lock); 902 } 903 904 int dasd_profile_on(struct dasd_profile *profile) 905 { 906 struct dasd_profile_info *data; 907 908 data = kzalloc(sizeof(*data), GFP_KERNEL); 909 if (!data) 910 return -ENOMEM; 911 spin_lock_bh(&profile->lock); 912 if (profile->data) { 913 spin_unlock_bh(&profile->lock); 914 kfree(data); 915 return 0; 916 } 917 getnstimeofday(&data->starttod); 918 profile->data = data; 919 spin_unlock_bh(&profile->lock); 920 return 0; 921 } 922 923 void dasd_profile_off(struct dasd_profile *profile) 924 { 925 spin_lock_bh(&profile->lock); 926 kfree(profile->data); 927 profile->data = NULL; 928 spin_unlock_bh(&profile->lock); 929 } 930 931 char *dasd_get_user_string(const char __user *user_buf, size_t user_len) 932 { 933 char *buffer; 934 935 buffer = vmalloc(user_len + 1); 936 if (buffer == NULL) 937 return ERR_PTR(-ENOMEM); 938 if (copy_from_user(buffer, user_buf, user_len) != 0) { 939 vfree(buffer); 940 return ERR_PTR(-EFAULT); 941 } 942 /* got the string, now strip linefeed. 
*/ 943 if (buffer[user_len - 1] == '\n') 944 buffer[user_len - 1] = 0; 945 else 946 buffer[user_len] = 0; 947 return buffer; 948 } 949 950 static ssize_t dasd_stats_write(struct file *file, 951 const char __user *user_buf, 952 size_t user_len, loff_t *pos) 953 { 954 char *buffer, *str; 955 int rc; 956 struct seq_file *m = (struct seq_file *)file->private_data; 957 struct dasd_profile *prof = m->private; 958 959 if (user_len > 65536) 960 user_len = 65536; 961 buffer = dasd_get_user_string(user_buf, user_len); 962 if (IS_ERR(buffer)) 963 return PTR_ERR(buffer); 964 965 str = skip_spaces(buffer); 966 rc = user_len; 967 if (strncmp(str, "reset", 5) == 0) { 968 dasd_profile_reset(prof); 969 } else if (strncmp(str, "on", 2) == 0) { 970 rc = dasd_profile_on(prof); 971 if (rc) 972 goto out; 973 rc = user_len; 974 if (prof == &dasd_global_profile) { 975 dasd_profile_reset(prof); 976 dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY; 977 } 978 } else if (strncmp(str, "off", 3) == 0) { 979 if (prof == &dasd_global_profile) 980 dasd_global_profile_level = DASD_PROFILE_OFF; 981 dasd_profile_off(prof); 982 } else 983 rc = -EINVAL; 984 out: 985 vfree(buffer); 986 return rc; 987 } 988 989 static void dasd_stats_array(struct seq_file *m, unsigned int *array) 990 { 991 int i; 992 993 for (i = 0; i < 32; i++) 994 seq_printf(m, "%u ", array[i]); 995 seq_putc(m, '\n'); 996 } 997 998 static void dasd_stats_seq_print(struct seq_file *m, 999 struct dasd_profile_info *data) 1000 { 1001 seq_printf(m, "start_time %ld.%09ld\n", 1002 data->starttod.tv_sec, data->starttod.tv_nsec); 1003 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); 1004 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); 1005 seq_printf(m, "total_pav %u\n", data->dasd_io_alias); 1006 seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm); 1007 seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ? 1008 data->dasd_sum_times / data->dasd_io_reqs : 0UL); 1009 seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ? 1010 data->dasd_sum_time_str / data->dasd_io_reqs : 0UL); 1011 seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ? 1012 data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL); 1013 seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ? 
1014 data->dasd_sum_time_end / data->dasd_io_reqs : 0UL); 1015 seq_puts(m, "histogram_sectors "); 1016 dasd_stats_array(m, data->dasd_io_secs); 1017 seq_puts(m, "histogram_io_times "); 1018 dasd_stats_array(m, data->dasd_io_times); 1019 seq_puts(m, "histogram_io_times_weighted "); 1020 dasd_stats_array(m, data->dasd_io_timps); 1021 seq_puts(m, "histogram_time_build_to_ssch "); 1022 dasd_stats_array(m, data->dasd_io_time1); 1023 seq_puts(m, "histogram_time_ssch_to_irq "); 1024 dasd_stats_array(m, data->dasd_io_time2); 1025 seq_puts(m, "histogram_time_ssch_to_irq_weighted "); 1026 dasd_stats_array(m, data->dasd_io_time2ps); 1027 seq_puts(m, "histogram_time_irq_to_end "); 1028 dasd_stats_array(m, data->dasd_io_time3); 1029 seq_puts(m, "histogram_ccw_queue_length "); 1030 dasd_stats_array(m, data->dasd_io_nr_req); 1031 seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs); 1032 seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects); 1033 seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias); 1034 seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm); 1035 seq_puts(m, "histogram_read_sectors "); 1036 dasd_stats_array(m, data->dasd_read_secs); 1037 seq_puts(m, "histogram_read_times "); 1038 dasd_stats_array(m, data->dasd_read_times); 1039 seq_puts(m, "histogram_read_time_build_to_ssch "); 1040 dasd_stats_array(m, data->dasd_read_time1); 1041 seq_puts(m, "histogram_read_time_ssch_to_irq "); 1042 dasd_stats_array(m, data->dasd_read_time2); 1043 seq_puts(m, "histogram_read_time_irq_to_end "); 1044 dasd_stats_array(m, data->dasd_read_time3); 1045 seq_puts(m, "histogram_read_ccw_queue_length "); 1046 dasd_stats_array(m, data->dasd_read_nr_req); 1047 } 1048 1049 static int dasd_stats_show(struct seq_file *m, void *v) 1050 { 1051 struct dasd_profile *profile; 1052 struct dasd_profile_info *data; 1053 1054 profile = m->private; 1055 spin_lock_bh(&profile->lock); 1056 data = profile->data; 1057 if (!data) { 1058 spin_unlock_bh(&profile->lock); 1059 seq_puts(m, "disabled\n"); 1060 return 0; 1061 } 1062 dasd_stats_seq_print(m, data); 1063 spin_unlock_bh(&profile->lock); 1064 return 0; 1065 } 1066 1067 static int dasd_stats_open(struct inode *inode, struct file *file) 1068 { 1069 struct dasd_profile *profile = inode->i_private; 1070 return single_open(file, dasd_stats_show, profile); 1071 } 1072 1073 static const struct file_operations dasd_stats_raw_fops = { 1074 .owner = THIS_MODULE, 1075 .open = dasd_stats_open, 1076 .read = seq_read, 1077 .llseek = seq_lseek, 1078 .release = single_release, 1079 .write = dasd_stats_write, 1080 }; 1081 1082 static void dasd_profile_init(struct dasd_profile *profile, 1083 struct dentry *base_dentry) 1084 { 1085 umode_t mode; 1086 struct dentry *pde; 1087 1088 if (!base_dentry) 1089 return; 1090 profile->dentry = NULL; 1091 profile->data = NULL; 1092 mode = (S_IRUSR | S_IWUSR | S_IFREG); 1093 pde = debugfs_create_file("statistics", mode, base_dentry, 1094 profile, &dasd_stats_raw_fops); 1095 if (pde && !IS_ERR(pde)) 1096 profile->dentry = pde; 1097 return; 1098 } 1099 1100 static void dasd_profile_exit(struct dasd_profile *profile) 1101 { 1102 dasd_profile_off(profile); 1103 debugfs_remove(profile->dentry); 1104 profile->dentry = NULL; 1105 } 1106 1107 static void dasd_statistics_removeroot(void) 1108 { 1109 dasd_global_profile_level = DASD_PROFILE_OFF; 1110 dasd_profile_exit(&dasd_global_profile); 1111 debugfs_remove(dasd_debugfs_global_entry); 1112 debugfs_remove(dasd_debugfs_root_entry); 1113 } 1114 1115 static void 
dasd_statistics_createroot(void) 1116 { 1117 struct dentry *pde; 1118 1119 dasd_debugfs_root_entry = NULL; 1120 pde = debugfs_create_dir("dasd", NULL); 1121 if (!pde || IS_ERR(pde)) 1122 goto error; 1123 dasd_debugfs_root_entry = pde; 1124 pde = debugfs_create_dir("global", dasd_debugfs_root_entry); 1125 if (!pde || IS_ERR(pde)) 1126 goto error; 1127 dasd_debugfs_global_entry = pde; 1128 dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry); 1129 return; 1130 1131 error: 1132 DBF_EVENT(DBF_ERR, "%s", 1133 "Creation of the dasd debugfs interface failed"); 1134 dasd_statistics_removeroot(); 1135 return; 1136 } 1137 1138 #else 1139 #define dasd_profile_start(block, cqr, req) do {} while (0) 1140 #define dasd_profile_end(block, cqr, req) do {} while (0) 1141 1142 static void dasd_statistics_createroot(void) 1143 { 1144 return; 1145 } 1146 1147 static void dasd_statistics_removeroot(void) 1148 { 1149 return; 1150 } 1151 1152 int dasd_stats_generic_show(struct seq_file *m, void *v) 1153 { 1154 seq_puts(m, "Statistics are not activated in this kernel\n"); 1155 return 0; 1156 } 1157 1158 static void dasd_profile_init(struct dasd_profile *profile, 1159 struct dentry *base_dentry) 1160 { 1161 return; 1162 } 1163 1164 static void dasd_profile_exit(struct dasd_profile *profile) 1165 { 1166 return; 1167 } 1168 1169 int dasd_profile_on(struct dasd_profile *profile) 1170 { 1171 return 0; 1172 } 1173 1174 #endif /* CONFIG_DASD_PROFILE */ 1175 1176 static int dasd_hosts_show(struct seq_file *m, void *v) 1177 { 1178 struct dasd_device *device; 1179 int rc = -EOPNOTSUPP; 1180 1181 device = m->private; 1182 dasd_get_device(device); 1183 1184 if (device->discipline->hosts_print) 1185 rc = device->discipline->hosts_print(device, m); 1186 1187 dasd_put_device(device); 1188 return rc; 1189 } 1190 1191 static int dasd_hosts_open(struct inode *inode, struct file *file) 1192 { 1193 struct dasd_device *device = inode->i_private; 1194 1195 return single_open(file, dasd_hosts_show, device); 1196 } 1197 1198 static const struct file_operations dasd_hosts_fops = { 1199 .owner = THIS_MODULE, 1200 .open = dasd_hosts_open, 1201 .read = seq_read, 1202 .llseek = seq_lseek, 1203 .release = single_release, 1204 }; 1205 1206 static void dasd_hosts_exit(struct dasd_device *device) 1207 { 1208 debugfs_remove(device->hosts_dentry); 1209 device->hosts_dentry = NULL; 1210 } 1211 1212 static void dasd_hosts_init(struct dentry *base_dentry, 1213 struct dasd_device *device) 1214 { 1215 struct dentry *pde; 1216 umode_t mode; 1217 1218 if (!base_dentry) 1219 return; 1220 1221 mode = S_IRUSR | S_IFREG; 1222 pde = debugfs_create_file("host_access_list", mode, base_dentry, 1223 device, &dasd_hosts_fops); 1224 if (pde && !IS_ERR(pde)) 1225 device->hosts_dentry = pde; 1226 } 1227 1228 /* 1229 * Allocate memory for a channel program with 'cplength' channel 1230 * command words and 'datasize' additional space. There are two 1231 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed 1232 * memory and 2) dasd_smalloc_request uses the static ccw memory 1233 * that gets allocated for each device. 
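 *
 * A minimal usage sketch (hypothetical caller; the magic value and the
 * sizes are purely illustrative and error handling is shortened):
 *
 *	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 64, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... build the channel program in cqr->cpaddr and cqr->data,
 *	    submit it, and finally release it again with
 *	    dasd_sfree_request(cqr, device) ...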
1234 */ 1235 struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, 1236 int datasize, 1237 struct dasd_device *device) 1238 { 1239 struct dasd_ccw_req *cqr; 1240 1241 /* Sanity checks */ 1242 BUG_ON(datasize > PAGE_SIZE || 1243 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 1244 1245 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); 1246 if (cqr == NULL) 1247 return ERR_PTR(-ENOMEM); 1248 cqr->cpaddr = NULL; 1249 if (cplength > 0) { 1250 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), 1251 GFP_ATOMIC | GFP_DMA); 1252 if (cqr->cpaddr == NULL) { 1253 kfree(cqr); 1254 return ERR_PTR(-ENOMEM); 1255 } 1256 } 1257 cqr->data = NULL; 1258 if (datasize > 0) { 1259 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); 1260 if (cqr->data == NULL) { 1261 kfree(cqr->cpaddr); 1262 kfree(cqr); 1263 return ERR_PTR(-ENOMEM); 1264 } 1265 } 1266 cqr->magic = magic; 1267 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1268 dasd_get_device(device); 1269 return cqr; 1270 } 1271 EXPORT_SYMBOL(dasd_kmalloc_request); 1272 1273 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, 1274 int datasize, 1275 struct dasd_device *device) 1276 { 1277 unsigned long flags; 1278 struct dasd_ccw_req *cqr; 1279 char *data; 1280 int size; 1281 1282 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 1283 if (cplength > 0) 1284 size += cplength * sizeof(struct ccw1); 1285 if (datasize > 0) 1286 size += datasize; 1287 spin_lock_irqsave(&device->mem_lock, flags); 1288 cqr = (struct dasd_ccw_req *) 1289 dasd_alloc_chunk(&device->ccw_chunks, size); 1290 spin_unlock_irqrestore(&device->mem_lock, flags); 1291 if (cqr == NULL) 1292 return ERR_PTR(-ENOMEM); 1293 memset(cqr, 0, sizeof(struct dasd_ccw_req)); 1294 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); 1295 cqr->cpaddr = NULL; 1296 if (cplength > 0) { 1297 cqr->cpaddr = (struct ccw1 *) data; 1298 data += cplength*sizeof(struct ccw1); 1299 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); 1300 } 1301 cqr->data = NULL; 1302 if (datasize > 0) { 1303 cqr->data = data; 1304 memset(cqr->data, 0, datasize); 1305 } 1306 cqr->magic = magic; 1307 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1308 dasd_get_device(device); 1309 return cqr; 1310 } 1311 EXPORT_SYMBOL(dasd_smalloc_request); 1312 1313 /* 1314 * Free memory of a channel program. This function needs to free all the 1315 * idal lists that might have been created by dasd_set_cda and the 1316 * struct dasd_ccw_req itself. 1317 */ 1318 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1319 { 1320 struct ccw1 *ccw; 1321 1322 /* Clear any idals used for the request. */ 1323 ccw = cqr->cpaddr; 1324 do { 1325 clear_normalized_cda(ccw); 1326 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); 1327 kfree(cqr->cpaddr); 1328 kfree(cqr->data); 1329 kfree(cqr); 1330 dasd_put_device(device); 1331 } 1332 EXPORT_SYMBOL(dasd_kfree_request); 1333 1334 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1335 { 1336 unsigned long flags; 1337 1338 spin_lock_irqsave(&device->mem_lock, flags); 1339 dasd_free_chunk(&device->ccw_chunks, cqr); 1340 spin_unlock_irqrestore(&device->mem_lock, flags); 1341 dasd_put_device(device); 1342 } 1343 EXPORT_SYMBOL(dasd_sfree_request); 1344 1345 /* 1346 * Check discipline magic in cqr. 
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;

        if (cqr == NULL)
                return -EINVAL;
        device = cqr->startdev;
        if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
                DBF_DEV_EVENT(DBF_WARNING, device,
                              " dasd_ccw_req 0x%08x magic doesn't match"
                              " discipline 0x%08x",
                              cqr->magic,
                              *(unsigned int *) device->discipline->name);
                return -EINVAL;
        }
        return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * The timer keeps the device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int retries, rc;
        char errorstring[ERRORLENGTH];

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;
        retries = 0;
        device = (struct dasd_device *) cqr->startdev;
        while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
                rc = ccw_device_clear(device->cdev, (long) cqr);
                switch (rc) {
                case 0: /* termination successful */
                        cqr->status = DASD_CQR_CLEAR_PENDING;
                        cqr->stopclk = get_tod_clock();
                        cqr->starttime = 0;
                        DBF_DEV_EVENT(DBF_DEBUG, device,
                                      "terminate cqr %p successful",
                                      cqr);
                        break;
                case -ENODEV:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device gone, retry");
                        break;
                case -EIO:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "I/O error, retry");
                        break;
                case -EINVAL:
                        /*
                         * device not valid so no I/O could be running
                         * handle CQR as termination successful
                         */
                        cqr->status = DASD_CQR_CLEARED;
                        cqr->stopclk = get_tod_clock();
                        cqr->starttime = 0;
                        /* no retries for invalid devices */
                        cqr->retries = -1;
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "EINVAL, handle as terminated");
                        /* fake rc to success */
                        rc = 0;
                        break;
                case -EBUSY:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device busy, retry later");
                        break;
                default:
                        /* internal error 10 - unknown rc */
                        snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
                        dev_err(&device->cdev->dev, "An error occurred in the "
                                "DASD device driver, reason=%s\n", errorstring);
                        BUG();
                        break;
                }
                retries++;
        }
        dasd_schedule_device_bh(device);
        return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
1440 */ 1441 int dasd_start_IO(struct dasd_ccw_req *cqr) 1442 { 1443 struct dasd_device *device; 1444 int rc; 1445 char errorstring[ERRORLENGTH]; 1446 1447 /* Check the cqr */ 1448 rc = dasd_check_cqr(cqr); 1449 if (rc) { 1450 cqr->intrc = rc; 1451 return rc; 1452 } 1453 device = (struct dasd_device *) cqr->startdev; 1454 if (((cqr->block && 1455 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || 1456 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && 1457 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 1458 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " 1459 "because of stolen lock", cqr); 1460 cqr->status = DASD_CQR_ERROR; 1461 cqr->intrc = -EPERM; 1462 return -EPERM; 1463 } 1464 if (cqr->retries < 0) { 1465 /* internal error 14 - start_IO run out of retries */ 1466 sprintf(errorstring, "14 %p", cqr); 1467 dev_err(&device->cdev->dev, "An error occurred in the DASD " 1468 "device driver, reason=%s\n", errorstring); 1469 cqr->status = DASD_CQR_ERROR; 1470 return -EIO; 1471 } 1472 cqr->startclk = get_tod_clock(); 1473 cqr->starttime = jiffies; 1474 cqr->retries--; 1475 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1476 cqr->lpm &= dasd_path_get_opm(device); 1477 if (!cqr->lpm) 1478 cqr->lpm = dasd_path_get_opm(device); 1479 } 1480 if (cqr->cpmode == 1) { 1481 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 1482 (long) cqr, cqr->lpm); 1483 } else { 1484 rc = ccw_device_start(device->cdev, cqr->cpaddr, 1485 (long) cqr, cqr->lpm, 0); 1486 } 1487 switch (rc) { 1488 case 0: 1489 cqr->status = DASD_CQR_IN_IO; 1490 break; 1491 case -EBUSY: 1492 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1493 "start_IO: device busy, retry later"); 1494 break; 1495 case -ETIMEDOUT: 1496 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1497 "start_IO: request timeout, retry later"); 1498 break; 1499 case -EACCES: 1500 /* -EACCES indicates that the request used only a subset of the 1501 * available paths and all these paths are gone. If the lpm of 1502 * this request was only a subset of the opm (e.g. the ppm) then 1503 * we just do a retry with all available paths. 1504 * If we already use the full opm, something is amiss, and we 1505 * need a full path verification. 
1506 */ 1507 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1508 DBF_DEV_EVENT(DBF_WARNING, device, 1509 "start_IO: selected paths gone (%x)", 1510 cqr->lpm); 1511 } else if (cqr->lpm != dasd_path_get_opm(device)) { 1512 cqr->lpm = dasd_path_get_opm(device); 1513 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 1514 "start_IO: selected paths gone," 1515 " retry on all paths"); 1516 } else { 1517 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1518 "start_IO: all paths in opm gone," 1519 " do path verification"); 1520 dasd_generic_last_path_gone(device); 1521 dasd_path_no_path(device); 1522 dasd_path_set_tbvpm(device, 1523 ccw_device_get_path_mask( 1524 device->cdev)); 1525 } 1526 break; 1527 case -ENODEV: 1528 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1529 "start_IO: -ENODEV device gone, retry"); 1530 break; 1531 case -EIO: 1532 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1533 "start_IO: -EIO device gone, retry"); 1534 break; 1535 case -EINVAL: 1536 /* most likely caused in power management context */ 1537 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1538 "start_IO: -EINVAL device currently " 1539 "not accessible"); 1540 break; 1541 default: 1542 /* internal error 11 - unknown rc */ 1543 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 1544 dev_err(&device->cdev->dev, 1545 "An error occurred in the DASD device driver, " 1546 "reason=%s\n", errorstring); 1547 BUG(); 1548 break; 1549 } 1550 cqr->intrc = rc; 1551 return rc; 1552 } 1553 EXPORT_SYMBOL(dasd_start_IO); 1554 1555 /* 1556 * Timeout function for dasd devices. This is used for different purposes 1557 * 1) missing interrupt handler for normal operation 1558 * 2) delayed start of request where start_IO failed with -EBUSY 1559 * 3) timeout for missing state change interrupts 1560 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 1561 * DASD_CQR_QUEUED for 2) and 3). 1562 */ 1563 static void dasd_device_timeout(unsigned long ptr) 1564 { 1565 unsigned long flags; 1566 struct dasd_device *device; 1567 1568 device = (struct dasd_device *) ptr; 1569 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1570 /* re-activate request queue */ 1571 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1572 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1573 dasd_schedule_device_bh(device); 1574 } 1575 1576 /* 1577 * Setup timeout for a device in jiffies. 1578 */ 1579 void dasd_device_set_timer(struct dasd_device *device, int expires) 1580 { 1581 if (expires == 0) 1582 del_timer(&device->timer); 1583 else 1584 mod_timer(&device->timer, jiffies + expires); 1585 } 1586 EXPORT_SYMBOL(dasd_device_set_timer); 1587 1588 /* 1589 * Clear timeout for a device. 
1590 */ 1591 void dasd_device_clear_timer(struct dasd_device *device) 1592 { 1593 del_timer(&device->timer); 1594 } 1595 EXPORT_SYMBOL(dasd_device_clear_timer); 1596 1597 static void dasd_handle_killed_request(struct ccw_device *cdev, 1598 unsigned long intparm) 1599 { 1600 struct dasd_ccw_req *cqr; 1601 struct dasd_device *device; 1602 1603 if (!intparm) 1604 return; 1605 cqr = (struct dasd_ccw_req *) intparm; 1606 if (cqr->status != DASD_CQR_IN_IO) { 1607 DBF_EVENT_DEVID(DBF_DEBUG, cdev, 1608 "invalid status in handle_killed_request: " 1609 "%02x", cqr->status); 1610 return; 1611 } 1612 1613 device = dasd_device_from_cdev_locked(cdev); 1614 if (IS_ERR(device)) { 1615 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1616 "unable to get device from cdev"); 1617 return; 1618 } 1619 1620 if (!cqr->startdev || 1621 device != cqr->startdev || 1622 strncmp(cqr->startdev->discipline->ebcname, 1623 (char *) &cqr->magic, 4)) { 1624 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1625 "invalid device in request"); 1626 dasd_put_device(device); 1627 return; 1628 } 1629 1630 /* Schedule request to be retried. */ 1631 cqr->status = DASD_CQR_QUEUED; 1632 1633 dasd_device_clear_timer(device); 1634 dasd_schedule_device_bh(device); 1635 dasd_put_device(device); 1636 } 1637 1638 void dasd_generic_handle_state_change(struct dasd_device *device) 1639 { 1640 /* First of all start sense subsystem status request. */ 1641 dasd_eer_snss(device); 1642 1643 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1644 dasd_schedule_device_bh(device); 1645 if (device->block) { 1646 dasd_schedule_block_bh(device->block); 1647 blk_mq_run_hw_queues(device->block->request_queue, true); 1648 } 1649 } 1650 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 1651 1652 static int dasd_check_hpf_error(struct irb *irb) 1653 { 1654 return (scsw_tm_is_valid_schxs(&irb->scsw) && 1655 (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX || 1656 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX)); 1657 } 1658 1659 /* 1660 * Interrupt handler for "normal" ssch-io based dasd devices. 
1661 */ 1662 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1663 struct irb *irb) 1664 { 1665 struct dasd_ccw_req *cqr, *next; 1666 struct dasd_device *device; 1667 unsigned long now; 1668 int nrf_suppressed = 0; 1669 int fp_suppressed = 0; 1670 u8 *sense = NULL; 1671 int expires; 1672 1673 cqr = (struct dasd_ccw_req *) intparm; 1674 if (IS_ERR(irb)) { 1675 switch (PTR_ERR(irb)) { 1676 case -EIO: 1677 if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { 1678 device = cqr->startdev; 1679 cqr->status = DASD_CQR_CLEARED; 1680 dasd_device_clear_timer(device); 1681 wake_up(&dasd_flush_wq); 1682 dasd_schedule_device_bh(device); 1683 return; 1684 } 1685 break; 1686 case -ETIMEDOUT: 1687 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1688 "request timed out\n", __func__); 1689 break; 1690 default: 1691 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1692 "unknown error %ld\n", __func__, 1693 PTR_ERR(irb)); 1694 } 1695 dasd_handle_killed_request(cdev, intparm); 1696 return; 1697 } 1698 1699 now = get_tod_clock(); 1700 /* check for conditions that should be handled immediately */ 1701 if (!cqr || 1702 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1703 scsw_cstat(&irb->scsw) == 0)) { 1704 if (cqr) 1705 memcpy(&cqr->irb, irb, sizeof(*irb)); 1706 device = dasd_device_from_cdev_locked(cdev); 1707 if (IS_ERR(device)) 1708 return; 1709 /* ignore unsolicited interrupts for DIAG discipline */ 1710 if (device->discipline == dasd_diag_discipline_pointer) { 1711 dasd_put_device(device); 1712 return; 1713 } 1714 1715 /* 1716 * In some cases 'File Protected' or 'No Record Found' errors 1717 * might be expected and debug log messages for the 1718 * corresponding interrupts shouldn't be written then. 1719 * Check if either of the according suppress bits is set. 
                 */
                sense = dasd_get_sense(irb);
                if (sense) {
                        fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
                                test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
                        nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
                                test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
                }
                if (!(fp_suppressed || nrf_suppressed))
                        device->discipline->dump_sense_dbf(device, irb, "int");

                if (device->features & DASD_FEATURE_ERPLOG)
                        device->discipline->dump_sense(device, cqr, irb);
                device->discipline->check_for_device_change(device, cqr, irb);
                dasd_put_device(device);
        }

        /* check for attention message */
        if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
                device = dasd_device_from_cdev_locked(cdev);
                if (!IS_ERR(device)) {
                        device->discipline->check_attention(device,
                                                            irb->esw.esw1.lpum);
                        dasd_put_device(device);
                }
        }

        if (!cqr)
                return;

        device = (struct dasd_device *) cqr->startdev;
        if (!device ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
                                "invalid device in request");
                return;
        }

        /* Check for clear pending */
        if (cqr->status == DASD_CQR_CLEAR_PENDING &&
            scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
                cqr->status = DASD_CQR_CLEARED;
                dasd_device_clear_timer(device);
                wake_up(&dasd_flush_wq);
                dasd_schedule_device_bh(device);
                return;
        }

        /* check status - the request might have been killed by dyn detach */
        if (cqr->status != DASD_CQR_IN_IO) {
                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
                              "status %02x", dev_name(&cdev->dev), cqr->status);
                return;
        }

        next = NULL;
        expires = 0;
        if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
            scsw_cstat(&irb->scsw) == 0) {
                /* request was completed successfully */
                cqr->status = DASD_CQR_SUCCESS;
                cqr->stopclk = now;
                /* Start first request on queue if possible -> fast_io.
*/ 1783 if (cqr->devlist.next != &device->ccw_queue) { 1784 next = list_entry(cqr->devlist.next, 1785 struct dasd_ccw_req, devlist); 1786 } 1787 } else { /* error */ 1788 /* check for HPF error 1789 * call discipline function to requeue all requests 1790 * and disable HPF accordingly 1791 */ 1792 if (cqr->cpmode && dasd_check_hpf_error(irb) && 1793 device->discipline->handle_hpf_error) 1794 device->discipline->handle_hpf_error(device, irb); 1795 /* 1796 * If we don't want complex ERP for this request, then just 1797 * reset this and retry it in the fastpath 1798 */ 1799 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1800 cqr->retries > 0) { 1801 if (cqr->lpm == dasd_path_get_opm(device)) 1802 DBF_DEV_EVENT(DBF_DEBUG, device, 1803 "default ERP in fastpath " 1804 "(%i retries left)", 1805 cqr->retries); 1806 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) 1807 cqr->lpm = dasd_path_get_opm(device); 1808 cqr->status = DASD_CQR_QUEUED; 1809 next = cqr; 1810 } else 1811 cqr->status = DASD_CQR_ERROR; 1812 } 1813 if (next && (next->status == DASD_CQR_QUEUED) && 1814 (!device->stopped)) { 1815 if (device->discipline->start_IO(next) == 0) 1816 expires = next->expires; 1817 } 1818 if (expires != 0) 1819 dasd_device_set_timer(device, expires); 1820 else 1821 dasd_device_clear_timer(device); 1822 dasd_schedule_device_bh(device); 1823 } 1824 EXPORT_SYMBOL(dasd_int_handler); 1825 1826 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) 1827 { 1828 struct dasd_device *device; 1829 1830 device = dasd_device_from_cdev_locked(cdev); 1831 1832 if (IS_ERR(device)) 1833 goto out; 1834 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1835 device->state != device->target || 1836 !device->discipline->check_for_device_change){ 1837 dasd_put_device(device); 1838 goto out; 1839 } 1840 if (device->discipline->dump_sense_dbf) 1841 device->discipline->dump_sense_dbf(device, irb, "uc"); 1842 device->discipline->check_for_device_change(device, NULL, irb); 1843 dasd_put_device(device); 1844 out: 1845 return UC_TODO_RETRY; 1846 } 1847 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); 1848 1849 /* 1850 * If we have an error on a dasd_block layer request then we cancel 1851 * and return all further requests from the same dasd_block as well. 1852 */ 1853 static void __dasd_device_recovery(struct dasd_device *device, 1854 struct dasd_ccw_req *ref_cqr) 1855 { 1856 struct list_head *l, *n; 1857 struct dasd_ccw_req *cqr; 1858 1859 /* 1860 * only requeue request that came from the dasd_block layer 1861 */ 1862 if (!ref_cqr->block) 1863 return; 1864 1865 list_for_each_safe(l, n, &device->ccw_queue) { 1866 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1867 if (cqr->status == DASD_CQR_QUEUED && 1868 ref_cqr->block == cqr->block) { 1869 cqr->status = DASD_CQR_CLEARED; 1870 } 1871 } 1872 }; 1873 1874 /* 1875 * Remove those ccw requests from the queue that need to be returned 1876 * to the upper layer. 1877 */ 1878 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1879 struct list_head *final_queue) 1880 { 1881 struct list_head *l, *n; 1882 struct dasd_ccw_req *cqr; 1883 1884 /* Process request with final status. */ 1885 list_for_each_safe(l, n, &device->ccw_queue) { 1886 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1887 1888 /* Skip any non-final request. 
                 */
                if (cqr->status == DASD_CQR_QUEUED ||
                    cqr->status == DASD_CQR_IN_IO ||
                    cqr->status == DASD_CQR_CLEAR_PENDING)
                        continue;
                if (cqr->status == DASD_CQR_ERROR) {
                        __dasd_device_recovery(device, cqr);
                }
                /* Rechain finished requests to final queue */
                list_move_tail(&cqr->devlist, final_queue);
        }
}

/*
 * The cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
                                              struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_block *block;
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
        char errorstring[ERRORLENGTH];

        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
                block = cqr->block;
                callback = cqr->callback;
                callback_data = cqr->callback_data;
                if (block)
                        spin_lock_bh(&block->queue_lock);
                switch (cqr->status) {
                case DASD_CQR_SUCCESS:
                        cqr->status = DASD_CQR_DONE;
                        break;
                case DASD_CQR_ERROR:
                        cqr->status = DASD_CQR_NEED_ERP;
                        break;
                case DASD_CQR_CLEARED:
                        cqr->status = DASD_CQR_TERMINATED;
                        break;
                default:
                        /* internal error 12 - wrong cqr status */
                        snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
                                 cqr, cqr->status);
                        dev_err(&device->cdev->dev,
                                "An error occurred in the DASD device driver, "
                                "reason=%s\n", errorstring);
                        BUG();
                }
                if (cqr->callback != NULL)
                        (callback)(cqr, callback_data);
                if (block)
                        spin_unlock_bh(&block->queue_lock);
        }
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
1951 */ 1952 static void __dasd_device_check_expire(struct dasd_device *device) 1953 { 1954 struct dasd_ccw_req *cqr; 1955 1956 if (list_empty(&device->ccw_queue)) 1957 return; 1958 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1959 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1960 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1961 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1962 /* 1963 * IO in safe offline processing should not 1964 * run out of retries 1965 */ 1966 cqr->retries++; 1967 } 1968 if (device->discipline->term_IO(cqr) != 0) { 1969 /* Hmpf, try again in 5 sec */ 1970 dev_err(&device->cdev->dev, 1971 "cqr %p timed out (%lus) but cannot be " 1972 "ended, retrying in 5 s\n", 1973 cqr, (cqr->expires/HZ)); 1974 cqr->expires += 5*HZ; 1975 dasd_device_set_timer(device, 5*HZ); 1976 } else { 1977 dev_err(&device->cdev->dev, 1978 "cqr %p timed out (%lus), %i retries " 1979 "remaining\n", cqr, (cqr->expires/HZ), 1980 cqr->retries); 1981 } 1982 } 1983 } 1984 1985 /* 1986 * return 1 when device is not eligible for IO 1987 */ 1988 static int __dasd_device_is_unusable(struct dasd_device *device, 1989 struct dasd_ccw_req *cqr) 1990 { 1991 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); 1992 1993 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 1994 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1995 /* 1996 * dasd is being set offline 1997 * but it is no safe offline where we have to allow I/O 1998 */ 1999 return 1; 2000 } 2001 if (device->stopped) { 2002 if (device->stopped & mask) { 2003 /* stopped and CQR will not change that. */ 2004 return 1; 2005 } 2006 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2007 /* CQR is not able to change device to 2008 * operational. */ 2009 return 1; 2010 } 2011 /* CQR required to get device operational. */ 2012 } 2013 return 0; 2014 } 2015 2016 /* 2017 * Take a look at the first request on the ccw queue and check 2018 * if it needs to be started. 2019 */ 2020 static void __dasd_device_start_head(struct dasd_device *device) 2021 { 2022 struct dasd_ccw_req *cqr; 2023 int rc; 2024 2025 if (list_empty(&device->ccw_queue)) 2026 return; 2027 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2028 if (cqr->status != DASD_CQR_QUEUED) 2029 return; 2030 /* if device is not usable return request to upper layer */ 2031 if (__dasd_device_is_unusable(device, cqr)) { 2032 cqr->intrc = -EAGAIN; 2033 cqr->status = DASD_CQR_CLEARED; 2034 dasd_schedule_device_bh(device); 2035 return; 2036 } 2037 2038 rc = device->discipline->start_IO(cqr); 2039 if (rc == 0) 2040 dasd_device_set_timer(device, cqr->expires); 2041 else if (rc == -EACCES) { 2042 dasd_schedule_device_bh(device); 2043 } else 2044 /* Hmpf, try again in 1/2 sec */ 2045 dasd_device_set_timer(device, 50); 2046 } 2047 2048 static void __dasd_device_check_path_events(struct dasd_device *device) 2049 { 2050 int rc; 2051 2052 if (!dasd_path_get_tbvpm(device)) 2053 return; 2054 2055 if (device->stopped & 2056 ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) 2057 return; 2058 rc = device->discipline->verify_path(device, 2059 dasd_path_get_tbvpm(device)); 2060 if (rc) 2061 dasd_device_set_timer(device, 50); 2062 else 2063 dasd_path_clear_all_verify(device); 2064 }; 2065 2066 /* 2067 * Go through all request on the dasd_device request queue, 2068 * terminate them on the cdev if necessary, and return them to the 2069 * submitting layer via callback. 
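 * Requests that are currently in DASD_CQR_IN_IO are terminated through
 * the discipline's term_IO callback; requests still in DASD_CQR_QUEUED
 * are simply marked DASD_CQR_CLEARED.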
2070 * Note: 2071 * Make sure that all 'submitting layers' still exist when 2072 * this function is called!. In other words, when 'device' is a base 2073 * device then all block layer requests must have been removed before 2074 * via dasd_flush_block_queue. 2075 */ 2076 int dasd_flush_device_queue(struct dasd_device *device) 2077 { 2078 struct dasd_ccw_req *cqr, *n; 2079 int rc; 2080 struct list_head flush_queue; 2081 2082 INIT_LIST_HEAD(&flush_queue); 2083 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2084 rc = 0; 2085 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2086 /* Check status and move request to flush_queue */ 2087 switch (cqr->status) { 2088 case DASD_CQR_IN_IO: 2089 rc = device->discipline->term_IO(cqr); 2090 if (rc) { 2091 /* unable to terminate requeust */ 2092 dev_err(&device->cdev->dev, 2093 "Flushing the DASD request queue " 2094 "failed for request %p\n", cqr); 2095 /* stop flush processing */ 2096 goto finished; 2097 } 2098 break; 2099 case DASD_CQR_QUEUED: 2100 cqr->stopclk = get_tod_clock(); 2101 cqr->status = DASD_CQR_CLEARED; 2102 break; 2103 default: /* no need to modify the others */ 2104 break; 2105 } 2106 list_move_tail(&cqr->devlist, &flush_queue); 2107 } 2108 finished: 2109 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2110 /* 2111 * After this point all requests must be in state CLEAR_PENDING, 2112 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2113 * one of the others. 2114 */ 2115 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2116 wait_event(dasd_flush_wq, 2117 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2118 /* 2119 * Now set each request back to TERMINATED, DONE or NEED_ERP 2120 * and call the callback function of flushed requests 2121 */ 2122 __dasd_device_process_final_queue(device, &flush_queue); 2123 return rc; 2124 } 2125 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2126 2127 /* 2128 * Acquire the device lock and process queues for the device. 2129 */ 2130 static void dasd_device_tasklet(struct dasd_device *device) 2131 { 2132 struct list_head final_queue; 2133 2134 atomic_set (&device->tasklet_scheduled, 0); 2135 INIT_LIST_HEAD(&final_queue); 2136 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2137 /* Check expire time of first request on the ccw queue. */ 2138 __dasd_device_check_expire(device); 2139 /* find final requests on ccw queue */ 2140 __dasd_device_process_ccw_queue(device, &final_queue); 2141 __dasd_device_check_path_events(device); 2142 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2143 /* Now call the callback function of requests with final status */ 2144 __dasd_device_process_final_queue(device, &final_queue); 2145 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2146 /* Now check if the head of the ccw queue needs to be started. */ 2147 __dasd_device_start_head(device); 2148 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2149 if (waitqueue_active(&shutdown_waitq)) 2150 wake_up(&shutdown_waitq); 2151 dasd_put_device(device); 2152 } 2153 2154 /* 2155 * Schedules a call to dasd_tasklet over the device tasklet. 2156 */ 2157 void dasd_schedule_device_bh(struct dasd_device *device) 2158 { 2159 /* Protect against rescheduling. 
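 * Only the first caller actually schedules the tasklet; the
 * tasklet_scheduled flag is cleared again by dasd_device_tasklet itself.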
*/ 2160 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2161 return; 2162 dasd_get_device(device); 2163 tasklet_hi_schedule(&device->tasklet); 2164 } 2165 EXPORT_SYMBOL(dasd_schedule_device_bh); 2166 2167 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2168 { 2169 device->stopped |= bits; 2170 } 2171 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2172 2173 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2174 { 2175 device->stopped &= ~bits; 2176 if (!device->stopped) 2177 wake_up(&generic_waitq); 2178 } 2179 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2180 2181 /* 2182 * Queue a request to the head of the device ccw_queue. 2183 * Start the I/O if possible. 2184 */ 2185 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2186 { 2187 struct dasd_device *device; 2188 unsigned long flags; 2189 2190 device = cqr->startdev; 2191 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2192 cqr->status = DASD_CQR_QUEUED; 2193 list_add(&cqr->devlist, &device->ccw_queue); 2194 /* let the bh start the request to keep them in order */ 2195 dasd_schedule_device_bh(device); 2196 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2197 } 2198 EXPORT_SYMBOL(dasd_add_request_head); 2199 2200 /* 2201 * Queue a request to the tail of the device ccw_queue. 2202 * Start the I/O if possible. 2203 */ 2204 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2205 { 2206 struct dasd_device *device; 2207 unsigned long flags; 2208 2209 device = cqr->startdev; 2210 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2211 cqr->status = DASD_CQR_QUEUED; 2212 list_add_tail(&cqr->devlist, &device->ccw_queue); 2213 /* let the bh start the request to keep them in order */ 2214 dasd_schedule_device_bh(device); 2215 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2216 } 2217 EXPORT_SYMBOL(dasd_add_request_tail); 2218 2219 /* 2220 * Wakeup helper for the 'sleep_on' functions. 2221 */ 2222 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2223 { 2224 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2225 cqr->callback_data = DASD_SLEEPON_END_TAG; 2226 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2227 wake_up(&generic_waitq); 2228 } 2229 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2230 2231 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2232 { 2233 struct dasd_device *device; 2234 int rc; 2235 2236 device = cqr->startdev; 2237 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2238 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2239 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2240 return rc; 2241 } 2242 2243 /* 2244 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 
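 * Terminated requests are handed back to the discipline via
 * handle_terminated_request, requests in DASD_CQR_NEED_ERP get the
 * discipline's erp_action started, failed requests have their sense
 * data logged, and finished ERP requests (cqr->refers set) are
 * post-processed through __dasd_process_erp.  All of this only applies
 * if DASD_CQR_FLAGS_USE_ERP is set on the request.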
2245 */ 2246 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2247 { 2248 struct dasd_device *device; 2249 dasd_erp_fn_t erp_fn; 2250 2251 if (cqr->status == DASD_CQR_FILLED) 2252 return 0; 2253 device = cqr->startdev; 2254 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2255 if (cqr->status == DASD_CQR_TERMINATED) { 2256 device->discipline->handle_terminated_request(cqr); 2257 return 1; 2258 } 2259 if (cqr->status == DASD_CQR_NEED_ERP) { 2260 erp_fn = device->discipline->erp_action(cqr); 2261 erp_fn(cqr); 2262 return 1; 2263 } 2264 if (cqr->status == DASD_CQR_FAILED) 2265 dasd_log_sense(cqr, &cqr->irb); 2266 if (cqr->refers) { 2267 __dasd_process_erp(device, cqr); 2268 return 1; 2269 } 2270 } 2271 return 0; 2272 } 2273 2274 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2275 { 2276 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2277 if (cqr->refers) /* erp is not done yet */ 2278 return 1; 2279 return ((cqr->status != DASD_CQR_DONE) && 2280 (cqr->status != DASD_CQR_FAILED)); 2281 } else 2282 return (cqr->status == DASD_CQR_FILLED); 2283 } 2284 2285 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2286 { 2287 struct dasd_device *device; 2288 int rc; 2289 struct list_head ccw_queue; 2290 struct dasd_ccw_req *cqr; 2291 2292 INIT_LIST_HEAD(&ccw_queue); 2293 maincqr->status = DASD_CQR_FILLED; 2294 device = maincqr->startdev; 2295 list_add(&maincqr->blocklist, &ccw_queue); 2296 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2297 cqr = list_first_entry(&ccw_queue, 2298 struct dasd_ccw_req, blocklist)) { 2299 2300 if (__dasd_sleep_on_erp(cqr)) 2301 continue; 2302 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2303 continue; 2304 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2305 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2306 cqr->status = DASD_CQR_FAILED; 2307 cqr->intrc = -EPERM; 2308 continue; 2309 } 2310 /* Non-temporary stop condition will trigger fail fast */ 2311 if (device->stopped & ~DASD_STOPPED_PENDING && 2312 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2313 (!dasd_eer_enabled(device))) { 2314 cqr->status = DASD_CQR_FAILED; 2315 cqr->intrc = -ENOLINK; 2316 continue; 2317 } 2318 /* 2319 * Don't try to start requests if device is in 2320 * offline processing, it might wait forever 2321 */ 2322 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2323 cqr->status = DASD_CQR_FAILED; 2324 cqr->intrc = -ENODEV; 2325 continue; 2326 } 2327 /* 2328 * Don't try to start requests if device is stopped 2329 * except path verification requests 2330 */ 2331 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2332 if (interruptible) { 2333 rc = wait_event_interruptible( 2334 generic_waitq, !(device->stopped)); 2335 if (rc == -ERESTARTSYS) { 2336 cqr->status = DASD_CQR_FAILED; 2337 maincqr->intrc = rc; 2338 continue; 2339 } 2340 } else 2341 wait_event(generic_waitq, !(device->stopped)); 2342 } 2343 if (!cqr->callback) 2344 cqr->callback = dasd_wakeup_cb; 2345 2346 cqr->callback_data = DASD_SLEEPON_START_TAG; 2347 dasd_add_request_tail(cqr); 2348 if (interruptible) { 2349 rc = wait_event_interruptible( 2350 generic_waitq, _wait_for_wakeup(cqr)); 2351 if (rc == -ERESTARTSYS) { 2352 dasd_cancel_req(cqr); 2353 /* wait (non-interruptible) for final status */ 2354 wait_event(generic_waitq, 2355 _wait_for_wakeup(cqr)); 2356 cqr->status = DASD_CQR_FAILED; 2357 maincqr->intrc = rc; 2358 continue; 2359 } 2360 } else 2361 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2362 } 2363 2364 maincqr->endclk = 
get_tod_clock(); 2365 if ((maincqr->status != DASD_CQR_DONE) && 2366 (maincqr->intrc != -ERESTARTSYS)) 2367 dasd_log_sense(maincqr, &maincqr->irb); 2368 if (maincqr->status == DASD_CQR_DONE) 2369 rc = 0; 2370 else if (maincqr->intrc) 2371 rc = maincqr->intrc; 2372 else 2373 rc = -EIO; 2374 return rc; 2375 } 2376 2377 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2378 { 2379 struct dasd_ccw_req *cqr; 2380 2381 list_for_each_entry(cqr, ccw_queue, blocklist) { 2382 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2383 return 0; 2384 } 2385 2386 return 1; 2387 } 2388 2389 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2390 { 2391 struct dasd_device *device; 2392 struct dasd_ccw_req *cqr, *n; 2393 u8 *sense = NULL; 2394 int rc; 2395 2396 retry: 2397 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2398 device = cqr->startdev; 2399 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2400 continue; 2401 2402 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2403 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2404 cqr->status = DASD_CQR_FAILED; 2405 cqr->intrc = -EPERM; 2406 continue; 2407 } 2408 /*Non-temporary stop condition will trigger fail fast*/ 2409 if (device->stopped & ~DASD_STOPPED_PENDING && 2410 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2411 !dasd_eer_enabled(device)) { 2412 cqr->status = DASD_CQR_FAILED; 2413 cqr->intrc = -EAGAIN; 2414 continue; 2415 } 2416 2417 /*Don't try to start requests if device is stopped*/ 2418 if (interruptible) { 2419 rc = wait_event_interruptible( 2420 generic_waitq, !device->stopped); 2421 if (rc == -ERESTARTSYS) { 2422 cqr->status = DASD_CQR_FAILED; 2423 cqr->intrc = rc; 2424 continue; 2425 } 2426 } else 2427 wait_event(generic_waitq, !(device->stopped)); 2428 2429 if (!cqr->callback) 2430 cqr->callback = dasd_wakeup_cb; 2431 cqr->callback_data = DASD_SLEEPON_START_TAG; 2432 dasd_add_request_tail(cqr); 2433 } 2434 2435 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2436 2437 rc = 0; 2438 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2439 /* 2440 * In some cases the 'File Protected' or 'Incorrect Length' 2441 * error might be expected and error recovery would be 2442 * unnecessary in these cases. Check if the according suppress 2443 * bit is set. 2444 */ 2445 sense = dasd_get_sense(&cqr->irb); 2446 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2447 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2448 continue; 2449 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2450 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2451 continue; 2452 2453 /* 2454 * for alias devices simplify error recovery and 2455 * return to upper layer 2456 * do not skip ERP requests 2457 */ 2458 if (cqr->startdev != cqr->basedev && !cqr->refers && 2459 (cqr->status == DASD_CQR_TERMINATED || 2460 cqr->status == DASD_CQR_NEED_ERP)) 2461 return -EAGAIN; 2462 2463 /* normal recovery for basedev IO */ 2464 if (__dasd_sleep_on_erp(cqr)) 2465 /* handle erp first */ 2466 goto retry; 2467 } 2468 2469 return 0; 2470 } 2471 2472 /* 2473 * Queue a request to the tail of the device ccw_queue and wait for 2474 * it's completion. 2475 */ 2476 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2477 { 2478 return _dasd_sleep_on(cqr, 0); 2479 } 2480 EXPORT_SYMBOL(dasd_sleep_on); 2481 2482 /* 2483 * Start requests from a ccw_queue and wait for their completion. 
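 * Callers chain their prepared requests on a local list via
 * cqr->blocklist and wait for all of them at once.  A minimal sketch
 * (assuming the cqrs were already built by a discipline and set to
 * DASD_CQR_FILLED; error handling omitted):
 *
 *	LIST_HEAD(ccw_queue);
 *
 *	list_add_tail(&cqr->blocklist, &ccw_queue);
 *	rc = dasd_sleep_on_queue(&ccw_queue);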
2484 */ 2485 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2486 { 2487 return _dasd_sleep_on_queue(ccw_queue, 0); 2488 } 2489 EXPORT_SYMBOL(dasd_sleep_on_queue); 2490 2491 /* 2492 * Queue a request to the tail of the device ccw_queue and wait 2493 * interruptible for it's completion. 2494 */ 2495 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2496 { 2497 return _dasd_sleep_on(cqr, 1); 2498 } 2499 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2500 2501 /* 2502 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2503 * for eckd devices) the currently running request has to be terminated 2504 * and be put back to status queued, before the special request is added 2505 * to the head of the queue. Then the special request is waited on normally. 2506 */ 2507 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2508 { 2509 struct dasd_ccw_req *cqr; 2510 int rc; 2511 2512 if (list_empty(&device->ccw_queue)) 2513 return 0; 2514 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2515 rc = device->discipline->term_IO(cqr); 2516 if (!rc) 2517 /* 2518 * CQR terminated because a more important request is pending. 2519 * Undo decreasing of retry counter because this is 2520 * not an error case. 2521 */ 2522 cqr->retries++; 2523 return rc; 2524 } 2525 2526 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2527 { 2528 struct dasd_device *device; 2529 int rc; 2530 2531 device = cqr->startdev; 2532 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2533 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2534 cqr->status = DASD_CQR_FAILED; 2535 cqr->intrc = -EPERM; 2536 return -EIO; 2537 } 2538 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2539 rc = _dasd_term_running_cqr(device); 2540 if (rc) { 2541 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2542 return rc; 2543 } 2544 cqr->callback = dasd_wakeup_cb; 2545 cqr->callback_data = DASD_SLEEPON_START_TAG; 2546 cqr->status = DASD_CQR_QUEUED; 2547 /* 2548 * add new request as second 2549 * first the terminated cqr needs to be finished 2550 */ 2551 list_add(&cqr->devlist, device->ccw_queue.next); 2552 2553 /* let the bh start the request to keep them in order */ 2554 dasd_schedule_device_bh(device); 2555 2556 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2557 2558 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2559 2560 if (cqr->status == DASD_CQR_DONE) 2561 rc = 0; 2562 else if (cqr->intrc) 2563 rc = cqr->intrc; 2564 else 2565 rc = -EIO; 2566 2567 /* kick tasklets */ 2568 dasd_schedule_device_bh(device); 2569 if (device->block) 2570 dasd_schedule_block_bh(device->block); 2571 2572 return rc; 2573 } 2574 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2575 2576 /* 2577 * Cancels a request that was started with dasd_sleep_on_req. 2578 * This is useful to timeout requests. The request will be 2579 * terminated if it is currently in i/o. 2580 * Returns 0 if request termination was successful 2581 * negative error code if termination failed 2582 * Cancellation of a request is an asynchronous operation! The calling 2583 * function has to wait until the request is properly returned via callback. 
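 * _dasd_sleep_on() does this when an interruptible wait is hit by a
 * signal:
 *
 *	dasd_cancel_req(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));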
2584 */ 2585 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2586 { 2587 struct dasd_device *device = cqr->startdev; 2588 unsigned long flags; 2589 int rc; 2590 2591 rc = 0; 2592 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2593 switch (cqr->status) { 2594 case DASD_CQR_QUEUED: 2595 /* request was not started - just set to cleared */ 2596 cqr->status = DASD_CQR_CLEARED; 2597 if (cqr->callback_data == DASD_SLEEPON_START_TAG) 2598 cqr->callback_data = DASD_SLEEPON_END_TAG; 2599 break; 2600 case DASD_CQR_IN_IO: 2601 /* request in IO - terminate IO and release again */ 2602 rc = device->discipline->term_IO(cqr); 2603 if (rc) { 2604 dev_err(&device->cdev->dev, 2605 "Cancelling request %p failed with rc=%d\n", 2606 cqr, rc); 2607 } else { 2608 cqr->stopclk = get_tod_clock(); 2609 } 2610 break; 2611 default: /* already finished or clear pending - do nothing */ 2612 break; 2613 } 2614 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2615 dasd_schedule_device_bh(device); 2616 return rc; 2617 } 2618 EXPORT_SYMBOL(dasd_cancel_req); 2619 2620 /* 2621 * SECTION: Operations of the dasd_block layer. 2622 */ 2623 2624 /* 2625 * Timeout function for dasd_block. This is used when the block layer 2626 * is waiting for something that may not come reliably, (e.g. a state 2627 * change interrupt) 2628 */ 2629 static void dasd_block_timeout(unsigned long ptr) 2630 { 2631 unsigned long flags; 2632 struct dasd_block *block; 2633 2634 block = (struct dasd_block *) ptr; 2635 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2636 /* re-activate request queue */ 2637 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2638 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2639 dasd_schedule_block_bh(block); 2640 blk_mq_run_hw_queues(block->request_queue, true); 2641 } 2642 2643 /* 2644 * Setup timeout for a dasd_block in jiffies. 2645 */ 2646 void dasd_block_set_timer(struct dasd_block *block, int expires) 2647 { 2648 if (expires == 0) 2649 del_timer(&block->timer); 2650 else 2651 mod_timer(&block->timer, jiffies + expires); 2652 } 2653 EXPORT_SYMBOL(dasd_block_set_timer); 2654 2655 /* 2656 * Clear timeout for a dasd_block. 2657 */ 2658 void dasd_block_clear_timer(struct dasd_block *block) 2659 { 2660 del_timer(&block->timer); 2661 } 2662 EXPORT_SYMBOL(dasd_block_clear_timer); 2663 2664 /* 2665 * Process finished error recovery ccw. 
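 * ERP success is only noted in the debug feature entry, an ERP failure
 * is reported on the console; in both cases the discipline's
 * erp_postaction callback is invoked on the request afterwards.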
2666 */ 2667 static void __dasd_process_erp(struct dasd_device *device, 2668 struct dasd_ccw_req *cqr) 2669 { 2670 dasd_erp_fn_t erp_fn; 2671 2672 if (cqr->status == DASD_CQR_DONE) 2673 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2674 else 2675 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2676 erp_fn = device->discipline->erp_postaction(cqr); 2677 erp_fn(cqr); 2678 } 2679 2680 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2681 { 2682 struct request *req; 2683 blk_status_t error = BLK_STS_OK; 2684 int status; 2685 2686 req = (struct request *) cqr->callback_data; 2687 dasd_profile_end(cqr->block, cqr, req); 2688 2689 status = cqr->block->base->discipline->free_cp(cqr, req); 2690 if (status < 0) 2691 error = errno_to_blk_status(status); 2692 else if (status == 0) { 2693 switch (cqr->intrc) { 2694 case -EPERM: 2695 error = BLK_STS_NEXUS; 2696 break; 2697 case -ENOLINK: 2698 error = BLK_STS_TRANSPORT; 2699 break; 2700 case -ETIMEDOUT: 2701 error = BLK_STS_TIMEOUT; 2702 break; 2703 default: 2704 error = BLK_STS_IOERR; 2705 break; 2706 } 2707 } 2708 2709 /* 2710 * We need to take care for ETIMEDOUT errors here since the 2711 * complete callback does not get called in this case. 2712 * Take care of all errors here and avoid additional code to 2713 * transfer the error value to the complete callback. 2714 */ 2715 if (error) { 2716 blk_mq_end_request(req, error); 2717 blk_mq_run_hw_queues(req->q, true); 2718 } else { 2719 blk_mq_complete_request(req); 2720 } 2721 } 2722 2723 /* 2724 * Process ccw request queue. 2725 */ 2726 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2727 struct list_head *final_queue) 2728 { 2729 struct list_head *l, *n; 2730 struct dasd_ccw_req *cqr; 2731 dasd_erp_fn_t erp_fn; 2732 unsigned long flags; 2733 struct dasd_device *base = block->base; 2734 2735 restart: 2736 /* Process request with final status. */ 2737 list_for_each_safe(l, n, &block->ccw_queue) { 2738 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2739 if (cqr->status != DASD_CQR_DONE && 2740 cqr->status != DASD_CQR_FAILED && 2741 cqr->status != DASD_CQR_NEED_ERP && 2742 cqr->status != DASD_CQR_TERMINATED) 2743 continue; 2744 2745 if (cqr->status == DASD_CQR_TERMINATED) { 2746 base->discipline->handle_terminated_request(cqr); 2747 goto restart; 2748 } 2749 2750 /* Process requests that may be recovered */ 2751 if (cqr->status == DASD_CQR_NEED_ERP) { 2752 erp_fn = base->discipline->erp_action(cqr); 2753 if (IS_ERR(erp_fn(cqr))) 2754 continue; 2755 goto restart; 2756 } 2757 2758 /* log sense for fatal error */ 2759 if (cqr->status == DASD_CQR_FAILED) { 2760 dasd_log_sense(cqr, &cqr->irb); 2761 } 2762 2763 /* First of all call extended error reporting. */ 2764 if (dasd_eer_enabled(base) && 2765 cqr->status == DASD_CQR_FAILED) { 2766 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2767 2768 /* restart request */ 2769 cqr->status = DASD_CQR_FILLED; 2770 cqr->retries = 255; 2771 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2772 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2773 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2774 flags); 2775 goto restart; 2776 } 2777 2778 /* Process finished ERP request. 
*/ 2779 if (cqr->refers) { 2780 __dasd_process_erp(base, cqr); 2781 goto restart; 2782 } 2783 2784 /* Rechain finished requests to final queue */ 2785 cqr->endclk = get_tod_clock(); 2786 list_move_tail(&cqr->blocklist, final_queue); 2787 } 2788 } 2789 2790 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2791 { 2792 dasd_schedule_block_bh(cqr->block); 2793 } 2794 2795 static void __dasd_block_start_head(struct dasd_block *block) 2796 { 2797 struct dasd_ccw_req *cqr; 2798 2799 if (list_empty(&block->ccw_queue)) 2800 return; 2801 /* We allways begin with the first requests on the queue, as some 2802 * of previously started requests have to be enqueued on a 2803 * dasd_device again for error recovery. 2804 */ 2805 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2806 if (cqr->status != DASD_CQR_FILLED) 2807 continue; 2808 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2809 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2810 cqr->status = DASD_CQR_FAILED; 2811 cqr->intrc = -EPERM; 2812 dasd_schedule_block_bh(block); 2813 continue; 2814 } 2815 /* Non-temporary stop condition will trigger fail fast */ 2816 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2817 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2818 (!dasd_eer_enabled(block->base))) { 2819 cqr->status = DASD_CQR_FAILED; 2820 cqr->intrc = -ENOLINK; 2821 dasd_schedule_block_bh(block); 2822 continue; 2823 } 2824 /* Don't try to start requests if device is stopped */ 2825 if (block->base->stopped) 2826 return; 2827 2828 /* just a fail safe check, should not happen */ 2829 if (!cqr->startdev) 2830 cqr->startdev = block->base; 2831 2832 /* make sure that the requests we submit find their way back */ 2833 cqr->callback = dasd_return_cqr_cb; 2834 2835 dasd_add_request_tail(cqr); 2836 } 2837 } 2838 2839 /* 2840 * Central dasd_block layer routine. Takes requests from the generic 2841 * block layer request queue, creates ccw requests, enqueues them on 2842 * a dasd_device and processes ccw requests that have been returned. 2843 */ 2844 static void dasd_block_tasklet(struct dasd_block *block) 2845 { 2846 struct list_head final_queue; 2847 struct list_head *l, *n; 2848 struct dasd_ccw_req *cqr; 2849 struct dasd_queue *dq; 2850 2851 atomic_set(&block->tasklet_scheduled, 0); 2852 INIT_LIST_HEAD(&final_queue); 2853 spin_lock_irq(&block->queue_lock); 2854 /* Finish off requests on ccw queue */ 2855 __dasd_process_block_ccw_queue(block, &final_queue); 2856 spin_unlock_irq(&block->queue_lock); 2857 2858 /* Now call the callback function of requests with final status */ 2859 list_for_each_safe(l, n, &final_queue) { 2860 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2861 dq = cqr->dq; 2862 spin_lock_irq(&dq->lock); 2863 list_del_init(&cqr->blocklist); 2864 __dasd_cleanup_cqr(cqr); 2865 spin_unlock_irq(&dq->lock); 2866 } 2867 2868 spin_lock_irq(&block->queue_lock); 2869 /* Now check if the head of the ccw queue needs to be started. 
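 * __dasd_block_start_head() hands requests that are still in
 * DASD_CQR_FILLED over to the dasd_device layer via
 * dasd_add_request_tail(), unless the device is stopped.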
*/ 2870 __dasd_block_start_head(block); 2871 spin_unlock_irq(&block->queue_lock); 2872 2873 if (waitqueue_active(&shutdown_waitq)) 2874 wake_up(&shutdown_waitq); 2875 dasd_put_device(block->base); 2876 } 2877 2878 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2879 { 2880 wake_up(&dasd_flush_wq); 2881 } 2882 2883 /* 2884 * Requeue a request back to the block request queue 2885 * only works for block requests 2886 */ 2887 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2888 { 2889 struct dasd_block *block = cqr->block; 2890 struct request *req; 2891 2892 if (!block) 2893 return -EINVAL; 2894 spin_lock_irq(&cqr->dq->lock); 2895 req = (struct request *) cqr->callback_data; 2896 blk_mq_requeue_request(req, false); 2897 spin_unlock_irq(&cqr->dq->lock); 2898 2899 return 0; 2900 } 2901 2902 /* 2903 * Go through all request on the dasd_block request queue, cancel them 2904 * on the respective dasd_device, and return them to the generic 2905 * block layer. 2906 */ 2907 static int dasd_flush_block_queue(struct dasd_block *block) 2908 { 2909 struct dasd_ccw_req *cqr, *n; 2910 int rc, i; 2911 struct list_head flush_queue; 2912 unsigned long flags; 2913 2914 INIT_LIST_HEAD(&flush_queue); 2915 spin_lock_bh(&block->queue_lock); 2916 rc = 0; 2917 restart: 2918 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2919 /* if this request currently owned by a dasd_device cancel it */ 2920 if (cqr->status >= DASD_CQR_QUEUED) 2921 rc = dasd_cancel_req(cqr); 2922 if (rc < 0) 2923 break; 2924 /* Rechain request (including erp chain) so it won't be 2925 * touched by the dasd_block_tasklet anymore. 2926 * Replace the callback so we notice when the request 2927 * is returned from the dasd_device layer. 2928 */ 2929 cqr->callback = _dasd_wake_block_flush_cb; 2930 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2931 list_move_tail(&cqr->blocklist, &flush_queue); 2932 if (i > 1) 2933 /* moved more than one request - need to restart */ 2934 goto restart; 2935 } 2936 spin_unlock_bh(&block->queue_lock); 2937 /* Now call the callback function of flushed requests */ 2938 restart_cb: 2939 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2940 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 2941 /* Process finished ERP request. */ 2942 if (cqr->refers) { 2943 spin_lock_bh(&block->queue_lock); 2944 __dasd_process_erp(block->base, cqr); 2945 spin_unlock_bh(&block->queue_lock); 2946 /* restart list_for_xx loop since dasd_process_erp 2947 * might remove multiple elements */ 2948 goto restart_cb; 2949 } 2950 /* call the callback function */ 2951 spin_lock_irqsave(&cqr->dq->lock, flags); 2952 cqr->endclk = get_tod_clock(); 2953 list_del_init(&cqr->blocklist); 2954 __dasd_cleanup_cqr(cqr); 2955 spin_unlock_irqrestore(&cqr->dq->lock, flags); 2956 } 2957 return rc; 2958 } 2959 2960 /* 2961 * Schedules a call to dasd_tasklet over the device tasklet. 2962 */ 2963 void dasd_schedule_block_bh(struct dasd_block *block) 2964 { 2965 /* Protect against rescheduling. */ 2966 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 2967 return; 2968 /* life cycle of block is bound to it's base device */ 2969 dasd_get_device(block->base); 2970 tasklet_hi_schedule(&block->tasklet); 2971 } 2972 EXPORT_SYMBOL(dasd_schedule_block_bh); 2973 2974 2975 /* 2976 * SECTION: external block device operations 2977 * (request queue handling, open, release, etc.) 2978 */ 2979 2980 /* 2981 * Dasd request queue function. 
Called from ll_rw_blk.c 2982 */ 2983 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 2984 const struct blk_mq_queue_data *qd) 2985 { 2986 struct dasd_block *block = hctx->queue->queuedata; 2987 struct dasd_queue *dq = hctx->driver_data; 2988 struct request *req = qd->rq; 2989 struct dasd_device *basedev; 2990 struct dasd_ccw_req *cqr; 2991 blk_status_t rc = BLK_STS_OK; 2992 2993 basedev = block->base; 2994 spin_lock_irq(&dq->lock); 2995 if (basedev->state < DASD_STATE_READY) { 2996 DBF_DEV_EVENT(DBF_ERR, basedev, 2997 "device not ready for request %p", req); 2998 rc = BLK_STS_IOERR; 2999 goto out; 3000 } 3001 3002 /* 3003 * if device is stopped do not fetch new requests 3004 * except failfast is active which will let requests fail 3005 * immediately in __dasd_block_start_head() 3006 */ 3007 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3008 DBF_DEV_EVENT(DBF_ERR, basedev, 3009 "device stopped request %p", req); 3010 rc = BLK_STS_RESOURCE; 3011 goto out; 3012 } 3013 3014 if (basedev->features & DASD_FEATURE_READONLY && 3015 rq_data_dir(req) == WRITE) { 3016 DBF_DEV_EVENT(DBF_ERR, basedev, 3017 "Rejecting write request %p", req); 3018 rc = BLK_STS_IOERR; 3019 goto out; 3020 } 3021 3022 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3023 (basedev->features & DASD_FEATURE_FAILFAST || 3024 blk_noretry_request(req))) { 3025 DBF_DEV_EVENT(DBF_ERR, basedev, 3026 "Rejecting failfast request %p", req); 3027 rc = BLK_STS_IOERR; 3028 goto out; 3029 } 3030 3031 cqr = basedev->discipline->build_cp(basedev, block, req); 3032 if (IS_ERR(cqr)) { 3033 if (PTR_ERR(cqr) == -EBUSY || 3034 PTR_ERR(cqr) == -ENOMEM || 3035 PTR_ERR(cqr) == -EAGAIN) { 3036 rc = BLK_STS_RESOURCE; 3037 goto out; 3038 } 3039 DBF_DEV_EVENT(DBF_ERR, basedev, 3040 "CCW creation failed (rc=%ld) on request %p", 3041 PTR_ERR(cqr), req); 3042 rc = BLK_STS_IOERR; 3043 goto out; 3044 } 3045 /* 3046 * Note: callback is set to dasd_return_cqr_cb in 3047 * __dasd_block_start_head to cover erp requests as well 3048 */ 3049 cqr->callback_data = req; 3050 cqr->status = DASD_CQR_FILLED; 3051 cqr->dq = dq; 3052 req->completion_data = cqr; 3053 blk_mq_start_request(req); 3054 spin_lock(&block->queue_lock); 3055 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3056 INIT_LIST_HEAD(&cqr->devlist); 3057 dasd_profile_start(block, cqr, req); 3058 dasd_schedule_block_bh(block); 3059 spin_unlock(&block->queue_lock); 3060 3061 out: 3062 spin_unlock_irq(&dq->lock); 3063 return rc; 3064 } 3065 3066 /* 3067 * Block timeout callback, called from the block layer 3068 * 3069 * Return values: 3070 * BLK_EH_RESET_TIMER if the request should be left running 3071 * BLK_EH_NOT_HANDLED if the request is handled or terminated 3072 * by the driver. 3073 */ 3074 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3075 { 3076 struct dasd_ccw_req *cqr = req->completion_data; 3077 struct dasd_block *block = req->q->queuedata; 3078 struct dasd_device *device; 3079 unsigned long flags; 3080 int rc = 0; 3081 3082 if (!cqr) 3083 return BLK_EH_NOT_HANDLED; 3084 3085 spin_lock_irqsave(&cqr->dq->lock, flags); 3086 device = cqr->startdev ? 
cqr->startdev : block->base; 3087 if (!device->blk_timeout) { 3088 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3089 return BLK_EH_RESET_TIMER; 3090 } 3091 DBF_DEV_EVENT(DBF_WARNING, device, 3092 " dasd_times_out cqr %p status %x", 3093 cqr, cqr->status); 3094 3095 spin_lock(&block->queue_lock); 3096 spin_lock(get_ccwdev_lock(device->cdev)); 3097 cqr->retries = -1; 3098 cqr->intrc = -ETIMEDOUT; 3099 if (cqr->status >= DASD_CQR_QUEUED) { 3100 spin_unlock(get_ccwdev_lock(device->cdev)); 3101 rc = dasd_cancel_req(cqr); 3102 } else if (cqr->status == DASD_CQR_FILLED || 3103 cqr->status == DASD_CQR_NEED_ERP) { 3104 cqr->status = DASD_CQR_TERMINATED; 3105 spin_unlock(get_ccwdev_lock(device->cdev)); 3106 } else if (cqr->status == DASD_CQR_IN_ERP) { 3107 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3108 3109 list_for_each_entry_safe(searchcqr, nextcqr, 3110 &block->ccw_queue, blocklist) { 3111 tmpcqr = searchcqr; 3112 while (tmpcqr->refers) 3113 tmpcqr = tmpcqr->refers; 3114 if (tmpcqr != cqr) 3115 continue; 3116 /* searchcqr is an ERP request for cqr */ 3117 searchcqr->retries = -1; 3118 searchcqr->intrc = -ETIMEDOUT; 3119 if (searchcqr->status >= DASD_CQR_QUEUED) { 3120 spin_unlock(get_ccwdev_lock(device->cdev)); 3121 rc = dasd_cancel_req(searchcqr); 3122 spin_lock(get_ccwdev_lock(device->cdev)); 3123 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3124 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3125 searchcqr->status = DASD_CQR_TERMINATED; 3126 rc = 0; 3127 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3128 /* 3129 * Shouldn't happen; most recent ERP 3130 * request is at the front of queue 3131 */ 3132 continue; 3133 } 3134 break; 3135 } 3136 spin_unlock(get_ccwdev_lock(device->cdev)); 3137 } 3138 dasd_schedule_block_bh(block); 3139 spin_unlock(&block->queue_lock); 3140 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3141 3142 return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 3143 } 3144 3145 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3146 unsigned int idx) 3147 { 3148 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3149 3150 if (!dq) 3151 return -ENOMEM; 3152 3153 spin_lock_init(&dq->lock); 3154 hctx->driver_data = dq; 3155 3156 return 0; 3157 } 3158 3159 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3160 { 3161 kfree(hctx->driver_data); 3162 hctx->driver_data = NULL; 3163 } 3164 3165 static void dasd_request_done(struct request *req) 3166 { 3167 blk_mq_end_request(req, 0); 3168 blk_mq_run_hw_queues(req->q, true); 3169 } 3170 3171 static struct blk_mq_ops dasd_mq_ops = { 3172 .queue_rq = do_dasd_request, 3173 .complete = dasd_request_done, 3174 .timeout = dasd_times_out, 3175 .init_hctx = dasd_init_hctx, 3176 .exit_hctx = dasd_exit_hctx, 3177 }; 3178 3179 /* 3180 * Allocate and initialize request queue and default I/O scheduler. 3181 */ 3182 static int dasd_alloc_queue(struct dasd_block *block) 3183 { 3184 int rc; 3185 3186 block->tag_set.ops = &dasd_mq_ops; 3187 block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; 3188 block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; 3189 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3190 3191 rc = blk_mq_alloc_tag_set(&block->tag_set); 3192 if (rc) 3193 return rc; 3194 3195 block->request_queue = blk_mq_init_queue(&block->tag_set); 3196 if (IS_ERR(block->request_queue)) 3197 return PTR_ERR(block->request_queue); 3198 3199 block->request_queue->queuedata = block; 3200 3201 return 0; 3202 } 3203 3204 /* 3205 * Allocate and initialize request queue. 
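 * The queue and tag set themselves are allocated in dasd_alloc_queue();
 * this function only applies the queue limits, i.e. logical block size,
 * maximum (hardware) sectors, segment limits and, if supported, the
 * discard settings.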
3206 */
3207 static void dasd_setup_queue(struct dasd_block *block)
3208 {
3209 	unsigned int logical_block_size = block->bp_block;
3210 	struct request_queue *q = block->request_queue;
3211 	unsigned int max_bytes, max_discard_sectors;
3212 	int max;
3213 
3214 	if (block->base->features & DASD_FEATURE_USERAW) {
3215 		/*
3216 		 * the max_blocks value for raw_track access is 256
3217 		 * it is higher than the native ECKD value because we
3218 		 * only need one ccw per track
3219 		 * so the max_hw_sectors are
3220 		 * 2048 x 512B = 1024kB = 16 tracks
3221 		 */
3222 		max = 2048;
3223 	} else {
3224 		max = block->base->discipline->max_blocks << block->s2b_shift;
3225 	}
3226 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3227 	q->limits.max_dev_sectors = max;
3228 	blk_queue_logical_block_size(q, logical_block_size);
3229 	blk_queue_max_hw_sectors(q, max);
3230 	blk_queue_max_segments(q, USHRT_MAX);
3231 	/* with page sized segments we can translate each segment into
3232 	 * one idaw/tidaw
3233 	 */
3234 	blk_queue_max_segment_size(q, PAGE_SIZE);
3235 	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
3236 
3237 	/* Only activate blocklayer discard support for devices that support it */
3238 	if (block->base->features & DASD_FEATURE_DISCARD) {
3239 		q->limits.discard_granularity = logical_block_size;
3240 		q->limits.discard_alignment = PAGE_SIZE;
3241 
3242 		/* Calculate max_discard_sectors and make it PAGE aligned */
3243 		max_bytes = USHRT_MAX * logical_block_size;
3244 		max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE;
3245 		max_discard_sectors = max_bytes / logical_block_size;
3246 
3247 		blk_queue_max_discard_sectors(q, max_discard_sectors);
3248 		blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
3249 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3250 	}
3251 }
3252 
3253 /*
3254  * Deactivate and free request queue.
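 * This releases both the request queue and the blk-mq tag set that were
 * set up in dasd_alloc_queue().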
3255 */ 3256 static void dasd_free_queue(struct dasd_block *block) 3257 { 3258 if (block->request_queue) { 3259 blk_cleanup_queue(block->request_queue); 3260 blk_mq_free_tag_set(&block->tag_set); 3261 block->request_queue = NULL; 3262 } 3263 } 3264 3265 static int dasd_open(struct block_device *bdev, fmode_t mode) 3266 { 3267 struct dasd_device *base; 3268 int rc; 3269 3270 base = dasd_device_from_gendisk(bdev->bd_disk); 3271 if (!base) 3272 return -ENODEV; 3273 3274 atomic_inc(&base->block->open_count); 3275 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3276 rc = -ENODEV; 3277 goto unlock; 3278 } 3279 3280 if (!try_module_get(base->discipline->owner)) { 3281 rc = -EINVAL; 3282 goto unlock; 3283 } 3284 3285 if (dasd_probeonly) { 3286 dev_info(&base->cdev->dev, 3287 "Accessing the DASD failed because it is in " 3288 "probeonly mode\n"); 3289 rc = -EPERM; 3290 goto out; 3291 } 3292 3293 if (base->state <= DASD_STATE_BASIC) { 3294 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3295 " Cannot open unrecognized device"); 3296 rc = -ENODEV; 3297 goto out; 3298 } 3299 3300 if ((mode & FMODE_WRITE) && 3301 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3302 (base->features & DASD_FEATURE_READONLY))) { 3303 rc = -EROFS; 3304 goto out; 3305 } 3306 3307 dasd_put_device(base); 3308 return 0; 3309 3310 out: 3311 module_put(base->discipline->owner); 3312 unlock: 3313 atomic_dec(&base->block->open_count); 3314 dasd_put_device(base); 3315 return rc; 3316 } 3317 3318 static void dasd_release(struct gendisk *disk, fmode_t mode) 3319 { 3320 struct dasd_device *base = dasd_device_from_gendisk(disk); 3321 if (base) { 3322 atomic_dec(&base->block->open_count); 3323 module_put(base->discipline->owner); 3324 dasd_put_device(base); 3325 } 3326 } 3327 3328 /* 3329 * Return disk geometry. 3330 */ 3331 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3332 { 3333 struct dasd_device *base; 3334 3335 base = dasd_device_from_gendisk(bdev->bd_disk); 3336 if (!base) 3337 return -ENODEV; 3338 3339 if (!base->discipline || 3340 !base->discipline->fill_geometry) { 3341 dasd_put_device(base); 3342 return -EINVAL; 3343 } 3344 base->discipline->fill_geometry(base->block, geo); 3345 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3346 dasd_put_device(base); 3347 return 0; 3348 } 3349 3350 const struct block_device_operations 3351 dasd_device_operations = { 3352 .owner = THIS_MODULE, 3353 .open = dasd_open, 3354 .release = dasd_release, 3355 .ioctl = dasd_ioctl, 3356 .compat_ioctl = dasd_ioctl, 3357 .getgeo = dasd_getgeo, 3358 }; 3359 3360 /******************************************************************************* 3361 * end of block device operations 3362 */ 3363 3364 static void 3365 dasd_exit(void) 3366 { 3367 #ifdef CONFIG_PROC_FS 3368 dasd_proc_exit(); 3369 #endif 3370 dasd_eer_exit(); 3371 if (dasd_page_cache != NULL) { 3372 kmem_cache_destroy(dasd_page_cache); 3373 dasd_page_cache = NULL; 3374 } 3375 dasd_gendisk_exit(); 3376 dasd_devmap_exit(); 3377 if (dasd_debug_area != NULL) { 3378 debug_unregister(dasd_debug_area); 3379 dasd_debug_area = NULL; 3380 } 3381 dasd_statistics_removeroot(); 3382 } 3383 3384 /* 3385 * SECTION: common functions for ccw_driver use 3386 */ 3387 3388 /* 3389 * Is the device read-only? 3390 * Note that this function does not report the setting of the 3391 * readonly device attribute, but how it is configured in z/VM. 
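 * Under z/VM a DIAG 0x210 is issued for the device and bit 0x80 of the
 * virtual device flags (vrdcvfla) decides the result; anywhere else the
 * function simply returns 0.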
3392 */ 3393 int dasd_device_is_ro(struct dasd_device *device) 3394 { 3395 struct ccw_dev_id dev_id; 3396 struct diag210 diag_data; 3397 int rc; 3398 3399 if (!MACHINE_IS_VM) 3400 return 0; 3401 ccw_device_get_id(device->cdev, &dev_id); 3402 memset(&diag_data, 0, sizeof(diag_data)); 3403 diag_data.vrdcdvno = dev_id.devno; 3404 diag_data.vrdclen = sizeof(diag_data); 3405 rc = diag210(&diag_data); 3406 if (rc == 0 || rc == 2) { 3407 return diag_data.vrdcvfla & 0x80; 3408 } else { 3409 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3410 dev_id.devno, rc); 3411 return 0; 3412 } 3413 } 3414 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3415 3416 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3417 { 3418 struct ccw_device *cdev = data; 3419 int ret; 3420 3421 ret = ccw_device_set_online(cdev); 3422 if (ret) 3423 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3424 dev_name(&cdev->dev), ret); 3425 } 3426 3427 /* 3428 * Initial attempt at a probe function. this can be simplified once 3429 * the other detection code is gone. 3430 */ 3431 int dasd_generic_probe(struct ccw_device *cdev, 3432 struct dasd_discipline *discipline) 3433 { 3434 int ret; 3435 3436 ret = dasd_add_sysfs_files(cdev); 3437 if (ret) { 3438 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3439 "dasd_generic_probe: could not add " 3440 "sysfs entries"); 3441 return ret; 3442 } 3443 cdev->handler = &dasd_int_handler; 3444 3445 /* 3446 * Automatically online either all dasd devices (dasd_autodetect) 3447 * or all devices specified with dasd= parameters during 3448 * initial probe. 3449 */ 3450 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3451 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3452 async_schedule(dasd_generic_auto_online, cdev); 3453 return 0; 3454 } 3455 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3456 3457 void dasd_generic_free_discipline(struct dasd_device *device) 3458 { 3459 /* Forget the discipline information. */ 3460 if (device->discipline) { 3461 if (device->discipline->uncheck_device) 3462 device->discipline->uncheck_device(device); 3463 module_put(device->discipline->owner); 3464 device->discipline = NULL; 3465 } 3466 if (device->base_discipline) { 3467 module_put(device->base_discipline->owner); 3468 device->base_discipline = NULL; 3469 } 3470 } 3471 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3472 3473 /* 3474 * This will one day be called from a global not_oper handler. 3475 * It is also used by driver_unregister during module unload. 3476 */ 3477 void dasd_generic_remove(struct ccw_device *cdev) 3478 { 3479 struct dasd_device *device; 3480 struct dasd_block *block; 3481 3482 cdev->handler = NULL; 3483 3484 device = dasd_device_from_cdev(cdev); 3485 if (IS_ERR(device)) { 3486 dasd_remove_sysfs_files(cdev); 3487 return; 3488 } 3489 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3490 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3491 /* Already doing offline processing */ 3492 dasd_put_device(device); 3493 dasd_remove_sysfs_files(cdev); 3494 return; 3495 } 3496 /* 3497 * This device is removed unconditionally. Set offline 3498 * flag to prevent dasd_open from opening it while it is 3499 * no quite down yet. 3500 */ 3501 dasd_set_target_state(device, DASD_STATE_NEW); 3502 /* dasd_delete_device destroys the device reference. 
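 * Fetch the block pointer beforehand so the block can still be freed
 * after the device itself is gone.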
*/ 3503 block = device->block; 3504 dasd_delete_device(device); 3505 /* 3506 * life cycle of block is bound to device, so delete it after 3507 * device was safely removed 3508 */ 3509 if (block) 3510 dasd_free_block(block); 3511 3512 dasd_remove_sysfs_files(cdev); 3513 } 3514 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3515 3516 /* 3517 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3518 * the device is detected for the first time and is supposed to be used 3519 * or the user has started activation through sysfs. 3520 */ 3521 int dasd_generic_set_online(struct ccw_device *cdev, 3522 struct dasd_discipline *base_discipline) 3523 { 3524 struct dasd_discipline *discipline; 3525 struct dasd_device *device; 3526 int rc; 3527 3528 /* first online clears initial online feature flag */ 3529 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3530 device = dasd_create_device(cdev); 3531 if (IS_ERR(device)) 3532 return PTR_ERR(device); 3533 3534 discipline = base_discipline; 3535 if (device->features & DASD_FEATURE_USEDIAG) { 3536 if (!dasd_diag_discipline_pointer) { 3537 /* Try to load the required module. */ 3538 rc = request_module(DASD_DIAG_MOD); 3539 if (rc) { 3540 pr_warn("%s Setting the DASD online failed " 3541 "because the required module %s " 3542 "could not be loaded (rc=%d)\n", 3543 dev_name(&cdev->dev), DASD_DIAG_MOD, 3544 rc); 3545 dasd_delete_device(device); 3546 return -ENODEV; 3547 } 3548 } 3549 /* Module init could have failed, so check again here after 3550 * request_module(). */ 3551 if (!dasd_diag_discipline_pointer) { 3552 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3553 dev_name(&cdev->dev)); 3554 dasd_delete_device(device); 3555 return -ENODEV; 3556 } 3557 discipline = dasd_diag_discipline_pointer; 3558 } 3559 if (!try_module_get(base_discipline->owner)) { 3560 dasd_delete_device(device); 3561 return -EINVAL; 3562 } 3563 if (!try_module_get(discipline->owner)) { 3564 module_put(base_discipline->owner); 3565 dasd_delete_device(device); 3566 return -EINVAL; 3567 } 3568 device->base_discipline = base_discipline; 3569 device->discipline = discipline; 3570 3571 /* check_device will allocate block device if necessary */ 3572 rc = discipline->check_device(device); 3573 if (rc) { 3574 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3575 dev_name(&cdev->dev), discipline->name, rc); 3576 module_put(discipline->owner); 3577 module_put(base_discipline->owner); 3578 dasd_delete_device(device); 3579 return rc; 3580 } 3581 3582 dasd_set_target_state(device, DASD_STATE_ONLINE); 3583 if (device->state <= DASD_STATE_KNOWN) { 3584 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3585 dev_name(&cdev->dev)); 3586 rc = -ENODEV; 3587 dasd_set_target_state(device, DASD_STATE_NEW); 3588 if (device->block) 3589 dasd_free_block(device->block); 3590 dasd_delete_device(device); 3591 } else 3592 pr_debug("dasd_generic device %s found\n", 3593 dev_name(&cdev->dev)); 3594 3595 wait_event(dasd_init_waitq, _wait_for_device(device)); 3596 3597 dasd_put_device(device); 3598 return rc; 3599 } 3600 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3601 3602 int dasd_generic_set_offline(struct ccw_device *cdev) 3603 { 3604 struct dasd_device *device; 3605 struct dasd_block *block; 3606 int max_count, open_count, rc; 3607 unsigned long flags; 3608 3609 rc = 0; 3610 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3611 device = dasd_device_from_cdev_locked(cdev); 3612 if (IS_ERR(device)) { 3613 
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3614 return PTR_ERR(device); 3615 } 3616 3617 /* 3618 * We must make sure that this device is currently not in use. 3619 * The open_count is increased for every opener, that includes 3620 * the blkdev_get in dasd_scan_partitions. We are only interested 3621 * in the other openers. 3622 */ 3623 if (device->block) { 3624 max_count = device->block->bdev ? 0 : -1; 3625 open_count = atomic_read(&device->block->open_count); 3626 if (open_count > max_count) { 3627 if (open_count > 0) 3628 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3629 dev_name(&cdev->dev), open_count); 3630 else 3631 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3632 dev_name(&cdev->dev)); 3633 rc = -EBUSY; 3634 goto out_err; 3635 } 3636 } 3637 3638 /* 3639 * Test if the offline processing is already running and exit if so. 3640 * If a safe offline is being processed this could only be a normal 3641 * offline that should be able to overtake the safe offline and 3642 * cancel any I/O we do not want to wait for any longer 3643 */ 3644 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3645 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3646 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3647 &device->flags); 3648 } else { 3649 rc = -EBUSY; 3650 goto out_err; 3651 } 3652 } 3653 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3654 3655 /* 3656 * if safe_offline is called set safe_offline_running flag and 3657 * clear safe_offline so that a call to normal offline 3658 * can overrun safe_offline processing 3659 */ 3660 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3661 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3662 /* need to unlock here to wait for outstanding I/O */ 3663 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3664 /* 3665 * If we want to set the device safe offline all IO operations 3666 * should be finished before continuing the offline process 3667 * so sync bdev first and then wait for our queues to become 3668 * empty 3669 */ 3670 if (device->block) { 3671 rc = fsync_bdev(device->block->bdev); 3672 if (rc != 0) 3673 goto interrupted; 3674 } 3675 dasd_schedule_device_bh(device); 3676 rc = wait_event_interruptible(shutdown_waitq, 3677 _wait_for_empty_queues(device)); 3678 if (rc != 0) 3679 goto interrupted; 3680 3681 /* 3682 * check if a normal offline process overtook the offline 3683 * processing in this case simply do nothing beside returning 3684 * that we got interrupted 3685 * otherwise mark safe offline as not running any longer and 3686 * continue with normal offline 3687 */ 3688 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3689 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3690 rc = -ERESTARTSYS; 3691 goto out_err; 3692 } 3693 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3694 } 3695 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3696 3697 dasd_set_target_state(device, DASD_STATE_NEW); 3698 /* dasd_delete_device destroys the device reference. 
*/ 3699 block = device->block; 3700 dasd_delete_device(device); 3701 /* 3702 * life cycle of block is bound to device, so delete it after 3703 * device was safely removed 3704 */ 3705 if (block) 3706 dasd_free_block(block); 3707 3708 return 0; 3709 3710 interrupted: 3711 /* interrupted by signal */ 3712 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3713 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3714 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3715 out_err: 3716 dasd_put_device(device); 3717 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3718 return rc; 3719 } 3720 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3721 3722 int dasd_generic_last_path_gone(struct dasd_device *device) 3723 { 3724 struct dasd_ccw_req *cqr; 3725 3726 dev_warn(&device->cdev->dev, "No operational channel path is left " 3727 "for the device\n"); 3728 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3729 /* First of all call extended error reporting. */ 3730 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3731 3732 if (device->state < DASD_STATE_BASIC) 3733 return 0; 3734 /* Device is active. We want to keep it. */ 3735 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3736 if ((cqr->status == DASD_CQR_IN_IO) || 3737 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3738 cqr->status = DASD_CQR_QUEUED; 3739 cqr->retries++; 3740 } 3741 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3742 dasd_device_clear_timer(device); 3743 dasd_schedule_device_bh(device); 3744 return 1; 3745 } 3746 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3747 3748 int dasd_generic_path_operational(struct dasd_device *device) 3749 { 3750 dev_info(&device->cdev->dev, "A channel path to the device has become " 3751 "operational\n"); 3752 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3753 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3754 if (device->stopped & DASD_UNRESUMED_PM) { 3755 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3756 dasd_restore_device(device); 3757 return 1; 3758 } 3759 dasd_schedule_device_bh(device); 3760 if (device->block) { 3761 dasd_schedule_block_bh(device->block); 3762 blk_mq_run_hw_queues(device->block->request_queue, true); 3763 } 3764 3765 if (!device->stopped) 3766 wake_up(&generic_waitq); 3767 3768 return 1; 3769 } 3770 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3771 3772 int dasd_generic_notify(struct ccw_device *cdev, int event) 3773 { 3774 struct dasd_device *device; 3775 int ret; 3776 3777 device = dasd_device_from_cdev_locked(cdev); 3778 if (IS_ERR(device)) 3779 return 0; 3780 ret = 0; 3781 switch (event) { 3782 case CIO_GONE: 3783 case CIO_BOXED: 3784 case CIO_NO_PATH: 3785 dasd_path_no_path(device); 3786 ret = dasd_generic_last_path_gone(device); 3787 break; 3788 case CIO_OPER: 3789 ret = 1; 3790 if (dasd_path_get_opm(device)) 3791 ret = dasd_generic_path_operational(device); 3792 break; 3793 } 3794 dasd_put_device(device); 3795 return ret; 3796 } 3797 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3798 3799 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3800 { 3801 struct dasd_device *device; 3802 int chp, oldopm, hpfpm, ifccpm; 3803 3804 device = dasd_device_from_cdev_locked(cdev); 3805 if (IS_ERR(device)) 3806 return; 3807 3808 oldopm = dasd_path_get_opm(device); 3809 for (chp = 0; chp < 8; chp++) { 3810 if (path_event[chp] & PE_PATH_GONE) { 3811 dasd_path_notoper(device, chp); 3812 } 3813 if (path_event[chp] & PE_PATH_AVAILABLE) { 3814 dasd_path_available(device, chp); 3815 
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	struct dasd_device *device;
	int chp, oldopm, hpfpm, ifccpm;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;

	oldopm = dasd_path_get_opm(device);
	for (chp = 0; chp < 8; chp++) {
		if (path_event[chp] & PE_PATH_GONE) {
			dasd_path_notoper(device, chp);
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			dasd_path_available(device, chp);
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			if (!dasd_path_is_operational(device, chp) &&
			    !dasd_path_need_verify(device, chp)) {
				/*
				 * We cannot establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first.
				 */
				dasd_path_available(device, chp);
				dasd_schedule_device_bh(device);
			}
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
	}
	hpfpm = dasd_path_get_hpfpm(device);
	ifccpm = dasd_path_get_ifccpm(device);
	if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to HPF errors; disable HPF entirely and
		 * use the path(s) again.
		 */
		if (device->discipline->disable_hpf)
			device->discipline->disable_hpf(device);
		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
		dasd_path_set_tbvpm(device, hpfpm);
		dasd_schedule_device_bh(device);
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to IFCC errors; trigger path verification
		 * on the paths with IFCC errors.
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

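/*
 * Illustration only, not part of this file's logic: a discipline driver
 * (e.g. dasd_eckd or dasd_fba) typically wires the generic helpers above
 * into its struct ccw_driver. The sketch below is a hedged example; the
 * dasd_xxx_* names are placeholders for the discipline's own symbols:
 *
 *	static struct ccw_driver dasd_xxx_driver = {
 *		...
 *		.set_offline = dasd_generic_set_offline,
 *		.notify      = dasd_generic_notify,
 *		.path_event  = dasd_generic_path_event,
 *		.shutdown    = dasd_generic_shutdown,
 *		.freeze      = dasd_generic_pm_freeze,
 *		.restore     = dasd_generic_restore_device,
 *	};
 *
 * dasd_generic_verify_path() is usually called from the discipline's path
 * verification worker once a logical path mask (lpm) has been checked.
 */
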
/*
 * Clear active requests and requeue them to the block layer if possible.
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &requeue_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));

		/* mark sleepon requests as ended */
		if (cqr->callback_data == DASD_SLEEPON_START_TAG)
			cqr->callback_data = DASD_SLEEPON_END_TAG;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}

		/*
		 * Requeueing requests to the block layer only works for
		 * block device requests.
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		if (cqr->block)
			list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * If requests remain then they are internal requests and go back
	 * to the device queue.
	 */
	if (!list_empty(&requeue_queue)) {
		/* move the requeued requests back to the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	/* wake up generic waitqueue for possibly ended sleepon requests */
	wake_up(&generic_waitq);
	return rc;
}

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);

	if (device->discipline->freeze)
		device->discipline->freeze(device);

	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);

	return dasd_generic_requeue_all_requests(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new I/O again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * Call the discipline restore function; if the device is stopped
	 * do nothing, e.g. for disconnected devices.
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * If the resume failed for the DASD we put it in
		 * an UNRESUMED stop state.
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		blk_mq_run_hw_queues(device->block->request_queue, true);
	}

	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}

	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

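/*
 * Illustration only, a hedged sketch that is not part of this file's
 * logic: a discipline normally calls dasd_generic_read_dev_chars() during
 * its online/initialization path to fill its Read Device Characteristics
 * buffer, roughly like this; DASD_XXX_MAGIC and private->rdc_data are
 * placeholders for the discipline's own magic and RDC structure:
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_XXX_MAGIC,
 *					 &private->rdc_data,
 *					 sizeof(private->rdc_data));
 *	if (rc)
 *		... device characteristics could not be read ...
 */
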
/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1: /* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

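/*
 * Illustration only (hedged sketch): interrupt and error-recovery code
 * typically uses dasd_get_sense() to access the 32 sense bytes without
 * caring whether the request ran in command or transport mode, e.g.:
 *
 *	sense = dasd_get_sense(irb);
 *	if (sense)
 *		... inspect the 32 sense bytes ...
 */
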
void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);