// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#include <linux/export.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/machine.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);
static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
				   unsigned int);
/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block device structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
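 * Once the analysis has succeeded, the queue limits (logical block size,
 * maximum transfer size and, if the discipline supports it, discard) are
 * taken from the discipline, the capacity is set and the partitions are
 * scanned.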
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	struct queue_limits lim;
	int rc = 0;

	/* make disk known with correct capacity */
	if (!block) {
		device->state = DASD_STATE_READY;
		goto out;
	}

	if (block->base->discipline->do_analysis != NULL)
		rc = block->base->discipline->do_analysis(block);
	if (rc) {
		if (rc == -EAGAIN)
			return rc;
		device->state = DASD_STATE_UNFMT;
		kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
			       KOBJ_CHANGE);
		goto out;
	}

	lim = queue_limits_start_update(block->gdp->queue);
	lim.max_dev_sectors = device->discipline->max_sectors(block);
	lim.max_hw_sectors = lim.max_dev_sectors;
	lim.logical_block_size = block->bp_block;
	/*
	 * Adjust dma_alignment to match block_size - 1
	 * to ensure proper buffer alignment checks in the block layer.
	 */
	lim.dma_alignment = lim.logical_block_size - 1;

	if (device->discipline->has_discard) {
		unsigned int max_bytes;

		lim.discard_granularity = block->bp_block;

		/* Calculate max_discard_sectors and make it PAGE aligned */
		max_bytes = USHRT_MAX * block->bp_block;
		max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);

		lim.max_hw_discard_sectors = max_bytes / block->bp_block;
		lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors;
	}
	rc = queue_limits_commit_update(block->gdp->queue, &lim);
	if (rc)
		return rc;

	set_capacity(block->gdp, block->blocks << block->s2b_shift);
	device->state = DASD_STATE_READY;

	rc = dasd_scan_partitions(block);
	if (rc) {
		device->state = DASD_STATE_BASIC;
		return rc;
	}

out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
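 * A change uevent is also sent, either for the whole disk only (raw-track
 * access mode) or for the disk and its partitions.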
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
				       KOBJ_CHANGE);
			return 0;
		}
		disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
			    KOBJ_CHANGE);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW))
		disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
			    KOBJ_CHANGE);
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable devices with device numbers in [from..to].
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
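 * The current length of the block ccw queue (capped at 31) is sampled and
 * counted in the global and per-block request-number histograms; the same
 * is done for the start device with its own ccw queue, and read requests
 * are counted separately.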
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (!device->profile.data)
		return;

	spin_lock(get_ccwdev_lock(device->cdev));
	counter = 1; /* request is not yet queued on the start device */
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	spin_unlock(get_ccwdev_lock(device->cdev));

	spin_lock(&device->profile.lock);
	device->profile.data->dasd_io_nr_req[counter]++;
	if (rq_data_dir(req) == READ)
		device->profile.data->dasd_read_nr_req[counter]++;
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
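 * The dasd_profile_counter() helper below maps a value to one of 32
 * histogram buckets: the index is the smallest i for which
 * value >> (2 + i) is zero, capped at 31, so bucket 0 holds values below 4
 * and each further bucket doubles the range.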
 */

#define dasd_profile_counter(value, index)			   \
{								   \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		;						   \
}

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif /* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
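 * Up to five clear attempts are made while the request is still in I/O;
 * -EINVAL from ccw_device_clear is treated as if the request had already
 * terminated.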
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			dev_err(&device->cdev->dev,
				"Unexpected error during request termination %d\n", rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		dev_err(&device->cdev->dev,
			"Start I/O ran out of retries\n");
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	/*
	 * remember the amount of formatted tracks to prevent double format on
	 * ESE devices
	 */
	if (cqr->block)
		cqr->trkcount = atomic_read(&cqr->block->trkcount);

	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g.
		 * the ppm) then we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		/* this is equivalent to CC=3 for SSCH report this to EER */
		dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		dev_err(&device->cdev->dev,
			"Unexpected error during request start %d", rc);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = timer_container_of(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		timer_delete(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	timer_delete(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->gdp)
			blk_mq_run_hw_queues(device->block->gdp->queue, true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
		(irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
		 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	if (sense[1] & SNS1_NO_REC_FOUND)
		return 1;

	if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
	    scsw_is_tm(&irb->scsw) &&
	    !(sense[2] & SNS2_ENV_DATA_PRESENT))
		return 1;

	return 0;
}

static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
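 * The interrupt parameter normally points back to the dasd_ccw_req that
 * owns the I/O. Error and unsolicited conditions are examined first
 * (sense data, attention, out-of-space), then the request status is
 * updated and, if possible, the next queued request is started right away.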
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int it_suppressed = 0;
	struct request *req;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			it_suppressed = (sense[1] & SNS1_INV_TRACK_FORMAT) &&
				!(sense[2] & SNS2_ENV_DATA_PRESENT) &&
				test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(it_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		req = dasd_get_callback_data(cqr);
		if (!req) {
			cqr->status = DASD_CQR_ERROR;
			return;
		}
		if (rq_data_dir(req) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
*/ 1909 if (cqr->status == DASD_CQR_QUEUED || 1910 cqr->status == DASD_CQR_IN_IO || 1911 cqr->status == DASD_CQR_CLEAR_PENDING) 1912 continue; 1913 if (cqr->status == DASD_CQR_ERROR) { 1914 __dasd_device_recovery(device, cqr); 1915 } 1916 /* Rechain finished requests to final queue */ 1917 list_move_tail(&cqr->devlist, final_queue); 1918 } 1919 } 1920 1921 static void __dasd_process_cqr(struct dasd_device *device, 1922 struct dasd_ccw_req *cqr) 1923 { 1924 switch (cqr->status) { 1925 case DASD_CQR_SUCCESS: 1926 cqr->status = DASD_CQR_DONE; 1927 break; 1928 case DASD_CQR_ERROR: 1929 cqr->status = DASD_CQR_NEED_ERP; 1930 break; 1931 case DASD_CQR_CLEARED: 1932 cqr->status = DASD_CQR_TERMINATED; 1933 break; 1934 default: 1935 dev_err(&device->cdev->dev, 1936 "Unexpected CQR status %02x", cqr->status); 1937 BUG(); 1938 } 1939 if (cqr->callback) 1940 cqr->callback(cqr, cqr->callback_data); 1941 } 1942 1943 /* 1944 * the cqrs from the final queue are returned to the upper layer 1945 * by setting a dasd_block state and calling the callback function 1946 */ 1947 static void __dasd_device_process_final_queue(struct dasd_device *device, 1948 struct list_head *final_queue) 1949 { 1950 struct list_head *l, *n; 1951 struct dasd_ccw_req *cqr; 1952 struct dasd_block *block; 1953 1954 list_for_each_safe(l, n, final_queue) { 1955 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1956 list_del_init(&cqr->devlist); 1957 block = cqr->block; 1958 if (!block) { 1959 __dasd_process_cqr(device, cqr); 1960 } else { 1961 spin_lock_bh(&block->queue_lock); 1962 __dasd_process_cqr(device, cqr); 1963 spin_unlock_bh(&block->queue_lock); 1964 } 1965 } 1966 } 1967 1968 /* 1969 * check if device should be autoquiesced due to too many timeouts 1970 */ 1971 static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device, 1972 struct dasd_ccw_req *cqr) 1973 { 1974 if ((device->default_retries - cqr->retries) >= device->aq_timeouts) 1975 dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS); 1976 } 1977 1978 /* 1979 * Take a look at the first request on the ccw queue and check 1980 * if it reached its expire time. If so, terminate the IO. 
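 * A request counts as expired once jiffies has passed
 * starttime + expires; if termination fails, it is retried in 5 seconds.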
1981 */ 1982 static void __dasd_device_check_expire(struct dasd_device *device) 1983 { 1984 struct dasd_ccw_req *cqr; 1985 1986 if (list_empty(&device->ccw_queue)) 1987 return; 1988 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1989 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1990 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1991 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1992 /* 1993 * IO in safe offline processing should not 1994 * run out of retries 1995 */ 1996 cqr->retries++; 1997 } 1998 if (device->discipline->term_IO(cqr) != 0) { 1999 /* Hmpf, try again in 5 sec */ 2000 dev_err(&device->cdev->dev, 2001 "CQR timed out (%lus) but cannot be ended, retrying in 5s\n", 2002 (cqr->expires / HZ)); 2003 cqr->expires += 5*HZ; 2004 dasd_device_set_timer(device, 5*HZ); 2005 } else { 2006 dev_err(&device->cdev->dev, 2007 "CQR timed out (%lus), %i retries remaining\n", 2008 (cqr->expires / HZ), cqr->retries); 2009 } 2010 __dasd_device_check_autoquiesce_timeout(device, cqr); 2011 } 2012 } 2013 2014 /* 2015 * return 1 when device is not eligible for IO 2016 */ 2017 static int __dasd_device_is_unusable(struct dasd_device *device, 2018 struct dasd_ccw_req *cqr) 2019 { 2020 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC); 2021 2022 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2023 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2024 /* 2025 * dasd is being set offline 2026 * but it is no safe offline where we have to allow I/O 2027 */ 2028 return 1; 2029 } 2030 if (device->stopped) { 2031 if (device->stopped & mask) { 2032 /* stopped and CQR will not change that. */ 2033 return 1; 2034 } 2035 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2036 /* CQR is not able to change device to 2037 * operational. */ 2038 return 1; 2039 } 2040 /* CQR required to get device operational. */ 2041 } 2042 return 0; 2043 } 2044 2045 /* 2046 * Take a look at the first request on the ccw queue and check 2047 * if it needs to be started. 
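 * Only the head of the queue is considered, so requests are started
 * strictly in the order in which they were queued.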
2048 */ 2049 static void __dasd_device_start_head(struct dasd_device *device) 2050 { 2051 struct dasd_ccw_req *cqr; 2052 int rc; 2053 2054 if (list_empty(&device->ccw_queue)) 2055 return; 2056 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2057 if (cqr->status != DASD_CQR_QUEUED) 2058 return; 2059 /* if device is not usable return request to upper layer */ 2060 if (__dasd_device_is_unusable(device, cqr)) { 2061 cqr->intrc = -EAGAIN; 2062 cqr->status = DASD_CQR_CLEARED; 2063 dasd_schedule_device_bh(device); 2064 return; 2065 } 2066 2067 rc = device->discipline->start_IO(cqr); 2068 if (rc == 0) 2069 dasd_device_set_timer(device, cqr->expires); 2070 else if (rc == -EACCES) { 2071 dasd_schedule_device_bh(device); 2072 } else 2073 /* Hmpf, try again in 1/2 sec */ 2074 dasd_device_set_timer(device, 50); 2075 } 2076 2077 static void __dasd_device_check_path_events(struct dasd_device *device) 2078 { 2079 __u8 tbvpm, fcsecpm; 2080 int rc; 2081 2082 tbvpm = dasd_path_get_tbvpm(device); 2083 fcsecpm = dasd_path_get_fcsecpm(device); 2084 2085 if (!tbvpm && !fcsecpm) 2086 return; 2087 2088 if (device->stopped & ~(DASD_STOPPED_DC_WAIT)) 2089 return; 2090 2091 dasd_path_clear_all_verify(device); 2092 dasd_path_clear_all_fcsec(device); 2093 2094 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm); 2095 if (rc) { 2096 dasd_path_add_tbvpm(device, tbvpm); 2097 dasd_path_add_fcsecpm(device, fcsecpm); 2098 dasd_device_set_timer(device, 50); 2099 } 2100 }; 2101 2102 /* 2103 * Go through all request on the dasd_device request queue, 2104 * terminate them on the cdev if necessary, and return them to the 2105 * submitting layer via callback. 2106 * Note: 2107 * Make sure that all 'submitting layers' still exist when 2108 * this function is called!. In other words, when 'device' is a base 2109 * device then all block layer requests must have been removed before 2110 * via dasd_flush_block_queue. 2111 */ 2112 int dasd_flush_device_queue(struct dasd_device *device) 2113 { 2114 struct dasd_ccw_req *cqr, *n; 2115 int rc; 2116 struct list_head flush_queue; 2117 2118 INIT_LIST_HEAD(&flush_queue); 2119 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2120 rc = 0; 2121 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2122 /* Check status and move request to flush_queue */ 2123 switch (cqr->status) { 2124 case DASD_CQR_IN_IO: 2125 rc = device->discipline->term_IO(cqr); 2126 if (rc) { 2127 /* unable to terminate request */ 2128 dev_err(&device->cdev->dev, 2129 "Flushing the DASD request queue failed\n"); 2130 /* stop flush processing */ 2131 goto finished; 2132 } 2133 break; 2134 case DASD_CQR_QUEUED: 2135 cqr->stopclk = get_tod_clock(); 2136 cqr->status = DASD_CQR_CLEARED; 2137 break; 2138 default: /* no need to modify the others */ 2139 break; 2140 } 2141 list_move_tail(&cqr->devlist, &flush_queue); 2142 } 2143 finished: 2144 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2145 /* 2146 * After this point all requests must be in state CLEAR_PENDING, 2147 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2148 * one of the others. 
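 * The interrupt handler performs that transition and wakes up
 * dasd_flush_wq once the clear interrupt has arrived.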
2149 */ 2150 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2151 wait_event(dasd_flush_wq, 2152 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2153 /* 2154 * Now set each request back to TERMINATED, DONE or NEED_ERP 2155 * and call the callback function of flushed requests 2156 */ 2157 __dasd_device_process_final_queue(device, &flush_queue); 2158 return rc; 2159 } 2160 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2161 2162 /* 2163 * Acquire the device lock and process queues for the device. 2164 */ 2165 static void dasd_device_tasklet(unsigned long data) 2166 { 2167 struct dasd_device *device = (struct dasd_device *) data; 2168 struct list_head final_queue; 2169 2170 atomic_set (&device->tasklet_scheduled, 0); 2171 INIT_LIST_HEAD(&final_queue); 2172 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2173 /* Check expire time of first request on the ccw queue. */ 2174 __dasd_device_check_expire(device); 2175 /* find final requests on ccw queue */ 2176 __dasd_device_process_ccw_queue(device, &final_queue); 2177 __dasd_device_check_path_events(device); 2178 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2179 /* Now call the callback function of requests with final status */ 2180 __dasd_device_process_final_queue(device, &final_queue); 2181 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2182 /* Now check if the head of the ccw queue needs to be started. */ 2183 __dasd_device_start_head(device); 2184 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2185 if (waitqueue_active(&shutdown_waitq)) 2186 wake_up(&shutdown_waitq); 2187 dasd_put_device(device); 2188 } 2189 2190 /* 2191 * Schedules a call to dasd_tasklet over the device tasklet. 2192 */ 2193 void dasd_schedule_device_bh(struct dasd_device *device) 2194 { 2195 /* Protect against rescheduling. */ 2196 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2197 return; 2198 dasd_get_device(device); 2199 tasklet_hi_schedule(&device->tasklet); 2200 } 2201 EXPORT_SYMBOL(dasd_schedule_device_bh); 2202 2203 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2204 { 2205 device->stopped |= bits; 2206 } 2207 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2208 2209 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2210 { 2211 device->stopped &= ~bits; 2212 if (!device->stopped) 2213 wake_up(&generic_waitq); 2214 } 2215 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2216 2217 /* 2218 * Queue a request to the head of the device ccw_queue. 2219 * Start the I/O if possible. 2220 */ 2221 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2222 { 2223 struct dasd_device *device; 2224 unsigned long flags; 2225 2226 device = cqr->startdev; 2227 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2228 cqr->status = DASD_CQR_QUEUED; 2229 list_add(&cqr->devlist, &device->ccw_queue); 2230 /* let the bh start the request to keep them in order */ 2231 dasd_schedule_device_bh(device); 2232 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2233 } 2234 EXPORT_SYMBOL(dasd_add_request_head); 2235 2236 /* 2237 * Queue a request to the tail of the device ccw_queue. 2238 * Start the I/O if possible. 
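 *
 * Synchronous users typically reach this through dasd_sleep_on(); a
 * minimal sketch of that pattern, with cplength/datasize as placeholders
 * (compare dasd_generic_read_dev_chars() at the end of this file):
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device, NULL);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... set up the channel program in cqr->cpaddr and cqr->data ...
 *	cqr->startdev = device;
 *	cqr->memdev = device;
 *	cqr->expires = 10 * HZ;
 *	cqr->retries = 256;
 *	cqr->buildclk = get_tod_clock();
 *	cqr->status = DASD_CQR_FILLED;
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, cqr->memdev);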
2239 */ 2240 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2241 { 2242 struct dasd_device *device; 2243 unsigned long flags; 2244 2245 device = cqr->startdev; 2246 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2247 cqr->status = DASD_CQR_QUEUED; 2248 list_add_tail(&cqr->devlist, &device->ccw_queue); 2249 /* let the bh start the request to keep them in order */ 2250 dasd_schedule_device_bh(device); 2251 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2252 } 2253 EXPORT_SYMBOL(dasd_add_request_tail); 2254 2255 /* 2256 * Wakeup helper for the 'sleep_on' functions. 2257 */ 2258 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2259 { 2260 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2261 cqr->callback_data = DASD_SLEEPON_END_TAG; 2262 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2263 wake_up(&generic_waitq); 2264 } 2265 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2266 2267 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2268 { 2269 struct dasd_device *device; 2270 int rc; 2271 2272 device = cqr->startdev; 2273 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2274 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2275 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2276 return rc; 2277 } 2278 2279 /* 2280 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2281 */ 2282 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2283 { 2284 struct dasd_device *device; 2285 dasd_erp_fn_t erp_fn; 2286 2287 if (cqr->status == DASD_CQR_FILLED) 2288 return 0; 2289 device = cqr->startdev; 2290 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2291 if (cqr->status == DASD_CQR_TERMINATED) { 2292 device->discipline->handle_terminated_request(cqr); 2293 return 1; 2294 } 2295 if (cqr->status == DASD_CQR_NEED_ERP) { 2296 erp_fn = device->discipline->erp_action(cqr); 2297 erp_fn(cqr); 2298 return 1; 2299 } 2300 if (cqr->status == DASD_CQR_FAILED) 2301 dasd_log_sense(cqr, &cqr->irb); 2302 if (cqr->refers) { 2303 __dasd_process_erp(device, cqr); 2304 return 1; 2305 } 2306 } 2307 return 0; 2308 } 2309 2310 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2311 { 2312 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2313 if (cqr->refers) /* erp is not done yet */ 2314 return 1; 2315 return ((cqr->status != DASD_CQR_DONE) && 2316 (cqr->status != DASD_CQR_FAILED)); 2317 } else 2318 return (cqr->status == DASD_CQR_FILLED); 2319 } 2320 2321 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2322 { 2323 struct dasd_device *device; 2324 int rc; 2325 struct list_head ccw_queue; 2326 struct dasd_ccw_req *cqr; 2327 2328 INIT_LIST_HEAD(&ccw_queue); 2329 maincqr->status = DASD_CQR_FILLED; 2330 device = maincqr->startdev; 2331 list_add(&maincqr->blocklist, &ccw_queue); 2332 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2333 cqr = list_first_entry(&ccw_queue, 2334 struct dasd_ccw_req, blocklist)) { 2335 2336 if (__dasd_sleep_on_erp(cqr)) 2337 continue; 2338 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2339 continue; 2340 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2341 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2342 cqr->status = DASD_CQR_FAILED; 2343 cqr->intrc = -EPERM; 2344 continue; 2345 } 2346 /* Non-temporary stop condition will trigger fail fast */ 2347 if (device->stopped & ~DASD_STOPPED_PENDING && 2348 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2349 !dasd_eer_enabled(device) && device->aq_mask == 0) { 2350 cqr->status = DASD_CQR_FAILED; 2351 
cqr->intrc = -ENOLINK; 2352 continue; 2353 } 2354 /* 2355 * Don't try to start requests if device is in 2356 * offline processing, it might wait forever 2357 */ 2358 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2359 cqr->status = DASD_CQR_FAILED; 2360 cqr->intrc = -ENODEV; 2361 continue; 2362 } 2363 /* 2364 * Don't try to start requests if device is stopped 2365 * except path verification requests 2366 */ 2367 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2368 if (interruptible) { 2369 rc = wait_event_interruptible( 2370 generic_waitq, !(device->stopped)); 2371 if (rc == -ERESTARTSYS) { 2372 cqr->status = DASD_CQR_FAILED; 2373 maincqr->intrc = rc; 2374 continue; 2375 } 2376 } else 2377 wait_event(generic_waitq, !(device->stopped)); 2378 } 2379 if (!cqr->callback) 2380 cqr->callback = dasd_wakeup_cb; 2381 2382 cqr->callback_data = DASD_SLEEPON_START_TAG; 2383 dasd_add_request_tail(cqr); 2384 if (interruptible) { 2385 rc = wait_event_interruptible( 2386 generic_waitq, _wait_for_wakeup(cqr)); 2387 if (rc == -ERESTARTSYS) { 2388 dasd_cancel_req(cqr); 2389 /* wait (non-interruptible) for final status */ 2390 wait_event(generic_waitq, 2391 _wait_for_wakeup(cqr)); 2392 cqr->status = DASD_CQR_FAILED; 2393 maincqr->intrc = rc; 2394 continue; 2395 } 2396 } else 2397 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2398 } 2399 2400 maincqr->endclk = get_tod_clock(); 2401 if ((maincqr->status != DASD_CQR_DONE) && 2402 (maincqr->intrc != -ERESTARTSYS)) 2403 dasd_log_sense(maincqr, &maincqr->irb); 2404 if (maincqr->status == DASD_CQR_DONE) 2405 rc = 0; 2406 else if (maincqr->intrc) 2407 rc = maincqr->intrc; 2408 else 2409 rc = -EIO; 2410 return rc; 2411 } 2412 2413 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2414 { 2415 struct dasd_ccw_req *cqr; 2416 2417 list_for_each_entry(cqr, ccw_queue, blocklist) { 2418 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2419 return 0; 2420 } 2421 2422 return 1; 2423 } 2424 2425 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2426 { 2427 struct dasd_device *device; 2428 struct dasd_ccw_req *cqr, *n; 2429 u8 *sense = NULL; 2430 int rc; 2431 2432 retry: 2433 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2434 device = cqr->startdev; 2435 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2436 continue; 2437 2438 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2439 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2440 cqr->status = DASD_CQR_FAILED; 2441 cqr->intrc = -EPERM; 2442 continue; 2443 } 2444 /*Non-temporary stop condition will trigger fail fast*/ 2445 if (device->stopped & ~DASD_STOPPED_PENDING && 2446 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2447 !dasd_eer_enabled(device)) { 2448 cqr->status = DASD_CQR_FAILED; 2449 cqr->intrc = -EAGAIN; 2450 continue; 2451 } 2452 2453 /*Don't try to start requests if device is stopped*/ 2454 if (interruptible) { 2455 rc = wait_event_interruptible( 2456 generic_waitq, !device->stopped); 2457 if (rc == -ERESTARTSYS) { 2458 cqr->status = DASD_CQR_FAILED; 2459 cqr->intrc = rc; 2460 continue; 2461 } 2462 } else 2463 wait_event(generic_waitq, !(device->stopped)); 2464 2465 if (!cqr->callback) 2466 cqr->callback = dasd_wakeup_cb; 2467 cqr->callback_data = DASD_SLEEPON_START_TAG; 2468 dasd_add_request_tail(cqr); 2469 } 2470 2471 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2472 2473 rc = 0; 2474 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2475 /* 2476 * In some cases certain errors might be expected and 2477 
* error recovery would be unnecessary in these cases. 2478 * Check if the according suppress bit is set. 2479 */ 2480 sense = dasd_get_sense(&cqr->irb); 2481 if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) && 2482 !(sense[2] & SNS2_ENV_DATA_PRESENT) && 2483 test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags)) 2484 continue; 2485 if (sense && (sense[1] & SNS1_NO_REC_FOUND) && 2486 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags)) 2487 continue; 2488 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2489 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2490 continue; 2491 2492 /* 2493 * for alias devices simplify error recovery and 2494 * return to upper layer 2495 * do not skip ERP requests 2496 */ 2497 if (cqr->startdev != cqr->basedev && !cqr->refers && 2498 (cqr->status == DASD_CQR_TERMINATED || 2499 cqr->status == DASD_CQR_NEED_ERP)) 2500 return -EAGAIN; 2501 2502 /* normal recovery for basedev IO */ 2503 if (__dasd_sleep_on_erp(cqr)) 2504 /* handle erp first */ 2505 goto retry; 2506 } 2507 2508 return 0; 2509 } 2510 2511 /* 2512 * Queue a request to the tail of the device ccw_queue and wait for 2513 * it's completion. 2514 */ 2515 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2516 { 2517 return _dasd_sleep_on(cqr, 0); 2518 } 2519 EXPORT_SYMBOL(dasd_sleep_on); 2520 2521 /* 2522 * Start requests from a ccw_queue and wait for their completion. 2523 */ 2524 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2525 { 2526 return _dasd_sleep_on_queue(ccw_queue, 0); 2527 } 2528 EXPORT_SYMBOL(dasd_sleep_on_queue); 2529 2530 /* 2531 * Start requests from a ccw_queue and wait interruptible for their completion. 2532 */ 2533 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2534 { 2535 return _dasd_sleep_on_queue(ccw_queue, 1); 2536 } 2537 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2538 2539 /* 2540 * Queue a request to the tail of the device ccw_queue and wait 2541 * interruptible for it's completion. 2542 */ 2543 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2544 { 2545 return _dasd_sleep_on(cqr, 1); 2546 } 2547 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2548 2549 /* 2550 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2551 * for eckd devices) the currently running request has to be terminated 2552 * and be put back to status queued, before the special request is added 2553 * to the head of the queue. Then the special request is waited on normally. 2554 */ 2555 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2556 { 2557 struct dasd_ccw_req *cqr; 2558 int rc; 2559 2560 if (list_empty(&device->ccw_queue)) 2561 return 0; 2562 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2563 rc = device->discipline->term_IO(cqr); 2564 if (!rc) 2565 /* 2566 * CQR terminated because a more important request is pending. 2567 * Undo decreasing of retry counter because this is 2568 * not an error case. 
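 * The caller (dasd_sleep_on_immediatly) queues its urgent request
 * directly behind this one, so the terminated request is finished
 * before the urgent one is started.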
2569 */ 2570 cqr->retries++; 2571 return rc; 2572 } 2573 2574 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2575 { 2576 struct dasd_device *device; 2577 int rc; 2578 2579 device = cqr->startdev; 2580 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2581 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2582 cqr->status = DASD_CQR_FAILED; 2583 cqr->intrc = -EPERM; 2584 return -EIO; 2585 } 2586 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2587 rc = _dasd_term_running_cqr(device); 2588 if (rc) { 2589 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2590 return rc; 2591 } 2592 cqr->callback = dasd_wakeup_cb; 2593 cqr->callback_data = DASD_SLEEPON_START_TAG; 2594 cqr->status = DASD_CQR_QUEUED; 2595 /* 2596 * add new request as second 2597 * first the terminated cqr needs to be finished 2598 */ 2599 list_add(&cqr->devlist, device->ccw_queue.next); 2600 2601 /* let the bh start the request to keep them in order */ 2602 dasd_schedule_device_bh(device); 2603 2604 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2605 2606 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2607 2608 if (cqr->status == DASD_CQR_DONE) 2609 rc = 0; 2610 else if (cqr->intrc) 2611 rc = cqr->intrc; 2612 else 2613 rc = -EIO; 2614 2615 /* kick tasklets */ 2616 dasd_schedule_device_bh(device); 2617 if (device->block) 2618 dasd_schedule_block_bh(device->block); 2619 2620 return rc; 2621 } 2622 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2623 2624 /* 2625 * Cancels a request that was started with dasd_sleep_on_req. 2626 * This is useful to timeout requests. The request will be 2627 * terminated if it is currently in i/o. 2628 * Returns 0 if request termination was successful 2629 * negative error code if termination failed 2630 * Cancellation of a request is an asynchronous operation! The calling 2631 * function has to wait until the request is properly returned via callback. 2632 */ 2633 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2634 { 2635 struct dasd_device *device = cqr->startdev; 2636 int rc = 0; 2637 2638 switch (cqr->status) { 2639 case DASD_CQR_QUEUED: 2640 /* request was not started - just set to cleared */ 2641 cqr->status = DASD_CQR_CLEARED; 2642 break; 2643 case DASD_CQR_IN_IO: 2644 /* request in IO - terminate IO and release again */ 2645 rc = device->discipline->term_IO(cqr); 2646 if (rc) { 2647 dev_err(&device->cdev->dev, 2648 "Cancelling request failed with rc=%d\n", rc); 2649 } else { 2650 cqr->stopclk = get_tod_clock(); 2651 } 2652 break; 2653 default: /* already finished or clear pending - do nothing */ 2654 break; 2655 } 2656 dasd_schedule_device_bh(device); 2657 return rc; 2658 } 2659 2660 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2661 { 2662 struct dasd_device *device = cqr->startdev; 2663 unsigned long flags; 2664 int rc; 2665 2666 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2667 rc = __dasd_cancel_req(cqr); 2668 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2669 return rc; 2670 } 2671 2672 /* 2673 * SECTION: Operations of the dasd_block layer. 2674 */ 2675 2676 /* 2677 * Timeout function for dasd_block. This is used when the block layer 2678 * is waiting for something that may not come reliably, (e.g. 
a state 2679 * change interrupt) 2680 */ 2681 static void dasd_block_timeout(struct timer_list *t) 2682 { 2683 unsigned long flags; 2684 struct dasd_block *block; 2685 2686 block = timer_container_of(block, t, timer); 2687 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2688 /* re-activate request queue */ 2689 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2690 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2691 dasd_schedule_block_bh(block); 2692 blk_mq_run_hw_queues(block->gdp->queue, true); 2693 } 2694 2695 /* 2696 * Setup timeout for a dasd_block in jiffies. 2697 */ 2698 void dasd_block_set_timer(struct dasd_block *block, int expires) 2699 { 2700 if (expires == 0) 2701 timer_delete(&block->timer); 2702 else 2703 mod_timer(&block->timer, jiffies + expires); 2704 } 2705 EXPORT_SYMBOL(dasd_block_set_timer); 2706 2707 /* 2708 * Clear timeout for a dasd_block. 2709 */ 2710 void dasd_block_clear_timer(struct dasd_block *block) 2711 { 2712 timer_delete(&block->timer); 2713 } 2714 EXPORT_SYMBOL(dasd_block_clear_timer); 2715 2716 /* 2717 * Process finished error recovery ccw. 2718 */ 2719 static void __dasd_process_erp(struct dasd_device *device, 2720 struct dasd_ccw_req *cqr) 2721 { 2722 dasd_erp_fn_t erp_fn; 2723 2724 if (cqr->status == DASD_CQR_DONE) 2725 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2726 else 2727 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2728 erp_fn = device->discipline->erp_postaction(cqr); 2729 erp_fn(cqr); 2730 } 2731 2732 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2733 { 2734 struct request *req; 2735 blk_status_t error = BLK_STS_OK; 2736 unsigned int proc_bytes; 2737 int status; 2738 2739 req = (struct request *) cqr->callback_data; 2740 dasd_profile_end(cqr->block, cqr, req); 2741 2742 proc_bytes = cqr->proc_bytes; 2743 status = cqr->block->base->discipline->free_cp(cqr, req); 2744 if (status < 0) 2745 error = errno_to_blk_status(status); 2746 else if (status == 0) { 2747 switch (cqr->intrc) { 2748 case -EPERM: 2749 /* 2750 * DASD doesn't implement SCSI/NVMe reservations, but it 2751 * implements a locking scheme similar to them. We 2752 * return this error when we no longer have the lock. 2753 */ 2754 error = BLK_STS_RESV_CONFLICT; 2755 break; 2756 case -ENOLINK: 2757 error = BLK_STS_TRANSPORT; 2758 break; 2759 case -ETIMEDOUT: 2760 error = BLK_STS_TIMEOUT; 2761 break; 2762 default: 2763 error = BLK_STS_IOERR; 2764 break; 2765 } 2766 } 2767 2768 /* 2769 * We need to take care for ETIMEDOUT errors here since the 2770 * complete callback does not get called in this case. 2771 * Take care of all errors here and avoid additional code to 2772 * transfer the error value to the complete callback. 2773 */ 2774 if (error) { 2775 blk_mq_end_request(req, error); 2776 blk_mq_run_hw_queues(req->q, true); 2777 } else { 2778 /* 2779 * Partial completed requests can happen with ESE devices. 2780 * During read we might have gotten a NRF error and have to 2781 * complete a request partially. 2782 */ 2783 if (proc_bytes) { 2784 blk_update_request(req, BLK_STS_OK, proc_bytes); 2785 blk_mq_requeue_request(req, true); 2786 } else if (likely(!blk_should_fake_timeout(req->q))) { 2787 blk_mq_complete_request(req); 2788 } 2789 } 2790 } 2791 2792 /* 2793 * Process ccw request queue. 
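 * Terminated requests and requests that need error recovery are handed
 * back to the discipline first; everything that reached a final status
 * is moved to final_queue for completion.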
2794 */ 2795 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2796 struct list_head *final_queue) 2797 { 2798 struct list_head *l, *n; 2799 struct dasd_ccw_req *cqr; 2800 dasd_erp_fn_t erp_fn; 2801 unsigned long flags; 2802 struct dasd_device *base = block->base; 2803 2804 restart: 2805 /* Process request with final status. */ 2806 list_for_each_safe(l, n, &block->ccw_queue) { 2807 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2808 if (cqr->status != DASD_CQR_DONE && 2809 cqr->status != DASD_CQR_FAILED && 2810 cqr->status != DASD_CQR_NEED_ERP && 2811 cqr->status != DASD_CQR_TERMINATED) 2812 continue; 2813 2814 if (cqr->status == DASD_CQR_TERMINATED) { 2815 base->discipline->handle_terminated_request(cqr); 2816 goto restart; 2817 } 2818 2819 /* Process requests that may be recovered */ 2820 if (cqr->status == DASD_CQR_NEED_ERP) { 2821 erp_fn = base->discipline->erp_action(cqr); 2822 if (IS_ERR(erp_fn(cqr))) 2823 continue; 2824 goto restart; 2825 } 2826 2827 /* log sense for fatal error */ 2828 if (cqr->status == DASD_CQR_FAILED) { 2829 dasd_log_sense(cqr, &cqr->irb); 2830 } 2831 2832 /* 2833 * First call extended error reporting and check for autoquiesce 2834 */ 2835 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2836 if (cqr->status == DASD_CQR_FAILED && 2837 dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) { 2838 cqr->status = DASD_CQR_FILLED; 2839 cqr->retries = 255; 2840 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 2841 goto restart; 2842 } 2843 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 2844 2845 /* Process finished ERP request. */ 2846 if (cqr->refers) { 2847 __dasd_process_erp(base, cqr); 2848 goto restart; 2849 } 2850 2851 /* Rechain finished requests to final queue */ 2852 cqr->endclk = get_tod_clock(); 2853 list_move_tail(&cqr->blocklist, final_queue); 2854 } 2855 } 2856 2857 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2858 { 2859 dasd_schedule_block_bh(cqr->block); 2860 } 2861 2862 static void __dasd_block_start_head(struct dasd_block *block) 2863 { 2864 struct dasd_ccw_req *cqr; 2865 2866 if (list_empty(&block->ccw_queue)) 2867 return; 2868 /* We allways begin with the first requests on the queue, as some 2869 * of previously started requests have to be enqueued on a 2870 * dasd_device again for error recovery. 2871 */ 2872 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2873 if (cqr->status != DASD_CQR_FILLED) 2874 continue; 2875 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2876 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2877 cqr->status = DASD_CQR_FAILED; 2878 cqr->intrc = -EPERM; 2879 dasd_schedule_block_bh(block); 2880 continue; 2881 } 2882 /* Non-temporary stop condition will trigger fail fast */ 2883 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2884 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2885 !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) { 2886 cqr->status = DASD_CQR_FAILED; 2887 cqr->intrc = -ENOLINK; 2888 dasd_schedule_block_bh(block); 2889 continue; 2890 } 2891 /* Don't try to start requests if device is stopped */ 2892 if (block->base->stopped) 2893 return; 2894 2895 /* just a fail safe check, should not happen */ 2896 if (!cqr->startdev) 2897 cqr->startdev = block->base; 2898 2899 /* make sure that the requests we submit find their way back */ 2900 cqr->callback = dasd_return_cqr_cb; 2901 2902 dasd_add_request_tail(cqr); 2903 } 2904 } 2905 2906 /* 2907 * Central dasd_block layer routine. 
Takes requests from the generic 2908 * block layer request queue, creates ccw requests, enqueues them on 2909 * a dasd_device and processes ccw requests that have been returned. 2910 */ 2911 static void dasd_block_tasklet(unsigned long data) 2912 { 2913 struct dasd_block *block = (struct dasd_block *) data; 2914 struct list_head final_queue; 2915 struct list_head *l, *n; 2916 struct dasd_ccw_req *cqr; 2917 struct dasd_queue *dq; 2918 2919 atomic_set(&block->tasklet_scheduled, 0); 2920 INIT_LIST_HEAD(&final_queue); 2921 spin_lock_irq(&block->queue_lock); 2922 /* Finish off requests on ccw queue */ 2923 __dasd_process_block_ccw_queue(block, &final_queue); 2924 spin_unlock_irq(&block->queue_lock); 2925 2926 /* Now call the callback function of requests with final status */ 2927 list_for_each_safe(l, n, &final_queue) { 2928 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2929 dq = cqr->dq; 2930 spin_lock_irq(&dq->lock); 2931 list_del_init(&cqr->blocklist); 2932 __dasd_cleanup_cqr(cqr); 2933 spin_unlock_irq(&dq->lock); 2934 } 2935 2936 spin_lock_irq(&block->queue_lock); 2937 /* Now check if the head of the ccw queue needs to be started. */ 2938 __dasd_block_start_head(block); 2939 spin_unlock_irq(&block->queue_lock); 2940 2941 if (waitqueue_active(&shutdown_waitq)) 2942 wake_up(&shutdown_waitq); 2943 dasd_put_device(block->base); 2944 } 2945 2946 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2947 { 2948 wake_up(&dasd_flush_wq); 2949 } 2950 2951 /* 2952 * Requeue a request back to the block request queue 2953 * only works for block requests 2954 */ 2955 static void _dasd_requeue_request(struct dasd_ccw_req *cqr) 2956 { 2957 struct request *req; 2958 2959 /* 2960 * If the request is an ERP request there is nothing to requeue. 2961 * This will be done with the remaining original request. 2962 */ 2963 if (cqr->refers) 2964 return; 2965 spin_lock_irq(&cqr->dq->lock); 2966 req = (struct request *) cqr->callback_data; 2967 blk_mq_requeue_request(req, true); 2968 spin_unlock_irq(&cqr->dq->lock); 2969 2970 return; 2971 } 2972 2973 static int _dasd_requests_to_flushqueue(struct dasd_block *block, 2974 struct list_head *flush_queue) 2975 { 2976 struct dasd_ccw_req *cqr, *n; 2977 unsigned long flags; 2978 int rc, i; 2979 2980 spin_lock_irqsave(&block->queue_lock, flags); 2981 rc = 0; 2982 restart: 2983 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2984 /* if this request currently owned by a dasd_device cancel it */ 2985 if (cqr->status >= DASD_CQR_QUEUED) 2986 rc = dasd_cancel_req(cqr); 2987 if (rc < 0) 2988 break; 2989 /* Rechain request (including erp chain) so it won't be 2990 * touched by the dasd_block_tasklet anymore. 2991 * Replace the callback so we notice when the request 2992 * is returned from the dasd_device layer. 2993 */ 2994 cqr->callback = _dasd_wake_block_flush_cb; 2995 for (i = 0; cqr; cqr = cqr->refers, i++) 2996 list_move_tail(&cqr->blocklist, flush_queue); 2997 if (i > 1) 2998 /* moved more than one request - need to restart */ 2999 goto restart; 3000 } 3001 spin_unlock_irqrestore(&block->queue_lock, flags); 3002 3003 return rc; 3004 } 3005 3006 /* 3007 * Go through all request on the dasd_block request queue, cancel them 3008 * on the respective dasd_device, and return them to the generic 3009 * block layer. 
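 * This happens in two steps: _dasd_requests_to_flushqueue() moves the
 * requests (including their ERP chains) to a private flush queue while
 * holding the queue lock, cancelling any that a dasd_device currently
 * owns; the callbacks are then processed without that lock held.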
3010 */ 3011 static int dasd_flush_block_queue(struct dasd_block *block) 3012 { 3013 struct dasd_ccw_req *cqr, *n; 3014 struct list_head flush_queue; 3015 unsigned long flags; 3016 int rc; 3017 3018 INIT_LIST_HEAD(&flush_queue); 3019 rc = _dasd_requests_to_flushqueue(block, &flush_queue); 3020 3021 /* Now call the callback function of flushed requests */ 3022 restart_cb: 3023 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 3024 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3025 /* Process finished ERP request. */ 3026 if (cqr->refers) { 3027 spin_lock_bh(&block->queue_lock); 3028 __dasd_process_erp(block->base, cqr); 3029 spin_unlock_bh(&block->queue_lock); 3030 /* restart list_for_xx loop since dasd_process_erp 3031 * might remove multiple elements */ 3032 goto restart_cb; 3033 } 3034 /* call the callback function */ 3035 spin_lock_irqsave(&cqr->dq->lock, flags); 3036 cqr->endclk = get_tod_clock(); 3037 list_del_init(&cqr->blocklist); 3038 __dasd_cleanup_cqr(cqr); 3039 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3040 } 3041 return rc; 3042 } 3043 3044 /* 3045 * Schedules a call to dasd_tasklet over the device tasklet. 3046 */ 3047 void dasd_schedule_block_bh(struct dasd_block *block) 3048 { 3049 /* Protect against rescheduling. */ 3050 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3051 return; 3052 /* life cycle of block is bound to it's base device */ 3053 dasd_get_device(block->base); 3054 tasklet_hi_schedule(&block->tasklet); 3055 } 3056 EXPORT_SYMBOL(dasd_schedule_block_bh); 3057 3058 3059 /* 3060 * SECTION: external block device operations 3061 * (request queue handling, open, release, etc.) 3062 */ 3063 3064 /* 3065 * Dasd request queue function. Called from ll_rw_blk.c 3066 */ 3067 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3068 const struct blk_mq_queue_data *qd) 3069 { 3070 struct dasd_block *block = hctx->queue->queuedata; 3071 struct dasd_queue *dq = hctx->driver_data; 3072 struct request *req = qd->rq; 3073 struct dasd_device *basedev; 3074 struct dasd_ccw_req *cqr; 3075 blk_status_t rc = BLK_STS_OK; 3076 3077 basedev = block->base; 3078 spin_lock_irq(&dq->lock); 3079 if (basedev->state < DASD_STATE_READY || 3080 test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { 3081 DBF_DEV_EVENT(DBF_ERR, basedev, 3082 "device not ready for request %p", req); 3083 rc = BLK_STS_IOERR; 3084 goto out; 3085 } 3086 3087 /* 3088 * if device is stopped do not fetch new requests 3089 * except failfast is active which will let requests fail 3090 * immediately in __dasd_block_start_head() 3091 */ 3092 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3093 DBF_DEV_EVENT(DBF_ERR, basedev, 3094 "device stopped request %p", req); 3095 rc = BLK_STS_RESOURCE; 3096 goto out; 3097 } 3098 3099 if (basedev->features & DASD_FEATURE_READONLY && 3100 rq_data_dir(req) == WRITE) { 3101 DBF_DEV_EVENT(DBF_ERR, basedev, 3102 "Rejecting write request %p", req); 3103 rc = BLK_STS_IOERR; 3104 goto out; 3105 } 3106 3107 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3108 (basedev->features & DASD_FEATURE_FAILFAST || 3109 blk_noretry_request(req))) { 3110 DBF_DEV_EVENT(DBF_ERR, basedev, 3111 "Rejecting failfast request %p", req); 3112 rc = BLK_STS_IOERR; 3113 goto out; 3114 } 3115 3116 cqr = basedev->discipline->build_cp(basedev, block, req); 3117 if (IS_ERR(cqr)) { 3118 if (PTR_ERR(cqr) == -EBUSY || 3119 PTR_ERR(cqr) == -ENOMEM || 3120 PTR_ERR(cqr) == -EAGAIN) { 3121 rc = BLK_STS_RESOURCE; 3122 } else if (PTR_ERR(cqr) == -EINVAL) { 3123 
rc = BLK_STS_INVAL; 3124 } else { 3125 DBF_DEV_EVENT(DBF_ERR, basedev, 3126 "CCW creation failed (rc=%ld) on request %p", 3127 PTR_ERR(cqr), req); 3128 rc = BLK_STS_IOERR; 3129 } 3130 goto out; 3131 } 3132 /* 3133 * Note: callback is set to dasd_return_cqr_cb in 3134 * __dasd_block_start_head to cover erp requests as well 3135 */ 3136 cqr->callback_data = req; 3137 cqr->status = DASD_CQR_FILLED; 3138 cqr->dq = dq; 3139 3140 blk_mq_start_request(req); 3141 spin_lock(&block->queue_lock); 3142 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3143 INIT_LIST_HEAD(&cqr->devlist); 3144 dasd_profile_start(block, cqr, req); 3145 dasd_schedule_block_bh(block); 3146 spin_unlock(&block->queue_lock); 3147 3148 out: 3149 spin_unlock_irq(&dq->lock); 3150 return rc; 3151 } 3152 3153 /* 3154 * Block timeout callback, called from the block layer 3155 * 3156 * Return values: 3157 * BLK_EH_RESET_TIMER if the request should be left running 3158 * BLK_EH_DONE if the request is handled or terminated 3159 * by the driver. 3160 */ 3161 enum blk_eh_timer_return dasd_times_out(struct request *req) 3162 { 3163 struct dasd_block *block = req->q->queuedata; 3164 struct dasd_device *device; 3165 struct dasd_ccw_req *cqr; 3166 unsigned long flags; 3167 int rc = 0; 3168 3169 cqr = blk_mq_rq_to_pdu(req); 3170 if (!cqr) 3171 return BLK_EH_DONE; 3172 3173 spin_lock_irqsave(&cqr->dq->lock, flags); 3174 device = cqr->startdev ? cqr->startdev : block->base; 3175 if (!device->blk_timeout) { 3176 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3177 return BLK_EH_RESET_TIMER; 3178 } 3179 DBF_DEV_EVENT(DBF_WARNING, device, 3180 " dasd_times_out cqr %p status %x", 3181 cqr, cqr->status); 3182 3183 spin_lock(&block->queue_lock); 3184 spin_lock(get_ccwdev_lock(device->cdev)); 3185 cqr->retries = -1; 3186 cqr->intrc = -ETIMEDOUT; 3187 if (cqr->status >= DASD_CQR_QUEUED) { 3188 rc = __dasd_cancel_req(cqr); 3189 } else if (cqr->status == DASD_CQR_FILLED || 3190 cqr->status == DASD_CQR_NEED_ERP) { 3191 cqr->status = DASD_CQR_TERMINATED; 3192 } else if (cqr->status == DASD_CQR_IN_ERP) { 3193 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3194 3195 list_for_each_entry_safe(searchcqr, nextcqr, 3196 &block->ccw_queue, blocklist) { 3197 tmpcqr = searchcqr; 3198 while (tmpcqr->refers) 3199 tmpcqr = tmpcqr->refers; 3200 if (tmpcqr != cqr) 3201 continue; 3202 /* searchcqr is an ERP request for cqr */ 3203 searchcqr->retries = -1; 3204 searchcqr->intrc = -ETIMEDOUT; 3205 if (searchcqr->status >= DASD_CQR_QUEUED) { 3206 rc = __dasd_cancel_req(searchcqr); 3207 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3208 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3209 searchcqr->status = DASD_CQR_TERMINATED; 3210 rc = 0; 3211 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3212 /* 3213 * Shouldn't happen; most recent ERP 3214 * request is at the front of queue 3215 */ 3216 continue; 3217 } 3218 break; 3219 } 3220 } 3221 spin_unlock(get_ccwdev_lock(device->cdev)); 3222 dasd_schedule_block_bh(block); 3223 spin_unlock(&block->queue_lock); 3224 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3225 3226 return rc ? 
BLK_EH_RESET_TIMER : BLK_EH_DONE; 3227 } 3228 3229 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3230 unsigned int idx) 3231 { 3232 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3233 3234 if (!dq) 3235 return -ENOMEM; 3236 3237 spin_lock_init(&dq->lock); 3238 hctx->driver_data = dq; 3239 3240 return 0; 3241 } 3242 3243 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3244 { 3245 kfree(hctx->driver_data); 3246 hctx->driver_data = NULL; 3247 } 3248 3249 static void dasd_request_done(struct request *req) 3250 { 3251 blk_mq_end_request(req, 0); 3252 blk_mq_run_hw_queues(req->q, true); 3253 } 3254 3255 struct blk_mq_ops dasd_mq_ops = { 3256 .queue_rq = do_dasd_request, 3257 .complete = dasd_request_done, 3258 .timeout = dasd_times_out, 3259 .init_hctx = dasd_init_hctx, 3260 .exit_hctx = dasd_exit_hctx, 3261 }; 3262 3263 static int dasd_open(struct gendisk *disk, blk_mode_t mode) 3264 { 3265 struct dasd_device *base; 3266 int rc; 3267 3268 base = dasd_device_from_gendisk(disk); 3269 if (!base) 3270 return -ENODEV; 3271 3272 atomic_inc(&base->block->open_count); 3273 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3274 rc = -ENODEV; 3275 goto unlock; 3276 } 3277 3278 if (!try_module_get(base->discipline->owner)) { 3279 rc = -EINVAL; 3280 goto unlock; 3281 } 3282 3283 if (dasd_probeonly) { 3284 dev_info(&base->cdev->dev, 3285 "Accessing the DASD failed because it is in " 3286 "probeonly mode\n"); 3287 rc = -EPERM; 3288 goto out; 3289 } 3290 3291 if (base->state <= DASD_STATE_BASIC) { 3292 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3293 " Cannot open unrecognized device"); 3294 rc = -ENODEV; 3295 goto out; 3296 } 3297 if ((mode & BLK_OPEN_WRITE) && 3298 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3299 (base->features & DASD_FEATURE_READONLY))) { 3300 rc = -EROFS; 3301 goto out; 3302 } 3303 dasd_put_device(base); 3304 return 0; 3305 3306 out: 3307 module_put(base->discipline->owner); 3308 unlock: 3309 atomic_dec(&base->block->open_count); 3310 dasd_put_device(base); 3311 return rc; 3312 } 3313 3314 static void dasd_release(struct gendisk *disk) 3315 { 3316 struct dasd_device *base = dasd_device_from_gendisk(disk); 3317 if (base) { 3318 atomic_dec(&base->block->open_count); 3319 module_put(base->discipline->owner); 3320 dasd_put_device(base); 3321 } 3322 } 3323 3324 /* 3325 * Return disk geometry. 
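 * This backs the HDIO_GETGEO ioctl; the geometry itself comes from the
 * discipline's fill_geometry() callback.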
3326 */ 3327 static int dasd_getgeo(struct gendisk *disk, struct hd_geometry *geo) 3328 { 3329 struct dasd_device *base; 3330 3331 base = dasd_device_from_gendisk(disk); 3332 if (!base) 3333 return -ENODEV; 3334 3335 if (!base->discipline || 3336 !base->discipline->fill_geometry) { 3337 dasd_put_device(base); 3338 return -EINVAL; 3339 } 3340 base->discipline->fill_geometry(base->block, geo); 3341 // geo->start is left unchanged by the above 3342 geo->start >>= base->block->s2b_shift; 3343 dasd_put_device(base); 3344 return 0; 3345 } 3346 3347 const struct block_device_operations 3348 dasd_device_operations = { 3349 .owner = THIS_MODULE, 3350 .open = dasd_open, 3351 .release = dasd_release, 3352 .ioctl = dasd_ioctl, 3353 .compat_ioctl = dasd_ioctl, 3354 .getgeo = dasd_getgeo, 3355 .set_read_only = dasd_set_read_only, 3356 }; 3357 3358 /******************************************************************************* 3359 * end of block device operations 3360 */ 3361 3362 static void 3363 dasd_exit(void) 3364 { 3365 #ifdef CONFIG_PROC_FS 3366 dasd_proc_exit(); 3367 #endif 3368 dasd_eer_exit(); 3369 kmem_cache_destroy(dasd_page_cache); 3370 dasd_page_cache = NULL; 3371 dasd_gendisk_exit(); 3372 dasd_devmap_exit(); 3373 if (dasd_debug_area != NULL) { 3374 debug_unregister(dasd_debug_area); 3375 dasd_debug_area = NULL; 3376 } 3377 dasd_statistics_removeroot(); 3378 } 3379 3380 /* 3381 * SECTION: common functions for ccw_driver use 3382 */ 3383 3384 /* 3385 * Is the device read-only? 3386 * Note that this function does not report the setting of the 3387 * readonly device attribute, but how it is configured in z/VM. 3388 */ 3389 int dasd_device_is_ro(struct dasd_device *device) 3390 { 3391 struct ccw_dev_id dev_id; 3392 struct diag210 diag_data; 3393 int rc; 3394 3395 if (!machine_is_vm()) 3396 return 0; 3397 ccw_device_get_id(device->cdev, &dev_id); 3398 memset(&diag_data, 0, sizeof(diag_data)); 3399 diag_data.vrdcdvno = dev_id.devno; 3400 diag_data.vrdclen = sizeof(diag_data); 3401 rc = diag210(&diag_data); 3402 if (rc == 0 || rc == 2) { 3403 return diag_data.vrdcvfla & 0x80; 3404 } else { 3405 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3406 dev_id.devno, rc); 3407 return 0; 3408 } 3409 } 3410 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3411 3412 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3413 { 3414 struct ccw_device *cdev = data; 3415 int ret; 3416 3417 ret = ccw_device_set_online(cdev); 3418 if (ret) 3419 dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret); 3420 } 3421 3422 /* 3423 * Initial attempt at a probe function. this can be simplified once 3424 * the other detection code is gone. 3425 */ 3426 int dasd_generic_probe(struct ccw_device *cdev) 3427 { 3428 cdev->handler = &dasd_int_handler; 3429 3430 /* 3431 * Automatically online either all dasd devices (dasd_autodetect) 3432 * or all devices specified with dasd= parameters during 3433 * initial probe. 3434 */ 3435 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3436 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3437 async_schedule(dasd_generic_auto_online, cdev); 3438 return 0; 3439 } 3440 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3441 3442 void dasd_generic_free_discipline(struct dasd_device *device) 3443 { 3444 /* Forget the discipline information. 
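 * Drops the module references that were taken in dasd_generic_set_online().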
*/ 3445 if (device->discipline) { 3446 if (device->discipline->uncheck_device) 3447 device->discipline->uncheck_device(device); 3448 module_put(device->discipline->owner); 3449 device->discipline = NULL; 3450 } 3451 if (device->base_discipline) { 3452 module_put(device->base_discipline->owner); 3453 device->base_discipline = NULL; 3454 } 3455 } 3456 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3457 3458 /* 3459 * This will one day be called from a global not_oper handler. 3460 * It is also used by driver_unregister during module unload. 3461 */ 3462 void dasd_generic_remove(struct ccw_device *cdev) 3463 { 3464 struct dasd_device *device; 3465 struct dasd_block *block; 3466 3467 device = dasd_device_from_cdev(cdev); 3468 if (IS_ERR(device)) 3469 return; 3470 3471 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3472 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3473 /* Already doing offline processing */ 3474 dasd_put_device(device); 3475 return; 3476 } 3477 /* 3478 * This device is removed unconditionally. Set offline 3479 * flag to prevent dasd_open from opening it while it is 3480 * no quite down yet. 3481 */ 3482 dasd_set_target_state(device, DASD_STATE_NEW); 3483 cdev->handler = NULL; 3484 /* dasd_delete_device destroys the device reference. */ 3485 block = device->block; 3486 dasd_delete_device(device); 3487 /* 3488 * life cycle of block is bound to device, so delete it after 3489 * device was safely removed 3490 */ 3491 if (block) 3492 dasd_free_block(block); 3493 } 3494 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3495 3496 /* 3497 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3498 * the device is detected for the first time and is supposed to be used 3499 * or the user has started activation through sysfs. 3500 */ 3501 int dasd_generic_set_online(struct ccw_device *cdev, 3502 struct dasd_discipline *base_discipline) 3503 { 3504 struct dasd_discipline *discipline; 3505 struct dasd_device *device; 3506 struct device *dev; 3507 int rc; 3508 3509 dev = &cdev->dev; 3510 3511 /* first online clears initial online feature flag */ 3512 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3513 device = dasd_create_device(cdev); 3514 if (IS_ERR(device)) 3515 return PTR_ERR(device); 3516 3517 discipline = base_discipline; 3518 if (device->features & DASD_FEATURE_USEDIAG) { 3519 if (!dasd_diag_discipline_pointer) { 3520 /* Try to load the required module. */ 3521 rc = request_module(DASD_DIAG_MOD); 3522 if (rc) { 3523 dev_warn(dev, "Setting the DASD online failed " 3524 "because the required module %s " 3525 "could not be loaded (rc=%d)\n", 3526 DASD_DIAG_MOD, rc); 3527 dasd_delete_device(device); 3528 return -ENODEV; 3529 } 3530 } 3531 /* Module init could have failed, so check again here after 3532 * request_module(). 
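 * dasd_diag_discipline_pointer is only set once the DIAG module has
 * initialized successfully.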
*/ 3533 if (!dasd_diag_discipline_pointer) { 3534 dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n"); 3535 dasd_delete_device(device); 3536 return -ENODEV; 3537 } 3538 discipline = dasd_diag_discipline_pointer; 3539 } 3540 if (!try_module_get(base_discipline->owner)) { 3541 dasd_delete_device(device); 3542 return -EINVAL; 3543 } 3544 device->base_discipline = base_discipline; 3545 if (!try_module_get(discipline->owner)) { 3546 dasd_delete_device(device); 3547 return -EINVAL; 3548 } 3549 device->discipline = discipline; 3550 3551 /* check_device will allocate block device if necessary */ 3552 rc = discipline->check_device(device); 3553 if (rc) { 3554 dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n", 3555 discipline->name, rc); 3556 dasd_delete_device(device); 3557 return rc; 3558 } 3559 3560 dasd_set_target_state(device, DASD_STATE_ONLINE); 3561 if (device->state <= DASD_STATE_KNOWN) { 3562 dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n"); 3563 rc = -ENODEV; 3564 dasd_set_target_state(device, DASD_STATE_NEW); 3565 if (device->block) 3566 dasd_free_block(device->block); 3567 dasd_delete_device(device); 3568 } else { 3569 dev_dbg(dev, "dasd_generic device found\n"); 3570 } 3571 3572 wait_event(dasd_init_waitq, _wait_for_device(device)); 3573 3574 dasd_put_device(device); 3575 return rc; 3576 } 3577 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3578 3579 int dasd_generic_set_offline(struct ccw_device *cdev) 3580 { 3581 int max_count, open_count, rc; 3582 struct dasd_device *device; 3583 struct dasd_block *block; 3584 unsigned long flags; 3585 struct device *dev; 3586 3587 dev = &cdev->dev; 3588 3589 rc = 0; 3590 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3591 device = dasd_device_from_cdev_locked(cdev); 3592 if (IS_ERR(device)) { 3593 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3594 return PTR_ERR(device); 3595 } 3596 3597 /* 3598 * We must make sure that this device is currently not in use. 3599 * The open_count is increased for every opener, that includes 3600 * the blkdev_get in dasd_scan_partitions. We are only interested 3601 * in the other openers. 3602 */ 3603 if (device->block) { 3604 max_count = device->block->bdev_file ? 0 : -1; 3605 open_count = atomic_read(&device->block->open_count); 3606 if (open_count > max_count) { 3607 if (open_count > 0) 3608 dev_warn(dev, "The DASD cannot be set offline with open count %i\n", 3609 open_count); 3610 else 3611 dev_warn(dev, "The DASD cannot be set offline while it is in use\n"); 3612 rc = -EBUSY; 3613 goto out_err; 3614 } 3615 } 3616 3617 /* 3618 * Test if the offline processing is already running and exit if so. 
3619 * If a safe offline is being processed this could only be a normal 3620 * offline that should be able to overtake the safe offline and 3621 * cancel any I/O we do not want to wait for any longer 3622 */ 3623 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3624 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3625 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3626 &device->flags); 3627 } else { 3628 rc = -EBUSY; 3629 goto out_err; 3630 } 3631 } 3632 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3633 3634 /* 3635 * if safe_offline is called set safe_offline_running flag and 3636 * clear safe_offline so that a call to normal offline 3637 * can overrun safe_offline processing 3638 */ 3639 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3640 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3641 /* need to unlock here to wait for outstanding I/O */ 3642 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3643 /* 3644 * If we want to set the device safe offline all IO operations 3645 * should be finished before continuing the offline process 3646 * so sync bdev first and then wait for our queues to become 3647 * empty 3648 */ 3649 if (device->block && device->block->bdev_file) 3650 bdev_mark_dead(file_bdev(device->block->bdev_file), false); 3651 dasd_schedule_device_bh(device); 3652 rc = wait_event_interruptible(shutdown_waitq, 3653 _wait_for_empty_queues(device)); 3654 if (rc != 0) 3655 goto interrupted; 3656 3657 /* 3658 * check if a normal offline process overtook the offline 3659 * processing in this case simply do nothing beside returning 3660 * that we got interrupted 3661 * otherwise mark safe offline as not running any longer and 3662 * continue with normal offline 3663 */ 3664 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3665 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3666 rc = -ERESTARTSYS; 3667 goto out_err; 3668 } 3669 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3670 } 3671 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3672 3673 dasd_set_target_state(device, DASD_STATE_NEW); 3674 /* dasd_delete_device destroys the device reference. */ 3675 block = device->block; 3676 dasd_delete_device(device); 3677 /* 3678 * life cycle of block is bound to device, so delete it after 3679 * device was safely removed 3680 */ 3681 if (block) 3682 dasd_free_block(block); 3683 3684 return 0; 3685 3686 interrupted: 3687 /* interrupted by signal */ 3688 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3689 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3690 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3691 out_err: 3692 dasd_put_device(device); 3693 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3694 return rc; 3695 } 3696 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3697 3698 int dasd_generic_last_path_gone(struct dasd_device *device) 3699 { 3700 struct dasd_ccw_req *cqr; 3701 3702 dev_warn(&device->cdev->dev, "No operational channel path is left " 3703 "for the device\n"); 3704 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3705 /* First call extended error reporting and check for autoquiesce. */ 3706 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); 3707 3708 if (device->state < DASD_STATE_BASIC) 3709 return 0; 3710 /* Device is active. We want to keep it. 
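 * In-flight requests are reset to queued and the device is stopped with
 * DASD_STOPPED_DC_WAIT until a path becomes operational again (see
 * dasd_generic_path_operational()).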
*/ 3711 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3712 if ((cqr->status == DASD_CQR_IN_IO) || 3713 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3714 cqr->status = DASD_CQR_QUEUED; 3715 cqr->retries++; 3716 } 3717 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3718 dasd_device_clear_timer(device); 3719 dasd_schedule_device_bh(device); 3720 return 1; 3721 } 3722 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3723 3724 int dasd_generic_path_operational(struct dasd_device *device) 3725 { 3726 dev_info(&device->cdev->dev, "A channel path to the device has become " 3727 "operational\n"); 3728 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3729 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3730 dasd_schedule_device_bh(device); 3731 if (device->block) { 3732 dasd_schedule_block_bh(device->block); 3733 if (device->block->gdp) 3734 blk_mq_run_hw_queues(device->block->gdp->queue, true); 3735 } 3736 3737 if (!device->stopped) 3738 wake_up(&generic_waitq); 3739 3740 return 1; 3741 } 3742 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3743 3744 int dasd_generic_notify(struct ccw_device *cdev, int event) 3745 { 3746 struct dasd_device *device; 3747 int ret; 3748 3749 device = dasd_device_from_cdev_locked(cdev); 3750 if (IS_ERR(device)) 3751 return 0; 3752 ret = 0; 3753 switch (event) { 3754 case CIO_GONE: 3755 case CIO_BOXED: 3756 case CIO_NO_PATH: 3757 dasd_path_no_path(device); 3758 ret = dasd_generic_last_path_gone(device); 3759 break; 3760 case CIO_OPER: 3761 ret = 1; 3762 if (dasd_path_get_opm(device)) 3763 ret = dasd_generic_path_operational(device); 3764 break; 3765 } 3766 dasd_put_device(device); 3767 return ret; 3768 } 3769 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3770 3771 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3772 { 3773 struct dasd_device *device; 3774 int chp, oldopm, hpfpm, ifccpm; 3775 3776 device = dasd_device_from_cdev_locked(cdev); 3777 if (IS_ERR(device)) 3778 return; 3779 3780 oldopm = dasd_path_get_opm(device); 3781 for (chp = 0; chp < 8; chp++) { 3782 if (path_event[chp] & PE_PATH_GONE) { 3783 dasd_path_notoper(device, chp); 3784 } 3785 if (path_event[chp] & PE_PATH_AVAILABLE) { 3786 dasd_path_available(device, chp); 3787 dasd_schedule_device_bh(device); 3788 } 3789 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3790 if (!dasd_path_is_operational(device, chp) && 3791 !dasd_path_need_verify(device, chp)) { 3792 /* 3793 * we can not establish a pathgroup on an 3794 * unavailable path, so trigger a path 3795 * verification first 3796 */ 3797 dasd_path_available(device, chp); 3798 dasd_schedule_device_bh(device); 3799 } 3800 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3801 "Pathgroup re-established\n"); 3802 if (device->discipline->kick_validate) 3803 device->discipline->kick_validate(device); 3804 } 3805 if (path_event[chp] & PE_PATH_FCES_EVENT) { 3806 dasd_path_fcsec_update(device, chp); 3807 dasd_schedule_device_bh(device); 3808 } 3809 } 3810 hpfpm = dasd_path_get_hpfpm(device); 3811 ifccpm = dasd_path_get_ifccpm(device); 3812 if (!dasd_path_get_opm(device) && hpfpm) { 3813 /* 3814 * device has no operational paths but at least one path is 3815 * disabled due to HPF errors 3816 * disable HPF at all and use the path(s) again 3817 */ 3818 if (device->discipline->disable_hpf) 3819 device->discipline->disable_hpf(device); 3820 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); 3821 dasd_path_set_tbvpm(device, hpfpm); 3822 dasd_schedule_device_bh(device); 3823 dasd_schedule_requeue(device); 3824 } 

void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	struct dasd_device *device;
	int chp, oldopm, hpfpm, ifccpm;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;

	oldopm = dasd_path_get_opm(device);
	for (chp = 0; chp < 8; chp++) {
		if (path_event[chp] & PE_PATH_GONE) {
			dasd_path_notoper(device, chp);
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			dasd_path_available(device, chp);
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			if (!dasd_path_is_operational(device, chp) &&
			    !dasd_path_need_verify(device, chp)) {
				/*
				 * We cannot establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first.
				 */
				dasd_path_available(device, chp);
				dasd_schedule_device_bh(device);
			}
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
		if (path_event[chp] & PE_PATH_FCES_EVENT) {
			dasd_path_fcsec_update(device, chp);
			dasd_schedule_device_bh(device);
		}
	}
	hpfpm = dasd_path_get_hpfpm(device);
	ifccpm = dasd_path_get_ifccpm(device);
	if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to HPF errors.  Disable HPF entirely and
		 * use the path(s) again.
		 */
		if (device->discipline->disable_hpf)
			device->discipline->disable_hpf(device);
		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
		dasd_path_set_tbvpm(device, hpfpm);
		dasd_schedule_device_bh(device);
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to IFCC errors.  Trigger path verification
		 * on the paths with IFCC errors.
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		/* First call extended error reporting and check for autoquiesce. */
		dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	/* First call extended error reporting and check for autoquiesce. */
	dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC);

	if (device->state < DASD_STATE_BASIC)
		return;

	if (cqr->status == DASD_CQR_IN_IO ||
	    cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->gdp)
			blk_mq_run_hw_queues(device->block->gdp->queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
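/*
 * Illustrative sketch (not part of the driver): the per-device path masks
 * used above (opm, tbvpm, hpfpm, ifccpm, and the lpm argument of
 * dasd_generic_verify_path) are __u8 bitmaps with one bit per channel path,
 * which is why dasd_generic_path_event() iterates over eight path indexes.
 * The helper below is hypothetical and assumes the usual "highest bit is
 * path 0" convention.
 */
#if 0
static inline __u8 dasd_example_chp_to_lpm(int chp)
{
	/* path 0 -> 0x80, path 1 -> 0x40, ..., path 7 -> 0x01 */
	return 0x80 >> chp;
}
#endif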

/*
 * Clear active requests and requeue them to the block layer if possible.
 */
int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	int rc;

	if (!block)
		return 0;

	INIT_LIST_HEAD(&requeue_queue);
	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);

	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/*
			 * Restart the list_for_each_entry_safe loop since
			 * __dasd_process_erp might remove multiple elements.
			 */
			goto restart_cb;
		}
		_dasd_requeue_request(cqr);
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* Queue a call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

static int dasd_handle_autoquiesce(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   unsigned int reason)
{
	/* In any case write an EER message with the reason. */
	if (dasd_eer_enabled(device))
		dasd_eer_write(device, cqr, reason);

	if (!test_bit(reason, &device->aq_mask))
		return 0;

	/* notify EER about autoquiesce */
	if (dasd_eer_enabled(device))
		dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);

	dev_info(&device->cdev->dev,
		 "The DASD has been put in the quiesce state\n");
	dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);

	if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
		dasd_schedule_requeue(device);

	return 1;
}

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate RDC request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = virt_to_dma32(cqr->data);
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
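/*
 * Illustrative sketch (not part of the driver): how a discipline might use
 * dasd_generic_read_dev_chars() during device setup to fill a private
 * "read device characteristics" buffer, roughly as the ECKD discipline is
 * assumed to do.  The private structure, helper name, and use of
 * DASD_ECKD_MAGIC below are placeholders.
 */
#if 0
struct dasd_example_private {
	char rdc_data[64];	/* raw RDC payload for this device type */
};

static int dasd_example_read_rdc(struct dasd_device *device)
{
	struct dasd_example_private *private = device->private;

	/* Issues one RDC CCW and copies the result into rdc_data. */
	return dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					   &private->rdc_data, 64);
}
#endif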

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);
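/*
 * Illustrative usage note (not part of the driver): dasd_parse() above
 * consumes the "dasd=" parameter, so device ranges are typically supplied on
 * the kernel command line or when loading dasd_mod.  The syntax shown here
 * is from memory; see the s390 documentation for the authoritative form:
 *
 *	modprobe dasd_mod dasd=0.0.0200-0.0.020f,0.0.0300(ro)
 *	modprobe dasd_eckd_mod
 *
 * or "dasd=autodetect" to enable all recognized DASD devices.
 */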