1 /* 2 * scsi.c Copyright (C) 1992 Drew Eckhardt 3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale 4 * Copyright (C) 2002, 2003 Christoph Hellwig 5 * 6 * generic mid-level SCSI driver 7 * Initial versions: Drew Eckhardt 8 * Subsequent revisions: Eric Youngdale 9 * 10 * <drew@colorado.edu> 11 * 12 * Bug correction thanks go to : 13 * Rik Faith <faith@cs.unc.edu> 14 * Tommy Thorn <tthorn> 15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de> 16 * 17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to 18 * add scatter-gather, multiple outstanding request, and other 19 * enhancements. 20 * 21 * Native multichannel, wide scsi, /proc/scsi and hot plugging 22 * support added by Michael Neuffer <mike@i-connect.net> 23 * 24 * Added request_module("scsi_hostadapter") for kerneld: 25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf) 26 * Bjorn Ekwall <bj0rn@blox.se> 27 * (changed to kmod) 28 * 29 * Major improvements to the timeout, abort, and reset processing, 30 * as well as performance modifications for large queue depths by 31 * Leonard N. Zubkoff <lnz@dandelion.com> 32 * 33 * Converted cli() code to spinlocks, Ingo Molnar 34 * 35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli 36 * 37 * out_of_space hacks, D. 
Gilbert (dpg) 990608 38 */ 39 40 #include <linux/module.h> 41 #include <linux/moduleparam.h> 42 #include <linux/kernel.h> 43 #include <linux/sched.h> 44 #include <linux/timer.h> 45 #include <linux/string.h> 46 #include <linux/slab.h> 47 #include <linux/blkdev.h> 48 #include <linux/delay.h> 49 #include <linux/init.h> 50 #include <linux/completion.h> 51 #include <linux/unistd.h> 52 #include <linux/spinlock.h> 53 #include <linux/kmod.h> 54 #include <linux/interrupt.h> 55 #include <linux/notifier.h> 56 #include <linux/cpu.h> 57 #include <linux/mutex.h> 58 59 #include <scsi/scsi.h> 60 #include <scsi/scsi_cmnd.h> 61 #include <scsi/scsi_dbg.h> 62 #include <scsi/scsi_device.h> 63 #include <scsi/scsi_eh.h> 64 #include <scsi/scsi_host.h> 65 #include <scsi/scsi_tcq.h> 66 67 #include "scsi_priv.h" 68 #include "scsi_logging.h" 69 70 static void scsi_done(struct scsi_cmnd *cmd); 71 72 /* 73 * Definitions and constants. 74 */ 75 76 #define MIN_RESET_DELAY (2*HZ) 77 78 /* Do not call reset on error if we just did a reset within 15 sec. */ 79 #define MIN_RESET_PERIOD (15*HZ) 80 81 /* 82 * Macro to determine the size of SCSI command. This macro takes vendor 83 * unique commands into account. SCSI commands in groups 6 and 7 are 84 * vendor unique and we will depend upon the command length being 85 * supplied correctly in cmd_len. 86 */ 87 #define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \ 88 COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len) 89 90 /* 91 * Note - the initial logging level can be set here to log events at boot time. 92 * After the system is up, you may enable logging via the /proc interface. 
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

/*
 * Human-readable names for the SCSI peripheral device type codes reported
 * in INQUIRY data; indexed by type code.  (Entries are presumably padded
 * to a common width for aligned output -- exact padding was collapsed in
 * this copy of the file, verify against the original.)
 */
const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
	"Direct-Access ",
	"Sequential-Access",
	"Printer ",
	"Processor ",
	"WORM ",
	"CD-ROM ",
	"Scanner ",
	"Optical Device ",
	"Medium Changer ",
	"Communications ",
	"Unknown ",
	"Unknown ",
	"RAID ",
	"Enclosure ",
	"Direct-Access-RBC",
};
EXPORT_SYMBOL(scsi_device_types);

/*
 * Bookkeeping for a scsi_cmnd slab cache shared by hosts.  There is one
 * pool for normal hosts (scsi_cmd_pool) and one for hosts that need
 * ISA-DMA-able memory (scsi_cmd_dma_pool).
 */
struct scsi_host_cmd_pool {
	kmem_cache_t	*slab;		/* backing slab cache */
	unsigned int	users;		/* number of hosts using this pool */
	char		*name;		/* slab cache name */
	unsigned int	slab_flags;	/* flags for kmem_cache_create() */
	gfp_t		gfp_mask;	/* extra gfp flags for allocations */
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.name		= "scsi_cmd_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.name		= "scsi_cmd_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

/* Serializes creation/destruction of the pools and their users counts. */
static DEFINE_MUTEX(host_cmd_pool_mutex);

/*
 * Allocate a scsi_cmnd from the host's slab cache, falling back to the
 * host's reserved free list if the slab allocation fails.  Returns NULL
 * only when the slab allocation fails AND the free list is empty.
 */
static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
					    gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			       gfp_mask | shost->cmd_pool->gfp_mask);

	if (unlikely(!cmd)) {
		unsigned long flags;

		/* Slab allocation failed: try the host's reserve list. */
		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (likely(!list_empty(&shost->free_list))) {
			cmd = list_entry(shost->free_list.next,
					 struct scsi_cmnd, list);
			list_del_init(&cmd->list);
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);
	}

	return cmd;
}

/*
 * Function:	scsi_get_command()
 *
 * Purpose:	Allocate and setup a scsi command block
 *
 * Arguments:	dev	- parent scsi device
 *		gfp_mask- allocator flags
 *
 * Returns:	The allocated scsi command structure, or NULL on failure
 *		(device going away, or no memory available).
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&dev->sdev_gendev))
		return NULL;

	cmd = __scsi_get_command(dev->host, gfp_mask);

	if (likely(cmd != NULL)) {
		unsigned long flags;

		memset(cmd, 0, sizeof(*cmd));
		cmd->device = dev;
		init_timer(&cmd->eh_timeout);
		INIT_LIST_HEAD(&cmd->list);
		/* Track the command on the owning device's cmd_list. */
		spin_lock_irqsave(&dev->list_lock, flags);
		list_add_tail(&cmd->list, &dev->cmd_list);
		spin_unlock_irqrestore(&dev->list_lock, flags);
		cmd->jiffies_at_alloc = jiffies;
	} else
		/* Allocation failed: drop the device reference taken above. */
		put_device(&dev->sdev_gendev);

	return cmd;
}
EXPORT_SYMBOL(scsi_get_command);

/*
 * Function:	scsi_put_command()
 *
 * Purpose:	Free a scsi command block
 *
 * Arguments:	cmd	- command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock(&cmd->device->list_lock);
	/* changing locks here, don't need to restore the irq state */
	spin_lock(&shost->free_list_lock);
	/* Donate the command to the host's reserve list if it ran empty. */
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;	/* donated, do not free below */
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		kmem_cache_free(shost->cmd_pool->slab, cmd);

	/* Drop the device reference taken in scsi_get_command(). */
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);

/*
 * Function:	scsi_setup_command_freelist()
 *
 * Purpose:	Setup the command freelist for a scsi host.
241 * 242 * Arguments: shost - host to allocate the freelist for. 243 * 244 * Returns: Nothing. 245 */ 246 int scsi_setup_command_freelist(struct Scsi_Host *shost) 247 { 248 struct scsi_host_cmd_pool *pool; 249 struct scsi_cmnd *cmd; 250 251 spin_lock_init(&shost->free_list_lock); 252 INIT_LIST_HEAD(&shost->free_list); 253 254 /* 255 * Select a command slab for this host and create it if not 256 * yet existant. 257 */ 258 mutex_lock(&host_cmd_pool_mutex); 259 pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool); 260 if (!pool->users) { 261 pool->slab = kmem_cache_create(pool->name, 262 sizeof(struct scsi_cmnd), 0, 263 pool->slab_flags, NULL, NULL); 264 if (!pool->slab) 265 goto fail; 266 } 267 268 pool->users++; 269 shost->cmd_pool = pool; 270 mutex_unlock(&host_cmd_pool_mutex); 271 272 /* 273 * Get one backup command for this host. 274 */ 275 cmd = kmem_cache_alloc(shost->cmd_pool->slab, 276 GFP_KERNEL | shost->cmd_pool->gfp_mask); 277 if (!cmd) 278 goto fail2; 279 list_add(&cmd->list, &shost->free_list); 280 return 0; 281 282 fail2: 283 if (!--pool->users) 284 kmem_cache_destroy(pool->slab); 285 return -ENOMEM; 286 fail: 287 mutex_unlock(&host_cmd_pool_mutex); 288 return -ENOMEM; 289 290 } 291 292 /* 293 * Function: scsi_destroy_command_freelist() 294 * 295 * Purpose: Release the command freelist for a scsi host. 
 *
 * Arguments:	shost	- host that's freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	/* Free every command still parked on the host's reserve list. */
	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		kmem_cache_free(shost->cmd_pool->slab, cmd);
	}

	/* Drop our reference on the shared pool; last user destroys the slab. */
	mutex_lock(&host_cmd_pool_mutex);
	if (!--shost->cmd_pool->users)
		kmem_cache_destroy(shost->cmd_pool->slab);
	mutex_unlock(&host_cmd_pool_mutex);
}

#ifdef CONFIG_SCSI_LOGGING
/* Log a command as it is handed to the LLDD (level table below). */
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			sdev = cmd->device;
			sdev_printk(KERN_INFO, sdev, "send ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * spaces to match disposition and cmd->result
			 * output in scsi_log_completion.
			 */
			printk(" ");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " done = 0x%p, queuecommand 0x%p\n",
					cmd->buffer, cmd->bufflen,
					cmd->done,
					sdev->host->hostt->queuecommand);

			}
		}
	}
}

/* Log a command's completion disposition and result (level table below). */
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			sdev = cmd->device;
			sdev_printk(KERN_INFO, sdev, "done ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS");
				break;
			case NEEDS_RETRY:
				printk("RETRY ");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE");
				break;
			case FAILED:
				printk("FAILED ");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT");
				break;
			default:
				printk("UNKNOWN");
			}
			printk(" %8x ", cmd->result);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION) {
				/*
				 * XXX The scsi_print_sense formatting/prefix
				 * doesn't match this function.
				 */
				scsi_print_sense("", cmd);
			}
			if (level > 3) {
				printk(KERN_INFO "scsi host busy %d failed %d\n",
				       sdev->host->host_busy,
				       sdev->host->host_failed);
			}
		}
	}
}
#endif

/*
 * Assign a serial number and pid to the request for error recovery
 * and debugging purposes.  Protected by the Host_Lock of host.
 * Both counters skip over 0 so an assigned value is never zero
 * (serial_number is reset to 0 in __scsi_done when a command finishes).
 */
static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	cmd->serial_number = host->cmd_serial_number++;
	if (cmd->serial_number == 0)
		cmd->serial_number = host->cmd_serial_number++;

	cmd->pid = host->cmd_pid++;
	if (cmd->pid == 0)
		cmd->pid = host->cmd_pid++;
}

/*
 * Function:	scsi_dispatch_command
 *
 * Purpose:	Dispatch a command to the low-level driver.
 *
 * Arguments:	cmd - command block we are dispatching.
 *
 * Notes:
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	unsigned long flags = 0;
	unsigned long timeout;
	int rtn = 0;

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		atomic_inc(&cmd->device->iorequest_cnt);
		__scsi_done(cmd);
		/* return 0 (because the command has been processed) */
		goto out;
	}

	/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
	if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
		/*
		 * in SDEV_BLOCK, the command is just put back on the device
		 * queue.  The suspend state has already blocked the queue so
		 * future requests should not occur until the device
		 * transitions out of the suspend state.
		 */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));

		/*
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged on return (it's already stopped)
		 */
		goto out;
	}

	/*
	 * If SCSI-2 or lower, store the LUN value in cmnd.
	 */
	if (cmd->device->scsi_level <= SCSI_2 &&
	    cmd->device->scsi_level != SCSI_UNKNOWN) {
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}

	/*
	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
	 * we can avoid the drive not being ready.
	 */
	timeout = host->last_reset + MIN_RESET_DELAY;

	if (host->resetting && time_before(jiffies, timeout)) {
		int ticks_remaining = timeout - jiffies;
		/*
		 * NOTE: This may be executed from within an interrupt
		 * handler!  This is bad, but for now, it'll do.  The irq
		 * level of the interrupt handler has been masked out by the
		 * platform dependent interrupt handling code already, so the
		 * sti() here will not cause another call to the SCSI host's
		 * interrupt handler (assuming there is one irq-level per
		 * host).
		 */
		while (--ticks_remaining >= 0)
			mdelay(1 + 999 / HZ);	/* busy-wait roughly one tick */
		host->resetting = 0;
	}

	/*
	 * AK: unlikely race here: for some reason the timer could
	 * expire before the serial number is set up below.
	 */
	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

	scsi_log_send(cmd);

	/*
	 * We will use a queued command if possible, otherwise we will
	 * emulate the queuing and calling of completion function ourselves.
	 */
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3,
				printk("queuecommand : command too long.\n"));
		cmd->result = (DID_ABORT << 16);

		scsi_done(cmd);
		goto out;
	}

	/* The host lock protects the serial number counters. */
	spin_lock_irqsave(host->host_lock, flags);
	scsi_cmd_get_serial(host, cmd);

	if (unlikely(host->shost_state == SHOST_DEL)) {
		/* Host is being torn down: fail the command immediately. */
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
	} else {
		rtn = host->hostt->queuecommand(cmd, scsi_done);
	}
	spin_unlock_irqrestore(host->host_lock, flags);
	if (rtn) {
		/*
		 * The LLDD rejected the command.  If we can still claim its
		 * timer, requeue it: a device-busy status is preserved,
		 * anything else is treated as host-busy.  If the timer is
		 * gone the timeout path owns the command now.
		 */
		if (scsi_delete_timer(cmd)) {
			atomic_inc(&cmd->device->iodone_cnt);
			scsi_queue_insert(cmd,
					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
					  rtn : SCSI_MLQUEUE_HOST_BUSY);
		}
		SCSI_LOG_MLQUEUE(3,
				 printk("queuecommand : request rejected\n"));
	}

 out:
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
	return rtn;
}


/*
 * Per-CPU I/O completion queue.
 */
static DEFINE_PER_CPU(struct list_head, scsi_done_q);

/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * This function is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * the command to the done queue for further processing.
 *
 * This is the producer of the done queue who enqueues at the tail.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	/*
	 * We don't have to worry about this one timing out any more.
	 * If we are unable to remove the timer, then the command
	 * has already timed out.  In which case, we have no choice but to
	 * let the timeout function run, as we have no idea where in fact
	 * that function could really be.  It might be on another processor,
	 * etc, etc.
	 */
	if (!scsi_delete_timer(cmd))
		return;
	__scsi_done(cmd);
}

/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;

	/*
	 * Set the serial numbers back to zero
	 * (zero marks the command as no longer in flight).
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	/* Every mid-layer command must be backed by a block request here. */
	BUG_ON(!rq);

	/*
	 * The uptodate/nbytes values don't matter, as we allow partial
	 * completes and thus will check this in the softirq callback
	 */
	rq->completion_data = cmd;
	blk_complete_request(rq);
}

/*
 * Function:	scsi_retry_command
 *
 * Purpose:	Send a command back to the low level to be retried.
 *
 * Notes:	This command is always executed in the context of the
 *		bottom half handler, or the error handler thread. Low
 *		level drivers should not become re-entrant as a result of
 *		this.
 */
int scsi_retry_command(struct scsi_cmnd *cmd)
{
	/*
	 * Restore the SCSI command state.
	 */
	scsi_setup_cmd_retry(cmd);

	/*
	 * Zero the sense information from the last time we tried
	 * this command.
	 */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

	/* Requeue through the error-handler retry path. */
	return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
}

/*
 * Function:	scsi_finish_command
 *
 * Purpose:	Pass command off to upper layer for finishing of I/O
 *		request, waking processes that are waiting on results,
 *		etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 *
	 * XXX(hch): What about locking?
	 */
	shost->host_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	/*
	 * We can get here with use_sg=0, causing a panic in the upper level
	 */
	cmd->use_sg = cmd->old_use_sg;
	/* Hand the command to the upper-layer completion callback. */
	cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);

/*
 * Function:	scsi_adjust_queue_depth()
 *
 * Purpose:	Allow low level drivers to tell us to change the queue depth
 *		on a specific SCSI device
 *
 * Arguments:	sdev	- SCSI Device in question
 *		tagged	- Do we use tagged queueing (non-0) or do we treat
 *			  this device as an untagged device (0)
 *		tags	- Number of tags allowed if tagged queueing enabled,
 *			  or number of commands the low level driver can
 *			  queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		the right thing depending on whether or not the device is
 *		currently active and whether or not it even has the
 *		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/* Check to see if the queue is managed by the block layer
	 * if it is, and we fail to adjust the depth, exit */
	if (blk_queue_tagged(sdev->request_queue) &&
	    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
		goto out;

	sdev->queue_depth = tags;
	switch (tagged) {
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		sdev_printk(KERN_WARNING, sdev,
			    "scsi_adjust_queue_depth, bad queue type, "
			    "disabled\n");
		/* fall through: an unknown type disables tagging entirely */
	case 0:
		sdev->ordered_tags = sdev->simple_tags = 0;
		sdev->queue_depth = tags;
		break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);

/*
 * Function:	scsi_track_queue_full()
 *
 * Purpose:	This function will track successive QUEUE_FULL events on a
 *		specific SCSI device to determine if and when there is a
 *		need to adjust the queue depth on the device.
 *
 * Arguments:	sdev	- SCSI Device in question
 *		depth	- Current number of outstanding SCSI commands on
 *			  this device, not counting the one returned as
 *			  QUEUE_FULL.
 *
 * Returns:	0 - No change needed
 *		>0 - Adjust queue depth to this new depth
 *		-1 - Drop back to untagged operation using host->cmd_per_lun
 *		     as the untagged command depth
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		"The Right Thing."  We are interrupt context safe.
789 */ 790 int scsi_track_queue_full(struct scsi_device *sdev, int depth) 791 { 792 if ((jiffies >> 4) == sdev->last_queue_full_time) 793 return 0; 794 795 sdev->last_queue_full_time = (jiffies >> 4); 796 if (sdev->last_queue_full_depth != depth) { 797 sdev->last_queue_full_count = 1; 798 sdev->last_queue_full_depth = depth; 799 } else { 800 sdev->last_queue_full_count++; 801 } 802 803 if (sdev->last_queue_full_count <= 10) 804 return 0; 805 if (sdev->last_queue_full_depth < 8) { 806 /* Drop back to untagged */ 807 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 808 return -1; 809 } 810 811 if (sdev->ordered_tags) 812 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); 813 else 814 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 815 return depth; 816 } 817 EXPORT_SYMBOL(scsi_track_queue_full); 818 819 /** 820 * scsi_device_get - get an addition reference to a scsi_device 821 * @sdev: device to get a reference to 822 * 823 * Gets a reference to the scsi_device and increments the use count 824 * of the underlying LLDD module. You must hold host_lock of the 825 * parent Scsi_Host or already have a reference when calling this. 826 */ 827 int scsi_device_get(struct scsi_device *sdev) 828 { 829 if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) 830 return -ENXIO; 831 if (!get_device(&sdev->sdev_gendev)) 832 return -ENXIO; 833 if (!try_module_get(sdev->host->hostt->module)) { 834 put_device(&sdev->sdev_gendev); 835 return -ENXIO; 836 } 837 return 0; 838 } 839 EXPORT_SYMBOL(scsi_device_get); 840 841 /** 842 * scsi_device_put - release a reference to a scsi_device 843 * @sdev: device to release a reference on. 844 * 845 * Release a reference to the scsi_device and decrements the use count 846 * of the underlying LLDD module. The device is freed once the last 847 * user vanishes. 
 */
void scsi_device_put(struct scsi_device *sdev)
{
	module_put(sdev->host->hostt->module);
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);

/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		/* (scsi_device_get returns 0 on success, so a zero return
		 * means we hold a reference and stop here) */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (prev)
		scsi_device_put(prev);	/* release the previous iteration's reference */
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:	target whose devices we want to iterate over.
 * @data:	opaque cookie passed through to @fn
 * @fn:		callback invoked for each device on the target
 *
 * This traverses over each devices of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void * data,
		     void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(starget_for_each_device);

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a give
 * @starget.
The returned scsi_device does not have an additional 910 * reference. You must hold the host's host_lock over this call and 911 * any access to the returned scsi_device. 912 * 913 * Note: The only reason why drivers would want to use this is because 914 * they're need to access the device list in irq context. Otherwise you 915 * really want to use scsi_device_lookup_by_target instead. 916 **/ 917 struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, 918 uint lun) 919 { 920 struct scsi_device *sdev; 921 922 list_for_each_entry(sdev, &starget->devices, same_target_siblings) { 923 if (sdev->lun ==lun) 924 return sdev; 925 } 926 927 return NULL; 928 } 929 EXPORT_SYMBOL(__scsi_device_lookup_by_target); 930 931 /** 932 * scsi_device_lookup_by_target - find a device given the target 933 * @starget: SCSI target pointer 934 * @lun: SCSI Logical Unit Number 935 * 936 * Looks up the scsi_device with the specified @channel, @id, @lun for a 937 * give host. The returned scsi_device has an additional reference that 938 * needs to be release with scsi_host_put once you're done with it. 939 **/ 940 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 941 uint lun) 942 { 943 struct scsi_device *sdev; 944 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 945 unsigned long flags; 946 947 spin_lock_irqsave(shost->host_lock, flags); 948 sdev = __scsi_device_lookup_by_target(starget, lun); 949 if (sdev && scsi_device_get(sdev)) 950 sdev = NULL; 951 spin_unlock_irqrestore(shost->host_lock, flags); 952 953 return sdev; 954 } 955 EXPORT_SYMBOL(scsi_device_lookup_by_target); 956 957 /** 958 * scsi_device_lookup - find a device given the host (UNLOCKED) 959 * @shost: SCSI host pointer 960 * @channel: SCSI channel (zero if only one channel) 961 * @pun: SCSI target number (physical unit number) 962 * @lun: SCSI Logical Unit Number 963 * 964 * Looks up the scsi_device with the specified @channel, @id, @lun for a 965 * give host. 
The returned scsi_device does not have an additional reference. 966 * You must hold the host's host_lock over this call and any access to the 967 * returned scsi_device. 968 * 969 * Note: The only reason why drivers would want to use this is because 970 * they're need to access the device list in irq context. Otherwise you 971 * really want to use scsi_device_lookup instead. 972 **/ 973 struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, 974 uint channel, uint id, uint lun) 975 { 976 struct scsi_device *sdev; 977 978 list_for_each_entry(sdev, &shost->__devices, siblings) { 979 if (sdev->channel == channel && sdev->id == id && 980 sdev->lun ==lun) 981 return sdev; 982 } 983 984 return NULL; 985 } 986 EXPORT_SYMBOL(__scsi_device_lookup); 987 988 /** 989 * scsi_device_lookup - find a device given the host 990 * @shost: SCSI host pointer 991 * @channel: SCSI channel (zero if only one channel) 992 * @id: SCSI target number (physical unit number) 993 * @lun: SCSI Logical Unit Number 994 * 995 * Looks up the scsi_device with the specified @channel, @id, @lun for a 996 * give host. The returned scsi_device has an additional reference that 997 * needs to be release with scsi_host_put once you're done with it. 998 **/ 999 struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost, 1000 uint channel, uint id, uint lun) 1001 { 1002 struct scsi_device *sdev; 1003 unsigned long flags; 1004 1005 spin_lock_irqsave(shost->host_lock, flags); 1006 sdev = __scsi_device_lookup(shost, channel, id, lun); 1007 if (sdev && scsi_device_get(sdev)) 1008 sdev = NULL; 1009 spin_unlock_irqrestore(shost->host_lock, flags); 1010 1011 return sdev; 1012 } 1013 EXPORT_SYMBOL(scsi_device_lookup); 1014 1015 /** 1016 * scsi_device_cancel - cancel outstanding IO to this device 1017 * @sdev: Pointer to struct scsi_device 1018 * @recovery: Boolean instructing function to recover device or not. 
 *
 **/
int scsi_device_cancel(struct scsi_device *sdev, int recovery)
{
	struct scsi_cmnd *scmd;
	LIST_HEAD(active_list);
	struct list_head *lh, *lh_sf;
	unsigned long flags;

	/* Stop the device from accepting new I/O while we cancel. */
	scsi_device_set_state(sdev, SDEV_CANCEL);

	/* Collect every active command whose timer we can still claim. */
	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(scmd, &sdev->cmd_list, list) {
		if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) {
			/*
			 * If we are unable to remove the timer, it means
			 * that the command has already timed out or
			 * finished.
			 */
			if (!scsi_delete_timer(scmd))
				continue;
			list_add_tail(&scmd->eh_entry, &active_list);
		}
	}
	spin_unlock_irqrestore(&sdev->list_lock, flags);

	if (!list_empty(&active_list)) {
		list_for_each_safe(lh, lh_sf, &active_list) {
			scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
			list_del_init(lh);
			/*
			 * When recovery was requested, try to hand the
			 * command to the error handler; if the EH does not
			 * take it, complete the command as aborted.
			 */
			if (recovery &&
			    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
				scmd->result = (DID_ABORT << 16);
				scsi_finish_command(scmd);
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(scsi_device_cancel);

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* Logging level is runtime-tunable through this module parameter. */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

/* Bring up the SCSI core subsystems in dependency order. */
static int __init init_scsi(void)
{
	int error, i;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	/* Initialize the per-CPU completion queues used by the softirq. */
	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

	/* Unwind in reverse order of initialization. */
cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

/* Tear down the SCSI core in reverse order of init_scsi(). */
static void __exit exit_scsi(void)
{
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);