/*
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *         Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to :
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding requests, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

static void scsi_done(struct scsi_cmnd *cmd);
static int scsi_retry_command(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

#define MIN_RESET_DELAY		(2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD	(15*HZ)

/*
 * Macro to determine the size of a SCSI command. This macro takes vendor
 * unique commands into account. SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)	(((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
				COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
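
/*
 * Illustrative sketch (kept out of the build with #if 0): how the group of
 * a CDB opcode is derived.  The group is the top three bits of byte 0 of
 * the CDB; groups 6 and 7 are vendor specific, which is why CDB_SIZE()
 * falls back to cmd->cmd_len for them.  The helper name and the READ_10
 * example are ours, not part of this file's API.
 */
#if 0
static int example_cdb_group(unsigned char opcode)
{
	return (opcode >> 5) & 7;	/* e.g. READ_10 (0x28) is in group 1 */
}
#endif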

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"Unknown          ",
	"Unknown          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
};
EXPORT_SYMBOL(scsi_device_types);

/*
 * Function:	scsi_allocate_request
 *
 * Purpose:	Allocate a request descriptor.
 *
 * Arguments:	sdev		- device for which we want a request
 *		gfp_mask	- allocation flags passed to kmalloc
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:	Pointer to request block, or NULL on allocation failure.
 */
struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
					   int gfp_mask)
{
	const int offset = ALIGN(sizeof(struct scsi_request), 4);
	const int size = offset + sizeof(struct request);
	struct scsi_request *sreq;

	sreq = kmalloc(size, gfp_mask);
	if (likely(sreq != NULL)) {
		memset(sreq, 0, size);
		sreq->sr_request = (struct request *)(((char *)sreq) + offset);
		sreq->sr_device = sdev;
		sreq->sr_host = sdev->host;
		sreq->sr_magic = SCSI_REQ_MAGIC;
		sreq->sr_data_direction = DMA_BIDIRECTIONAL;
	}

	return sreq;
}
EXPORT_SYMBOL(scsi_allocate_request);
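
/*
 * Illustrative sketch (kept out of the build with #if 0): the basic
 * allocate/release pairing a ULD-style caller would use.  The function
 * name and the DMA_NONE setup are illustrative assumptions, not code
 * from this file.
 */
#if 0
static void example_sreq_usage(struct scsi_device *sdev)
{
	struct scsi_request *sreq;

	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return;
	sreq->sr_data_direction = DMA_NONE;
	/* ... fill in sreq->sr_cmnd and issue the request here ... */
	scsi_release_request(sreq);
}
#endif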

void __scsi_release_request(struct scsi_request *sreq)
{
	struct request *req = sreq->sr_request;

	/* unlikely because the tag was usually ended earlier by the
	 * mid-layer. However, for layering reasons ULDs don't end
	 * the tag of commands they generate. */
	if (unlikely(blk_rq_tagged(req))) {
		unsigned long flags;
		struct request_queue *q = req->q;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_queue_end_tag(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	if (likely(sreq->sr_command != NULL)) {
		struct scsi_cmnd *cmd = sreq->sr_command;

		sreq->sr_command = NULL;
		scsi_next_command(cmd);
	}
}

/*
 * Function:	scsi_release_request
 *
 * Purpose:	Release a request descriptor.
 *
 * Arguments:	sreq	- request to release
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 */
void scsi_release_request(struct scsi_request *sreq)
{
	__scsi_release_request(sreq);
	kfree(sreq);
}
EXPORT_SYMBOL(scsi_release_request);

struct scsi_host_cmd_pool {
	kmem_cache_t	*slab;
	unsigned int	users;
	char		*name;
	unsigned int	slab_flags;
	unsigned int	gfp_mask;
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.name		= "scsi_cmd_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.name		= "scsi_cmd_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

static DECLARE_MUTEX(host_cmd_pool_mutex);

static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
					    int gfp_mask)
{
	struct scsi_cmnd *cmd;

	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			       gfp_mask | shost->cmd_pool->gfp_mask);

	if (unlikely(!cmd)) {
		unsigned long flags;

		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (likely(!list_empty(&shost->free_list))) {
			cmd = list_entry(shost->free_list.next,
					 struct scsi_cmnd, list);
			list_del_init(&cmd->list);
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);
	}

	return cmd;
}

/*
 * Function:	scsi_get_command()
 *
 * Purpose:	Allocate and set up a scsi command block
 *
 * Arguments:	dev	- parent scsi device
 *		gfp_mask- allocator flags
 *
 * Returns:	The allocated scsi command structure, or NULL on failure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
{
	struct scsi_cmnd *cmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&dev->sdev_gendev))
		return NULL;

	cmd = __scsi_get_command(dev->host, gfp_mask);

	if (likely(cmd != NULL)) {
		unsigned long flags;

		memset(cmd, 0, sizeof(*cmd));
		cmd->device = dev;
		init_timer(&cmd->eh_timeout);
		INIT_LIST_HEAD(&cmd->list);
		spin_lock_irqsave(&dev->list_lock, flags);
		list_add_tail(&cmd->list, &dev->cmd_list);
		spin_unlock_irqrestore(&dev->list_lock, flags);
	} else
		put_device(&dev->sdev_gendev);

	return cmd;
}
EXPORT_SYMBOL(scsi_get_command);

/*
 * Function:	scsi_put_command()
 *
 * Purpose:	Free a scsi command block
 *
 * Arguments:	cmd	- command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock(&cmd->device->list_lock);
	/* changing locks here, don't need to restore the irq state */
	spin_lock(&shost->free_list_lock);
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		kmem_cache_free(shost->cmd_pool->slab, cmd);

	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
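
/*
 * Illustrative sketch (kept out of the build with #if 0): the command
 * block lifecycle as seen by a mid-layer caller.  The function name is
 * ours; error handling and the actual dispatch are elided.
 */
#if 0
static void example_cmd_lifecycle(struct scsi_device *sdev)
{
	struct scsi_cmnd *cmd = scsi_get_command(sdev, GFP_ATOMIC);

	if (!cmd)
		return;		/* slab alloc failed and free_list was empty */
	/* ... fill in cmd->cmnd, dispatch, wait for completion ... */
	scsi_put_command(cmd);	/* back to the slab or the host free_list */
}
#endif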

/*
 * Function:	scsi_setup_command_freelist()
 *
 * Purpose:	Set up the command freelist for a scsi host.
 *
 * Arguments:	shost	- host to allocate the freelist for.
 *
 * Returns:	0 on success, -ENOMEM on allocation failure.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
	struct scsi_host_cmd_pool *pool;
	struct scsi_cmnd *cmd;

	spin_lock_init(&shost->free_list_lock);
	INIT_LIST_HEAD(&shost->free_list);

	/*
	 * Select a command slab for this host and create it if it does
	 * not yet exist.
	 */
	down(&host_cmd_pool_mutex);
	pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
	if (!pool->users) {
		pool->slab = kmem_cache_create(pool->name,
				sizeof(struct scsi_cmnd), 0,
				pool->slab_flags, NULL, NULL);
		if (!pool->slab)
			goto fail;
	}

	pool->users++;
	shost->cmd_pool = pool;
	up(&host_cmd_pool_mutex);

	/*
	 * Get one backup command for this host.
	 */
	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			       GFP_KERNEL | shost->cmd_pool->gfp_mask);
	if (!cmd)
		goto fail2;
	list_add(&cmd->list, &shost->free_list);
	return 0;

 fail2:
	/* pool->users is protected by host_cmd_pool_mutex, retake it */
	down(&host_cmd_pool_mutex);
	if (!--pool->users)
		kmem_cache_destroy(pool->slab);
	up(&host_cmd_pool_mutex);
	return -ENOMEM;
 fail:
	up(&host_cmd_pool_mutex);
	return -ENOMEM;
}

/*
 * Function:	scsi_destroy_command_freelist()
 *
 * Purpose:	Release the command freelist for a scsi host.
 *
 * Arguments:	shost	- host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		kmem_cache_free(shost->cmd_pool->slab, cmd);
	}

	down(&host_cmd_pool_mutex);
	if (!--shost->cmd_pool->users)
		kmem_cache_destroy(shost->cmd_pool->slab);
	up(&host_cmd_pool_mutex);
}

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			sdev = cmd->device;
			printk(KERN_INFO "scsi <%d:%d:%d:%d> send ",
			       sdev->host->host_no, sdev->channel, sdev->id,
			       sdev->lun);
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * spaces to match disposition and cmd->result
			 * output in scsi_log_completion.
			 */
			printk(" ");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " done = 0x%p, queuecommand 0x%p\n",
				       cmd->buffer, cmd->bufflen,
				       cmd->done,
				       sdev->host->hostt->queuecommand);
			}
		}
	}
}
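
/*
 * Illustrative sketch (kept out of the build with #if 0): scsi_logging_level
 * packs several small bit fields, and SCSI_LOG_LEVEL() (from scsi_logging.h)
 * extracts one of them, as used above.  The helper name is ours.
 */
#if 0
static unsigned int example_mlqueue_level(void)
{
	/* the value compared against 1..4 in scsi_log_send() */
	return SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, SCSI_LOG_MLQUEUE_BITS);
}
#endif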

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			sdev = cmd->device;
			printk(KERN_INFO "scsi <%d:%d:%d:%d> done ",
			       sdev->host->host_no, sdev->channel, sdev->id,
			       sdev->lun);
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS");
				break;
			case NEEDS_RETRY:
				printk("RETRY  ");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE");
				break;
			case FAILED:
				printk("FAILED ");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT");
				break;
			default:
				printk("UNKNOWN");
			}
			printk(" %8x ", cmd->result);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION) {
				/*
				 * XXX The scsi_print_sense formatting/prefix
				 * doesn't match this function.
				 */
				scsi_print_sense("", cmd);
			}
			if (level > 3) {
				printk(KERN_INFO "scsi host busy %d failed %d\n",
				       sdev->host->host_busy,
				       sdev->host->host_failed);
			}
		}
	}
}
#endif

/*
 * Assign a serial number and pid to the request for error recovery
 * and debugging purposes.  Called with the host lock held.
 */
static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	cmd->serial_number = host->cmd_serial_number++;
	if (cmd->serial_number == 0)
		cmd->serial_number = host->cmd_serial_number++;

	cmd->pid = host->cmd_pid++;
	if (cmd->pid == 0)
		cmd->pid = host->cmd_pid++;
}
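
/*
 * Illustrative sketch (kept out of the build with #if 0): the wrap rule
 * scsi_cmd_get_serial() implements.  Zero is reserved to mean "not
 * dispatched" (see __scsi_done() clearing serial_number below), so both
 * counters skip it when they wrap.  The helper name is ours.
 */
#if 0
static unsigned long example_next_nonzero(unsigned long *counter)
{
	unsigned long v = (*counter)++;

	if (v == 0)
		v = (*counter)++;
	return v;
}
#endif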

/*
 * Function:	scsi_dispatch_cmd
 *
 * Purpose:	Dispatch a command to the low-level driver.
 *
 * Arguments:	cmd	- command block we are dispatching.
 *
 * Returns:	0 if the command was accepted or completed internally,
 *		otherwise the nonzero rejection status returned by the
 *		low-level driver's queuecommand().
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	unsigned long flags = 0;
	unsigned long timeout;
	int rtn = 0;

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		atomic_inc(&cmd->device->iorequest_cnt);
		__scsi_done(cmd);
		/* return 0 (because the command has been processed) */
		goto out;
	}

	/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
	if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
		/*
		 * in SDEV_BLOCK, the command is just put back on the device
		 * queue.  The suspend state has already blocked the queue so
		 * future requests should not occur until the device
		 * transitions out of the suspend state.
		 */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));

		/*
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged on return (it's already stopped)
		 */
		goto out;
	}

	/*
	 * If SCSI-2 or lower, store the LUN value in cmnd.
	 */
	if (cmd->device->scsi_level <= SCSI_2) {
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}

	/*
	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
	 * we can avoid the drive not being ready.
	 */
	timeout = host->last_reset + MIN_RESET_DELAY;

	if (host->resetting && time_before(jiffies, timeout)) {
		int ticks_remaining = timeout - jiffies;
		/*
		 * NOTE: This may be executed from within an interrupt
		 * handler!  This is bad, but for now, it'll do.  The irq
		 * level of the interrupt handler has been masked out by the
		 * platform dependent interrupt handling code already, so the
		 * busy wait below will not cause another call to the SCSI
		 * host's interrupt handler (assuming there is one irq-level
		 * per host).
		 */
		while (--ticks_remaining >= 0)
			mdelay(1 + 999 / HZ);
		host->resetting = 0;
	}

	/*
	 * AK: unlikely race here: for some reason the timer could
	 * expire before the serial number is set up below.
	 */
	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

	scsi_log_send(cmd);

	/*
	 * We will use a queued command if possible, otherwise we will
	 * emulate the queuing and calling of completion function ourselves.
	 */
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3,
				 printk("queuecommand : command too long.\n"));
		cmd->result = (DID_ABORT << 16);

		scsi_done(cmd);
		goto out;
	}

	spin_lock_irqsave(host->host_lock, flags);
	scsi_cmd_get_serial(host, cmd);

	if (unlikely(test_bit(SHOST_CANCEL, &host->shost_state))) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
	} else {
		rtn = host->hostt->queuecommand(cmd, scsi_done);
	}
	spin_unlock_irqrestore(host->host_lock, flags);
	if (rtn) {
		if (scsi_delete_timer(cmd)) {
			atomic_inc(&cmd->device->iodone_cnt);
			scsi_queue_insert(cmd,
					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
					  rtn : SCSI_MLQUEUE_HOST_BUSY);
		}
		SCSI_LOG_MLQUEUE(3,
				 printk("queuecommand : request rejected\n"));
	}

 out:
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmd()\n"));
	return rtn;
}
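
/*
 * Illustrative sketch (kept out of the build with #if 0): the contract a
 * low-level driver's queuecommand() method follows when called from
 * scsi_dispatch_cmd() above.  The function name and the hardware check
 * are illustrative assumptions, not code from any real driver.
 */
#if 0
static int example_queuecommand(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	if (0 /* hypothetical: hardware queue full */)
		return SCSI_MLQUEUE_HOST_BUSY;	/* mid-layer will requeue */

	/* hand the command to the hardware; call done(cmd) from the
	 * interrupt handler once the hardware has finished with it */
	return 0;
}
#endif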

/*
 * Function:	scsi_init_cmd_from_req
 *
 * Purpose:	Initialize a struct scsi_cmnd from a struct scsi_request
 *
 * Arguments:	cmd	- command descriptor.
 *		sreq	- Request from the queue.
 *
 * Lock status: None needed.
 *
 * Returns:	Nothing.
 *
 * Notes:	Mainly transfer data from the request structure to the
 *		command structure.  The request structure is allocated
 *		using the normal memory allocator, and requests can pile
 *		up to more or less any depth.  The command structure represents
 *		a consumable resource, as these are allocated into a pool
 *		when the SCSI subsystem initializes.  The preallocation is
 *		required so that in low-memory situations a disk I/O request
 *		won't cause the memory manager to try and write out a page.
 *		The request structure is generally used by ioctls and character
 *		devices.
 */
void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
{
	sreq->sr_command = cmd;

	cmd->cmd_len = sreq->sr_cmd_len;
	cmd->use_sg = sreq->sr_use_sg;

	cmd->request = sreq->sr_request;
	memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
	cmd->serial_number = 0;
	cmd->bufflen = sreq->sr_bufflen;
	cmd->buffer = sreq->sr_buffer;
	cmd->retries = 0;
	cmd->allowed = sreq->sr_allowed;
	cmd->done = sreq->sr_done;
	cmd->timeout_per_command = sreq->sr_timeout_per_command;
	cmd->sc_data_direction = sreq->sr_data_direction;
	cmd->sglist_len = sreq->sr_sglist_len;
	cmd->underflow = sreq->sr_underflow;
	cmd->sc_request = sreq;
	memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd));

	/*
	 * Zero the sense buffer.  Some host adapters automatically request
	 * sense on error.  0 is not a valid sense code.
	 */
	memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
	cmd->request_buffer = sreq->sr_buffer;
	cmd->request_bufflen = sreq->sr_bufflen;
	cmd->old_use_sg = cmd->use_sg;
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;

	/*
	 * Clear any stale result from a previous use of this command.
	 */
	cmd->result = 0;

	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
}

/*
 * Per-CPU I/O completion queue.
 */
static DEFINE_PER_CPU(struct list_head, scsi_done_q);

/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * This function is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * the command to the done queue for further processing.
 *
 * This is the producer of the done queue who enqueues at the tail.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	/*
	 * We don't have to worry about this one timing out any more.
	 * If we are unable to remove the timer, then the command
	 * has already timed out.  In which case, we have no choice but to
	 * let the timeout function run, as we have no idea where in fact
	 * that function could really be.  It might be on another processor,
	 * etc, etc.
	 */
	if (!scsi_delete_timer(cmd))
		return;
	__scsi_done(cmd);
}

/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
	unsigned long flags;

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	/*
	 * Next, enqueue the command into the done queue.
	 * It is a per-CPU queue, so we just disable local interrupts
	 * and need no spinlock.
	 */
	local_irq_save(flags);
	list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
	raise_softirq_irqoff(SCSI_SOFTIRQ);
	local_irq_restore(flags);
}
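
/*
 * Illustrative sketch (kept out of the build with #if 0): how cmd->result
 * packs four bytes -- status (bits 0-7), message (8-15), host (16-23) and
 * driver (24-31) -- which is why __scsi_done() above treats any nonzero
 * result as an error.  The helper name is ours; the constants are the
 * standard ones from scsi.h.
 */
#if 0
static int example_pack_result(void)
{
	return (DRIVER_SENSE << 24) | (DID_OK << 16) |
	       (COMMAND_COMPLETE << 8) | SAM_STAT_CHECK_CONDITION;
}
#endif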

/**
 * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
 *
 * This is the consumer of the done queue.
 *
 * This is called with all interrupts enabled.  This should reduce
 * interrupt latency, stack depth, and reentrancy of the low-level
 * drivers.
 */
static void scsi_softirq(struct softirq_action *h)
{
	int disposition;
	LIST_HEAD(local_q);

	local_irq_disable();
	list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
	local_irq_enable();

	while (!list_empty(&local_q)) {
		struct scsi_cmnd *cmd = list_entry(local_q.next,
						   struct scsi_cmnd, eh_entry);
		list_del_init(&cmd->eh_entry);

		disposition = scsi_decide_disposition(cmd);
		scsi_log_completion(cmd, disposition);
		switch (disposition) {
		case SUCCESS:
			scsi_finish_command(cmd);
			break;
		case NEEDS_RETRY:
			scsi_retry_command(cmd);
			break;
		case ADD_TO_MLQUEUE:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
			break;
		default:
			if (!scsi_eh_scmd_add(cmd, 0))
				scsi_finish_command(cmd);
		}
	}
}

/*
 * Function:	scsi_retry_command
 *
 * Purpose:	Send a command back to the low level to be retried.
 *
 * Notes:	This command is always executed in the context of the
 *		bottom half handler, or the error handler thread.  Low
 *		level drivers should not become re-entrant as a result of
 *		this.
 */
static int scsi_retry_command(struct scsi_cmnd *cmd)
{
	/*
	 * Restore the SCSI command state.
	 */
	scsi_setup_cmd_retry(cmd);

	/*
	 * Zero the sense information from the last time we tried
	 * this command.
	 */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

	return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
}

/*
 * Function:	scsi_finish_command
 *
 * Purpose:	Pass command off to upper layer for finishing of I/O
 *		request, waking processes that are waiting on results,
 *		etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_request *sreq;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 *
	 * XXX(hch): What about locking?
	 */
	shost->host_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, printk("Notifying upper driver of completion "
				      "for device %d %x\n", sdev->id,
				      cmd->result));

	/*
	 * We can get here with use_sg=0, causing a panic in the upper level
	 */
	cmd->use_sg = cmd->old_use_sg;

	/*
	 * If there is an associated request structure, copy the data over
	 * before we call the completion function.
	 */
	sreq = cmd->sc_request;
	if (sreq) {
		sreq->sr_result = sreq->sr_command->result;
		if (sreq->sr_result) {
			memcpy(sreq->sr_sense_buffer,
			       sreq->sr_command->sense_buffer,
			       sizeof(sreq->sr_sense_buffer));
		}
	}

	cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);

/*
 * Function:	scsi_adjust_queue_depth()
 *
 * Purpose:	Allow low level drivers to tell us to change the queue depth
 *		on a specific SCSI device
 *
 * Arguments:	sdev	- SCSI Device in question
 *		tagged	- Do we use tagged queueing (non-0) or do we treat
 *			  this device as an untagged device (0)
 *		tags	- Number of tags allowed if tagged queueing enabled,
 *			  or number of commands the low level driver can
 *			  queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status: None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		the right thing depending on whether or not the device is
 *		currently active and whether or not it even has the
 *		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/* Check to see if the queue is managed by the block layer
	 * if it is, and we fail to adjust the depth, exit */
	if (blk_queue_tagged(sdev->request_queue) &&
	    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
		goto out;

	sdev->queue_depth = tags;
	switch (tagged) {
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		printk(KERN_WARNING "(scsi%d:%d:%d:%d) "
		       "scsi_adjust_queue_depth, bad queue type, "
		       "disabled\n", sdev->host->host_no,
		       sdev->channel, sdev->id, sdev->lun);
		/* fall through to untagged operation */
	case 0:
		sdev->ordered_tags = sdev->simple_tags = 0;
		sdev->queue_depth = tags;
		break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
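
/*
 * Illustrative sketch (kept out of the build with #if 0): a low-level
 * driver enabling tagged queueing from its slave_configure() method.
 * The function name and the depth of 64 are arbitrary example choices.
 */
#if 0
static int example_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 64);
	else
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	return 0;
}
#endif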

/*
 * Function:	scsi_track_queue_full()
 *
 * Purpose:	This function will track successive QUEUE_FULL events on a
 *		specific SCSI device to determine if and when there is a
 *		need to adjust the queue depth on the device.
 *
 * Arguments:	sdev	- SCSI Device in question
 *		depth	- Current number of outstanding SCSI commands on
 *			  this device, not counting the one returned as
 *			  QUEUE_FULL.
 *
 * Returns:	0 - No change needed
 *		>0 - Adjust queue depth to this new depth
 *		-1 - Drop back to untagged operation using host->cmd_per_lun
 *		     as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		"The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
	if ((jiffies >> 4) == sdev->last_queue_full_time)
		return 0;

	sdev->last_queue_full_time = (jiffies >> 4);
	if (sdev->last_queue_full_depth != depth) {
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
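
/*
 * Illustrative sketch (kept out of the build with #if 0): how a low-level
 * driver might react to a QUEUE FULL status using scsi_track_queue_full().
 * The depth adjustment itself already happened inside that function; the
 * caller only observes the outcome.  The function name is ours.
 */
#if 0
static void example_handle_queue_full(struct scsi_device *sdev, int active)
{
	int depth = scsi_track_queue_full(sdev, active);

	if (depth > 0)
		printk(KERN_INFO "scsi: queue depth adjusted to %d\n", depth);
	else if (depth == -1)
		printk(KERN_INFO "scsi: dropped back to untagged mode\n");
	/* depth == 0: no change was needed */
}
#endif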

/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:	device to get a reference to
 *
 * Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
	if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
		return -ENXIO;
	if (!get_device(&sdev->sdev_gendev))
		return -ENXIO;
	if (!try_module_get(sdev->host->hostt->module)) {
		put_device(&sdev->sdev_gendev);
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Release a reference to the scsi_device and decrements the use count
 * of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
	module_put(sdev->host->hostt->module);
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);

/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:	target whose devices we want to iterate over.
 * @data:	data pointer passed to @fn for each device
 * @fn:		function to call for each device of the target
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
			     void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(starget_for_each_device);
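
/*
 * Illustrative sketch (kept out of the build with #if 0): counting the
 * LUNs behind a target with starget_for_each_device().  The callback runs
 * with a reference held on each device.  Both function names are ours.
 */
#if 0
static void example_count_lun(struct scsi_device *sdev, void *data)
{
	(*(int *)data)++;
}

static int example_luns_on_target(struct scsi_target *starget)
{
	int count = 0;

	starget_for_each_device(starget, &count, example_count_lun);
	return count;
}
#endif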

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
						   uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
		if (sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
						 uint lun)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup_by_target(starget, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any
 * access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
					 uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->channel == channel && sdev->id == id &&
		    sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
				       uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);

/**
 * scsi_device_cancel - cancel outstanding IO to this device
 * @sdev:	Pointer to struct scsi_device
 * @recovery:	Boolean: if non-zero, hand cancelled commands to the error
 *		handler for recovery; otherwise fail them with DID_ABORT.
 **/
int scsi_device_cancel(struct scsi_device *sdev, int recovery)
{
	struct scsi_cmnd *scmd;
	LIST_HEAD(active_list);
	struct list_head *lh, *lh_sf;
	unsigned long flags;

	scsi_device_set_state(sdev, SDEV_CANCEL);

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(scmd, &sdev->cmd_list, list) {
		if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) {
			/*
			 * If we are unable to remove the timer, it means
			 * that the command has already timed out or
			 * finished.
			 */
			if (!scsi_delete_timer(scmd))
				continue;
			list_add_tail(&scmd->eh_entry, &active_list);
		}
	}
	spin_unlock_irqrestore(&sdev->list_lock, flags);

	if (!list_empty(&active_list)) {
		list_for_each_safe(lh, lh_sf, &active_list) {
			scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
			list_del_init(lh);
			if (recovery) {
				scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD);
			} else {
				scmd->result = (DID_ABORT << 16);
				scsi_finish_command(scmd);
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(scsi_device_cancel);
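
/*
 * Illustrative sketch (kept out of the build with #if 0): looking up a
 * device and dropping the reference scsi_device_lookup() took for us.
 * The function name and the 0/0/0 address are arbitrary examples.
 */
#if 0
static void example_lookup(struct Scsi_Host *shost)
{
	struct scsi_device *sdev = scsi_device_lookup(shost, 0, 0, 0);

	if (sdev) {
		/* ... use sdev here ... */
		scsi_device_put(sdev);
	}
}
#endif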

#ifdef CONFIG_HOTPLUG_CPU
static int scsi_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch(action) {
	case CPU_DEAD:
		/* Drain scsi_done_q. */
		local_irq_disable();
		list_splice_init(&per_cpu(scsi_done_q, cpu),
				 &__get_cpu_var(scsi_done_q));
		raise_softirq_irqoff(SCSI_SOFTIRQ);
		local_irq_enable();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata scsi_cpu_nb = {
	.notifier_call	= scsi_cpu_notify,
};

#define register_scsi_cpu()	register_cpu_notifier(&scsi_cpu_nb)
#define unregister_scsi_cpu()	unregister_cpu_notifier(&scsi_cpu_nb)
#else
#define register_scsi_cpu()
#define unregister_scsi_cpu()
#endif /* CONFIG_HOTPLUG_CPU */

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
	int error, i;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	for (i = 0; i < NR_CPUS; i++)
		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

	devfs_mk_dir("scsi");
	open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
	register_scsi_cpu();
	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

static void __exit exit_scsi(void)
{
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	devfs_remove("scsi");
	scsi_exit_procfs();
	scsi_exit_queue();
	unregister_scsi_cpu();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);