1 /* 2 * scsi.c Copyright (C) 1992 Drew Eckhardt 3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale 4 * Copyright (C) 2002, 2003 Christoph Hellwig 5 * 6 * generic mid-level SCSI driver 7 * Initial versions: Drew Eckhardt 8 * Subsequent revisions: Eric Youngdale 9 * 10 * <drew@colorado.edu> 11 * 12 * Bug correction thanks go to : 13 * Rik Faith <faith@cs.unc.edu> 14 * Tommy Thorn <tthorn> 15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de> 16 * 17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to 18 * add scatter-gather, multiple outstanding request, and other 19 * enhancements. 20 * 21 * Native multichannel, wide scsi, /proc/scsi and hot plugging 22 * support added by Michael Neuffer <mike@i-connect.net> 23 * 24 * Added request_module("scsi_hostadapter") for kerneld: 25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf) 26 * Bjorn Ekwall <bj0rn@blox.se> 27 * (changed to kmod) 28 * 29 * Major improvements to the timeout, abort, and reset processing, 30 * as well as performance modifications for large queue depths by 31 * Leonard N. Zubkoff <lnz@dandelion.com> 32 * 33 * Converted cli() code to spinlocks, Ingo Molnar 34 * 35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli 36 * 37 * out_of_space hacks, D. Gilbert (dpg) 990608 38 */ 39 40 #include <linux/module.h> 41 #include <linux/moduleparam.h> 42 #include <linux/kernel.h> 43 #include <linux/sched.h> 44 #include <linux/timer.h> 45 #include <linux/string.h> 46 #include <linux/slab.h> 47 #include <linux/blkdev.h> 48 #include <linux/delay.h> 49 #include <linux/init.h> 50 #include <linux/completion.h> 51 #include <linux/devfs_fs_kernel.h> 52 #include <linux/unistd.h> 53 #include <linux/spinlock.h> 54 #include <linux/kmod.h> 55 #include <linux/interrupt.h> 56 #include <linux/notifier.h> 57 #include <linux/cpu.h> 58 59 #include <scsi/scsi.h> 60 #include <scsi/scsi_cmnd.h> 61 #include <scsi/scsi_dbg.h> 62 #include <scsi/scsi_device.h> 63 #include <scsi/scsi_eh.h> 64 #include <scsi/scsi_host.h> 65 #include <scsi/scsi_tcq.h> 66 #include <scsi/scsi_request.h> 67 68 #include "scsi_priv.h" 69 #include "scsi_logging.h" 70 71 72 /* 73 * Definitions and constants. 74 */ 75 76 #define MIN_RESET_DELAY (2*HZ) 77 78 /* Do not call reset on error if we just did a reset within 15 sec. */ 79 #define MIN_RESET_PERIOD (15*HZ) 80 81 /* 82 * Macro to determine the size of SCSI command. This macro takes vendor 83 * unique commands into account. SCSI commands in groups 6 and 7 are 84 * vendor unique and we will depend upon the command length being 85 * supplied correctly in cmd_len. 86 */ 87 #define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \ 88 COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len) 89 90 /* 91 * Note - the initial logging level can be set here to log events at boot time. 92 * After the system is up, you may enable logging via the /proc interface. 93 */ 94 unsigned int scsi_logging_level; 95 #if defined(CONFIG_SCSI_LOGGING) 96 EXPORT_SYMBOL(scsi_logging_level); 97 #endif 98 99 const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = { 100 "Direct-Access ", 101 "Sequential-Access", 102 "Printer ", 103 "Processor ", 104 "WORM ", 105 "CD-ROM ", 106 "Scanner ", 107 "Optical Device ", 108 "Medium Changer ", 109 "Communications ", 110 "Unknown ", 111 "Unknown ", 112 "RAID ", 113 "Enclosure ", 114 }; 115 EXPORT_SYMBOL(scsi_device_types); 116 117 /* 118 * Function: scsi_allocate_request 119 * 120 * Purpose: Allocate a request descriptor. 121 * 122 * Arguments: device - device for which we want a request 123 * gfp_mask - allocation flags passed to kmalloc 124 * 125 * Lock status: No locks assumed to be held. This function is SMP-safe. 126 * 127 * Returns: Pointer to request block. 128 */ 129 struct scsi_request *scsi_allocate_request(struct scsi_device *sdev, 130 int gfp_mask) 131 { 132 const int offset = ALIGN(sizeof(struct scsi_request), 4); 133 const int size = offset + sizeof(struct request); 134 struct scsi_request *sreq; 135 136 sreq = kmalloc(size, gfp_mask); 137 if (likely(sreq != NULL)) { 138 memset(sreq, 0, size); 139 sreq->sr_request = (struct request *)(((char *)sreq) + offset); 140 sreq->sr_device = sdev; 141 sreq->sr_host = sdev->host; 142 sreq->sr_magic = SCSI_REQ_MAGIC; 143 sreq->sr_data_direction = DMA_BIDIRECTIONAL; 144 } 145 146 return sreq; 147 } 148 EXPORT_SYMBOL(scsi_allocate_request); 149 150 void __scsi_release_request(struct scsi_request *sreq) 151 { 152 struct request *req = sreq->sr_request; 153 154 /* unlikely because the tag was usually ended earlier by the 155 * mid-layer. However, for layering reasons ULD's don't end 156 * the tag of commands they generate. */ 157 if (unlikely(blk_rq_tagged(req))) { 158 unsigned long flags; 159 struct request_queue *q = req->q; 160 161 spin_lock_irqsave(q->queue_lock, flags); 162 blk_queue_end_tag(q, req); 163 spin_unlock_irqrestore(q->queue_lock, flags); 164 } 165 166 167 if (likely(sreq->sr_command != NULL)) { 168 struct scsi_cmnd *cmd = sreq->sr_command; 169 170 sreq->sr_command = NULL; 171 scsi_next_command(cmd); 172 } 173 } 174 175 /* 176 * Function: scsi_release_request 177 * 178 * Purpose: Release a request descriptor. 179 * 180 * Arguments: sreq - request to release 181 * 182 * Lock status: No locks assumed to be held. This function is SMP-safe. 183 */ 184 void scsi_release_request(struct scsi_request *sreq) 185 { 186 __scsi_release_request(sreq); 187 kfree(sreq); 188 } 189 EXPORT_SYMBOL(scsi_release_request); 190 191 struct scsi_host_cmd_pool { 192 kmem_cache_t *slab; 193 unsigned int users; 194 char *name; 195 unsigned int slab_flags; 196 unsigned int gfp_mask; 197 }; 198 199 static struct scsi_host_cmd_pool scsi_cmd_pool = { 200 .name = "scsi_cmd_cache", 201 .slab_flags = SLAB_HWCACHE_ALIGN, 202 }; 203 204 static struct scsi_host_cmd_pool scsi_cmd_dma_pool = { 205 .name = "scsi_cmd_cache(DMA)", 206 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA, 207 .gfp_mask = __GFP_DMA, 208 }; 209 210 static DECLARE_MUTEX(host_cmd_pool_mutex); 211 212 static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, 213 int gfp_mask) 214 { 215 struct scsi_cmnd *cmd; 216 217 cmd = kmem_cache_alloc(shost->cmd_pool->slab, 218 gfp_mask | shost->cmd_pool->gfp_mask); 219 220 if (unlikely(!cmd)) { 221 unsigned long flags; 222 223 spin_lock_irqsave(&shost->free_list_lock, flags); 224 if (likely(!list_empty(&shost->free_list))) { 225 cmd = list_entry(shost->free_list.next, 226 struct scsi_cmnd, list); 227 list_del_init(&cmd->list); 228 } 229 spin_unlock_irqrestore(&shost->free_list_lock, flags); 230 } 231 232 return cmd; 233 } 234 235 /* 236 * Function: scsi_get_command() 237 * 238 * Purpose: Allocate and setup a scsi command block 239 * 240 * Arguments: dev - parent scsi device 241 * gfp_mask- allocator flags 242 * 243 * Returns: The allocated scsi command structure. 244 */ 245 struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask) 246 { 247 struct scsi_cmnd *cmd; 248 249 /* Bail if we can't get a reference to the device */ 250 if (!get_device(&dev->sdev_gendev)) 251 return NULL; 252 253 cmd = __scsi_get_command(dev->host, gfp_mask); 254 255 if (likely(cmd != NULL)) { 256 unsigned long flags; 257 258 memset(cmd, 0, sizeof(*cmd)); 259 cmd->device = dev; 260 cmd->state = SCSI_STATE_UNUSED; 261 cmd->owner = SCSI_OWNER_NOBODY; 262 init_timer(&cmd->eh_timeout); 263 INIT_LIST_HEAD(&cmd->list); 264 spin_lock_irqsave(&dev->list_lock, flags); 265 list_add_tail(&cmd->list, &dev->cmd_list); 266 spin_unlock_irqrestore(&dev->list_lock, flags); 267 } else 268 put_device(&dev->sdev_gendev); 269 270 return cmd; 271 } 272 EXPORT_SYMBOL(scsi_get_command); 273 274 /* 275 * Function: scsi_put_command() 276 * 277 * Purpose: Free a scsi command block 278 * 279 * Arguments: cmd - command block to free 280 * 281 * Returns: Nothing. 282 * 283 * Notes: The command must not belong to any lists. 284 */ 285 void scsi_put_command(struct scsi_cmnd *cmd) 286 { 287 struct scsi_device *sdev = cmd->device; 288 struct Scsi_Host *shost = sdev->host; 289 unsigned long flags; 290 291 /* serious error if the command hasn't come from a device list */ 292 spin_lock_irqsave(&cmd->device->list_lock, flags); 293 BUG_ON(list_empty(&cmd->list)); 294 list_del_init(&cmd->list); 295 spin_unlock(&cmd->device->list_lock); 296 /* changing locks here, don't need to restore the irq state */ 297 spin_lock(&shost->free_list_lock); 298 if (unlikely(list_empty(&shost->free_list))) { 299 list_add(&cmd->list, &shost->free_list); 300 cmd = NULL; 301 } 302 spin_unlock_irqrestore(&shost->free_list_lock, flags); 303 304 if (likely(cmd != NULL)) 305 kmem_cache_free(shost->cmd_pool->slab, cmd); 306 307 put_device(&sdev->sdev_gendev); 308 } 309 EXPORT_SYMBOL(scsi_put_command); 310 311 /* 312 * Function: scsi_setup_command_freelist() 313 * 314 * Purpose: Setup the command freelist for a scsi host. 315 * 316 * Arguments: shost - host to allocate the freelist for. 317 * 318 * Returns: Nothing. 319 */ 320 int scsi_setup_command_freelist(struct Scsi_Host *shost) 321 { 322 struct scsi_host_cmd_pool *pool; 323 struct scsi_cmnd *cmd; 324 325 spin_lock_init(&shost->free_list_lock); 326 INIT_LIST_HEAD(&shost->free_list); 327 328 /* 329 * Select a command slab for this host and create it if not 330 * yet existant. 331 */ 332 down(&host_cmd_pool_mutex); 333 pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool); 334 if (!pool->users) { 335 pool->slab = kmem_cache_create(pool->name, 336 sizeof(struct scsi_cmnd), 0, 337 pool->slab_flags, NULL, NULL); 338 if (!pool->slab) 339 goto fail; 340 } 341 342 pool->users++; 343 shost->cmd_pool = pool; 344 up(&host_cmd_pool_mutex); 345 346 /* 347 * Get one backup command for this host. 348 */ 349 cmd = kmem_cache_alloc(shost->cmd_pool->slab, 350 GFP_KERNEL | shost->cmd_pool->gfp_mask); 351 if (!cmd) 352 goto fail2; 353 list_add(&cmd->list, &shost->free_list); 354 return 0; 355 356 fail2: 357 if (!--pool->users) 358 kmem_cache_destroy(pool->slab); 359 return -ENOMEM; 360 fail: 361 up(&host_cmd_pool_mutex); 362 return -ENOMEM; 363 364 } 365 366 /* 367 * Function: scsi_destroy_command_freelist() 368 * 369 * Purpose: Release the command freelist for a scsi host. 370 * 371 * Arguments: shost - host that's freelist is going to be destroyed 372 */ 373 void scsi_destroy_command_freelist(struct Scsi_Host *shost) 374 { 375 while (!list_empty(&shost->free_list)) { 376 struct scsi_cmnd *cmd; 377 378 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list); 379 list_del_init(&cmd->list); 380 kmem_cache_free(shost->cmd_pool->slab, cmd); 381 } 382 383 down(&host_cmd_pool_mutex); 384 if (!--shost->cmd_pool->users) 385 kmem_cache_destroy(shost->cmd_pool->slab); 386 up(&host_cmd_pool_mutex); 387 } 388 389 #ifdef CONFIG_SCSI_LOGGING 390 void scsi_log_send(struct scsi_cmnd *cmd) 391 { 392 unsigned int level; 393 struct scsi_device *sdev; 394 395 /* 396 * If ML QUEUE log level is greater than or equal to: 397 * 398 * 1: nothing (match completion) 399 * 400 * 2: log opcode + command of all commands 401 * 402 * 3: same as 2 plus dump cmd address 403 * 404 * 4: same as 3 plus dump extra junk 405 */ 406 if (unlikely(scsi_logging_level)) { 407 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, 408 SCSI_LOG_MLQUEUE_BITS); 409 if (level > 1) { 410 sdev = cmd->device; 411 printk(KERN_INFO "scsi <%d:%d:%d:%d> send ", 412 sdev->host->host_no, sdev->channel, sdev->id, 413 sdev->lun); 414 if (level > 2) 415 printk("0x%p ", cmd); 416 /* 417 * spaces to match disposition and cmd->result 418 * output in scsi_log_completion. 419 */ 420 printk(" "); 421 scsi_print_command(cmd); 422 if (level > 3) { 423 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 424 " done = 0x%p, queuecommand 0x%p\n", 425 cmd->buffer, cmd->bufflen, 426 cmd->done, 427 sdev->host->hostt->queuecommand); 428 429 } 430 } 431 } 432 } 433 434 void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) 435 { 436 unsigned int level; 437 struct scsi_device *sdev; 438 439 /* 440 * If ML COMPLETE log level is greater than or equal to: 441 * 442 * 1: log disposition, result, opcode + command, and conditionally 443 * sense data for failures or non SUCCESS dispositions. 444 * 445 * 2: same as 1 but for all command completions. 446 * 447 * 3: same as 2 plus dump cmd address 448 * 449 * 4: same as 3 plus dump extra junk 450 */ 451 if (unlikely(scsi_logging_level)) { 452 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, 453 SCSI_LOG_MLCOMPLETE_BITS); 454 if (((level > 0) && (cmd->result || disposition != SUCCESS)) || 455 (level > 1)) { 456 sdev = cmd->device; 457 printk(KERN_INFO "scsi <%d:%d:%d:%d> done ", 458 sdev->host->host_no, sdev->channel, sdev->id, 459 sdev->lun); 460 if (level > 2) 461 printk("0x%p ", cmd); 462 /* 463 * Dump truncated values, so we usually fit within 464 * 80 chars. 465 */ 466 switch (disposition) { 467 case SUCCESS: 468 printk("SUCCESS"); 469 break; 470 case NEEDS_RETRY: 471 printk("RETRY "); 472 break; 473 case ADD_TO_MLQUEUE: 474 printk("MLQUEUE"); 475 break; 476 case FAILED: 477 printk("FAILED "); 478 break; 479 case TIMEOUT_ERROR: 480 /* 481 * If called via scsi_times_out. 482 */ 483 printk("TIMEOUT"); 484 break; 485 default: 486 printk("UNKNOWN"); 487 } 488 printk(" %8x ", cmd->result); 489 scsi_print_command(cmd); 490 if (status_byte(cmd->result) & CHECK_CONDITION) { 491 /* 492 * XXX The scsi_print_sense formatting/prefix 493 * doesn't match this function. 494 */ 495 scsi_print_sense("", cmd); 496 } 497 if (level > 3) { 498 printk(KERN_INFO "scsi host busy %d failed %d\n", 499 sdev->host->host_busy, 500 sdev->host->host_failed); 501 } 502 } 503 } 504 } 505 #endif 506 507 /* 508 * Assign a serial number and pid to the request for error recovery 509 * and debugging purposes. Protected by the Host_Lock of host. 510 */ 511 static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) 512 { 513 cmd->serial_number = host->cmd_serial_number++; 514 if (cmd->serial_number == 0) 515 cmd->serial_number = host->cmd_serial_number++; 516 517 cmd->pid = host->cmd_pid++; 518 if (cmd->pid == 0) 519 cmd->pid = host->cmd_pid++; 520 } 521 522 /* 523 * Function: scsi_dispatch_command 524 * 525 * Purpose: Dispatch a command to the low-level driver. 526 * 527 * Arguments: cmd - command block we are dispatching. 528 * 529 * Notes: 530 */ 531 int scsi_dispatch_cmd(struct scsi_cmnd *cmd) 532 { 533 struct Scsi_Host *host = cmd->device->host; 534 unsigned long flags = 0; 535 unsigned long timeout; 536 int rtn = 0; 537 538 /* check if the device is still usable */ 539 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { 540 /* in SDEV_DEL we error all commands. DID_NO_CONNECT 541 * returns an immediate error upwards, and signals 542 * that the device is no longer present */ 543 cmd->result = DID_NO_CONNECT << 16; 544 atomic_inc(&cmd->device->iorequest_cnt); 545 scsi_done(cmd); 546 /* return 0 (because the command has been processed) */ 547 goto out; 548 } 549 550 /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */ 551 if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) { 552 /* 553 * in SDEV_BLOCK, the command is just put back on the device 554 * queue. The suspend state has already blocked the queue so 555 * future requests should not occur until the device 556 * transitions out of the suspend state. 557 */ 558 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 559 560 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); 561 562 /* 563 * NOTE: rtn is still zero here because we don't need the 564 * queue to be plugged on return (it's already stopped) 565 */ 566 goto out; 567 } 568 569 /* 570 * If SCSI-2 or lower, store the LUN value in cmnd. 571 */ 572 if (cmd->device->scsi_level <= SCSI_2) { 573 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | 574 (cmd->device->lun << 5 & 0xe0); 575 } 576 577 /* 578 * We will wait MIN_RESET_DELAY clock ticks after the last reset so 579 * we can avoid the drive not being ready. 580 */ 581 timeout = host->last_reset + MIN_RESET_DELAY; 582 583 if (host->resetting && time_before(jiffies, timeout)) { 584 int ticks_remaining = timeout - jiffies; 585 /* 586 * NOTE: This may be executed from within an interrupt 587 * handler! This is bad, but for now, it'll do. The irq 588 * level of the interrupt handler has been masked out by the 589 * platform dependent interrupt handling code already, so the 590 * sti() here will not cause another call to the SCSI host's 591 * interrupt handler (assuming there is one irq-level per 592 * host). 593 */ 594 while (--ticks_remaining >= 0) 595 mdelay(1 + 999 / HZ); 596 host->resetting = 0; 597 } 598 599 /* 600 * AK: unlikely race here: for some reason the timer could 601 * expire before the serial number is set up below. 602 */ 603 scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out); 604 605 scsi_log_send(cmd); 606 607 /* 608 * We will use a queued command if possible, otherwise we will 609 * emulate the queuing and calling of completion function ourselves. 610 */ 611 612 cmd->state = SCSI_STATE_QUEUED; 613 cmd->owner = SCSI_OWNER_LOWLEVEL; 614 615 atomic_inc(&cmd->device->iorequest_cnt); 616 617 /* 618 * Before we queue this command, check if the command 619 * length exceeds what the host adapter can handle. 620 */ 621 if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) { 622 SCSI_LOG_MLQUEUE(3, 623 printk("queuecommand : command too long.\n")); 624 cmd->result = (DID_ABORT << 16); 625 626 scsi_done(cmd); 627 goto out; 628 } 629 630 spin_lock_irqsave(host->host_lock, flags); 631 scsi_cmd_get_serial(host, cmd); 632 633 if (unlikely(test_bit(SHOST_CANCEL, &host->shost_state))) { 634 cmd->result = (DID_NO_CONNECT << 16); 635 scsi_done(cmd); 636 } else { 637 rtn = host->hostt->queuecommand(cmd, scsi_done); 638 } 639 spin_unlock_irqrestore(host->host_lock, flags); 640 if (rtn) { 641 atomic_inc(&cmd->device->iodone_cnt); 642 scsi_queue_insert(cmd, 643 (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? 644 rtn : SCSI_MLQUEUE_HOST_BUSY); 645 SCSI_LOG_MLQUEUE(3, 646 printk("queuecommand : request rejected\n")); 647 } 648 649 out: 650 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n")); 651 return rtn; 652 } 653 654 /* 655 * Function: scsi_init_cmd_from_req 656 * 657 * Purpose: Queue a SCSI command 658 * Purpose: Initialize a struct scsi_cmnd from a struct scsi_request 659 * 660 * Arguments: cmd - command descriptor. 661 * sreq - Request from the queue. 662 * 663 * Lock status: None needed. 664 * 665 * Returns: Nothing. 666 * 667 * Notes: Mainly transfer data from the request structure to the 668 * command structure. The request structure is allocated 669 * using the normal memory allocator, and requests can pile 670 * up to more or less any depth. The command structure represents 671 * a consumable resource, as these are allocated into a pool 672 * when the SCSI subsystem initializes. The preallocation is 673 * required so that in low-memory situations a disk I/O request 674 * won't cause the memory manager to try and write out a page. 675 * The request structure is generally used by ioctls and character 676 * devices. 677 */ 678 void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq) 679 { 680 sreq->sr_command = cmd; 681 682 cmd->owner = SCSI_OWNER_MIDLEVEL; 683 cmd->cmd_len = sreq->sr_cmd_len; 684 cmd->use_sg = sreq->sr_use_sg; 685 686 cmd->request = sreq->sr_request; 687 memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd)); 688 cmd->serial_number = 0; 689 cmd->bufflen = sreq->sr_bufflen; 690 cmd->buffer = sreq->sr_buffer; 691 cmd->retries = 0; 692 cmd->allowed = sreq->sr_allowed; 693 cmd->done = sreq->sr_done; 694 cmd->timeout_per_command = sreq->sr_timeout_per_command; 695 cmd->sc_data_direction = sreq->sr_data_direction; 696 cmd->sglist_len = sreq->sr_sglist_len; 697 cmd->underflow = sreq->sr_underflow; 698 cmd->sc_request = sreq; 699 memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd)); 700 701 /* 702 * Zero the sense buffer. Some host adapters automatically request 703 * sense on error. 0 is not a valid sense code. 704 */ 705 memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer)); 706 cmd->request_buffer = sreq->sr_buffer; 707 cmd->request_bufflen = sreq->sr_bufflen; 708 cmd->old_use_sg = cmd->use_sg; 709 if (cmd->cmd_len == 0) 710 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); 711 cmd->old_cmd_len = cmd->cmd_len; 712 cmd->sc_old_data_direction = cmd->sc_data_direction; 713 cmd->old_underflow = cmd->underflow; 714 715 /* 716 * Start the timer ticking. 717 */ 718 cmd->abort_reason = 0; 719 cmd->result = 0; 720 721 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n")); 722 } 723 724 /* 725 * Per-CPU I/O completion queue. 726 */ 727 static DEFINE_PER_CPU(struct list_head, scsi_done_q); 728 729 /** 730 * scsi_done - Enqueue the finished SCSI command into the done queue. 731 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 732 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 733 * 734 * This function is the mid-level's (SCSI Core) interrupt routine, which 735 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues 736 * the command to the done queue for further processing. 737 * 738 * This is the producer of the done queue who enqueues at the tail. 739 * 740 * This function is interrupt context safe. 741 */ 742 void scsi_done(struct scsi_cmnd *cmd) 743 { 744 /* 745 * We don't have to worry about this one timing out any more. 746 * If we are unable to remove the timer, then the command 747 * has already timed out. In which case, we have no choice but to 748 * let the timeout function run, as we have no idea where in fact 749 * that function could really be. It might be on another processor, 750 * etc, etc. 751 */ 752 if (!scsi_delete_timer(cmd)) 753 return; 754 __scsi_done(cmd); 755 } 756 757 /* Private entry to scsi_done() to complete a command when the timer 758 * isn't running --- used by scsi_times_out */ 759 void __scsi_done(struct scsi_cmnd *cmd) 760 { 761 unsigned long flags; 762 763 /* 764 * Set the serial numbers back to zero 765 */ 766 cmd->serial_number = 0; 767 cmd->state = SCSI_STATE_BHQUEUE; 768 cmd->owner = SCSI_OWNER_BH_HANDLER; 769 770 atomic_inc(&cmd->device->iodone_cnt); 771 if (cmd->result) 772 atomic_inc(&cmd->device->ioerr_cnt); 773 774 /* 775 * Next, enqueue the command into the done queue. 776 * It is a per-CPU queue, so we just disable local interrupts 777 * and need no spinlock. 778 */ 779 local_irq_save(flags); 780 list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q)); 781 raise_softirq_irqoff(SCSI_SOFTIRQ); 782 local_irq_restore(flags); 783 } 784 785 /** 786 * scsi_softirq - Perform post-interrupt processing of finished SCSI commands. 787 * 788 * This is the consumer of the done queue. 789 * 790 * This is called with all interrupts enabled. This should reduce 791 * interrupt latency, stack depth, and reentrancy of the low-level 792 * drivers. 793 */ 794 static void scsi_softirq(struct softirq_action *h) 795 { 796 int disposition; 797 LIST_HEAD(local_q); 798 799 local_irq_disable(); 800 list_splice_init(&__get_cpu_var(scsi_done_q), &local_q); 801 local_irq_enable(); 802 803 while (!list_empty(&local_q)) { 804 struct scsi_cmnd *cmd = list_entry(local_q.next, 805 struct scsi_cmnd, eh_entry); 806 list_del_init(&cmd->eh_entry); 807 808 disposition = scsi_decide_disposition(cmd); 809 scsi_log_completion(cmd, disposition); 810 switch (disposition) { 811 case SUCCESS: 812 scsi_finish_command(cmd); 813 break; 814 case NEEDS_RETRY: 815 scsi_retry_command(cmd); 816 break; 817 case ADD_TO_MLQUEUE: 818 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 819 break; 820 default: 821 if (!scsi_eh_scmd_add(cmd, 0)) 822 scsi_finish_command(cmd); 823 } 824 } 825 } 826 827 /* 828 * Function: scsi_retry_command 829 * 830 * Purpose: Send a command back to the low level to be retried. 831 * 832 * Notes: This command is always executed in the context of the 833 * bottom half handler, or the error handler thread. Low 834 * level drivers should not become re-entrant as a result of 835 * this. 836 */ 837 int scsi_retry_command(struct scsi_cmnd *cmd) 838 { 839 /* 840 * Restore the SCSI command state. 841 */ 842 scsi_setup_cmd_retry(cmd); 843 844 /* 845 * Zero the sense information from the last time we tried 846 * this command. 847 */ 848 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 849 850 return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); 851 } 852 853 /* 854 * Function: scsi_finish_command 855 * 856 * Purpose: Pass command off to upper layer for finishing of I/O 857 * request, waking processes that are waiting on results, 858 * etc. 859 */ 860 void scsi_finish_command(struct scsi_cmnd *cmd) 861 { 862 struct scsi_device *sdev = cmd->device; 863 struct Scsi_Host *shost = sdev->host; 864 struct scsi_request *sreq; 865 866 scsi_device_unbusy(sdev); 867 868 /* 869 * Clear the flags which say that the device/host is no longer 870 * capable of accepting new commands. These are set in scsi_queue.c 871 * for both the queue full condition on a device, and for a 872 * host full condition on the host. 873 * 874 * XXX(hch): What about locking? 875 */ 876 shost->host_blocked = 0; 877 sdev->device_blocked = 0; 878 879 /* 880 * If we have valid sense information, then some kind of recovery 881 * must have taken place. Make a note of this. 882 */ 883 if (SCSI_SENSE_VALID(cmd)) 884 cmd->result |= (DRIVER_SENSE << 24); 885 886 SCSI_LOG_MLCOMPLETE(4, printk("Notifying upper driver of completion " 887 "for device %d %x\n", sdev->id, cmd->result)); 888 889 cmd->owner = SCSI_OWNER_HIGHLEVEL; 890 cmd->state = SCSI_STATE_FINISHED; 891 892 /* 893 * We can get here with use_sg=0, causing a panic in the upper level 894 */ 895 cmd->use_sg = cmd->old_use_sg; 896 897 /* 898 * If there is an associated request structure, copy the data over 899 * before we call the completion function. 900 */ 901 sreq = cmd->sc_request; 902 if (sreq) { 903 sreq->sr_result = sreq->sr_command->result; 904 if (sreq->sr_result) { 905 memcpy(sreq->sr_sense_buffer, 906 sreq->sr_command->sense_buffer, 907 sizeof(sreq->sr_sense_buffer)); 908 } 909 } 910 911 cmd->done(cmd); 912 } 913 EXPORT_SYMBOL(scsi_finish_command); 914 915 /* 916 * Function: scsi_adjust_queue_depth() 917 * 918 * Purpose: Allow low level drivers to tell us to change the queue depth 919 * on a specific SCSI device 920 * 921 * Arguments: sdev - SCSI Device in question 922 * tagged - Do we use tagged queueing (non-0) or do we treat 923 * this device as an untagged device (0) 924 * tags - Number of tags allowed if tagged queueing enabled, 925 * or number of commands the low level driver can 926 * queue up in non-tagged mode (as per cmd_per_lun). 927 * 928 * Returns: Nothing 929 * 930 * Lock Status: None held on entry 931 * 932 * Notes: Low level drivers may call this at any time and we will do 933 * the right thing depending on whether or not the device is 934 * currently active and whether or not it even has the 935 * command blocks built yet. 936 */ 937 void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) 938 { 939 unsigned long flags; 940 941 /* 942 * refuse to set tagged depth to an unworkable size 943 */ 944 if (tags <= 0) 945 return; 946 947 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 948 949 /* Check to see if the queue is managed by the block layer 950 * if it is, and we fail to adjust the depth, exit */ 951 if (blk_queue_tagged(sdev->request_queue) && 952 blk_queue_resize_tags(sdev->request_queue, tags) != 0) 953 goto out; 954 955 sdev->queue_depth = tags; 956 switch (tagged) { 957 case MSG_ORDERED_TAG: 958 sdev->ordered_tags = 1; 959 sdev->simple_tags = 1; 960 break; 961 case MSG_SIMPLE_TAG: 962 sdev->ordered_tags = 0; 963 sdev->simple_tags = 1; 964 break; 965 default: 966 printk(KERN_WARNING "(scsi%d:%d:%d:%d) " 967 "scsi_adjust_queue_depth, bad queue type, " 968 "disabled\n", sdev->host->host_no, 969 sdev->channel, sdev->id, sdev->lun); 970 case 0: 971 sdev->ordered_tags = sdev->simple_tags = 0; 972 sdev->queue_depth = tags; 973 break; 974 } 975 out: 976 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 977 } 978 EXPORT_SYMBOL(scsi_adjust_queue_depth); 979 980 /* 981 * Function: scsi_track_queue_full() 982 * 983 * Purpose: This function will track successive QUEUE_FULL events on a 984 * specific SCSI device to determine if and when there is a 985 * need to adjust the queue depth on the device. 986 * 987 * Arguments: sdev - SCSI Device in question 988 * depth - Current number of outstanding SCSI commands on 989 * this device, not counting the one returned as 990 * QUEUE_FULL. 991 * 992 * Returns: 0 - No change needed 993 * >0 - Adjust queue depth to this new depth 994 * -1 - Drop back to untagged operation using host->cmd_per_lun 995 * as the untagged command depth 996 * 997 * Lock Status: None held on entry 998 * 999 * Notes: Low level drivers may call this at any time and we will do 1000 * "The Right Thing." We are interrupt context safe. 1001 */ 1002 int scsi_track_queue_full(struct scsi_device *sdev, int depth) 1003 { 1004 if ((jiffies >> 4) == sdev->last_queue_full_time) 1005 return 0; 1006 1007 sdev->last_queue_full_time = (jiffies >> 4); 1008 if (sdev->last_queue_full_depth != depth) { 1009 sdev->last_queue_full_count = 1; 1010 sdev->last_queue_full_depth = depth; 1011 } else { 1012 sdev->last_queue_full_count++; 1013 } 1014 1015 if (sdev->last_queue_full_count <= 10) 1016 return 0; 1017 if (sdev->last_queue_full_depth < 8) { 1018 /* Drop back to untagged */ 1019 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 1020 return -1; 1021 } 1022 1023 if (sdev->ordered_tags) 1024 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); 1025 else 1026 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 1027 return depth; 1028 } 1029 EXPORT_SYMBOL(scsi_track_queue_full); 1030 1031 /** 1032 * scsi_device_get - get an addition reference to a scsi_device 1033 * @sdev: device to get a reference to 1034 * 1035 * Gets a reference to the scsi_device and increments the use count 1036 * of the underlying LLDD module. You must hold host_lock of the 1037 * parent Scsi_Host or already have a reference when calling this. 1038 */ 1039 int scsi_device_get(struct scsi_device *sdev) 1040 { 1041 if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) 1042 return -ENXIO; 1043 if (!get_device(&sdev->sdev_gendev)) 1044 return -ENXIO; 1045 if (!try_module_get(sdev->host->hostt->module)) { 1046 put_device(&sdev->sdev_gendev); 1047 return -ENXIO; 1048 } 1049 return 0; 1050 } 1051 EXPORT_SYMBOL(scsi_device_get); 1052 1053 /** 1054 * scsi_device_put - release a reference to a scsi_device 1055 * @sdev: device to release a reference on. 1056 * 1057 * Release a reference to the scsi_device and decrements the use count 1058 * of the underlying LLDD module. The device is freed once the last 1059 * user vanishes. 1060 */ 1061 void scsi_device_put(struct scsi_device *sdev) 1062 { 1063 module_put(sdev->host->hostt->module); 1064 put_device(&sdev->sdev_gendev); 1065 } 1066 EXPORT_SYMBOL(scsi_device_put); 1067 1068 /* helper for shost_for_each_device, thus not documented */ 1069 struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost, 1070 struct scsi_device *prev) 1071 { 1072 struct list_head *list = (prev ? &prev->siblings : &shost->__devices); 1073 struct scsi_device *next = NULL; 1074 unsigned long flags; 1075 1076 spin_lock_irqsave(shost->host_lock, flags); 1077 while (list->next != &shost->__devices) { 1078 next = list_entry(list->next, struct scsi_device, siblings); 1079 /* skip devices that we can't get a reference to */ 1080 if (!scsi_device_get(next)) 1081 break; 1082 next = NULL; 1083 list = list->next; 1084 } 1085 spin_unlock_irqrestore(shost->host_lock, flags); 1086 1087 if (prev) 1088 scsi_device_put(prev); 1089 return next; 1090 } 1091 EXPORT_SYMBOL(__scsi_iterate_devices); 1092 1093 /** 1094 * starget_for_each_device - helper to walk all devices of a target 1095 * @starget: target whose devices we want to iterate over. 1096 * 1097 * This traverses over each devices of @shost. The devices have 1098 * a reference that must be released by scsi_host_put when breaking 1099 * out of the loop. 1100 */ 1101 void starget_for_each_device(struct scsi_target *starget, void * data, 1102 void (*fn)(struct scsi_device *, void *)) 1103 { 1104 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1105 struct scsi_device *sdev; 1106 1107 shost_for_each_device(sdev, shost) { 1108 if ((sdev->channel == starget->channel) && 1109 (sdev->id == starget->id)) 1110 fn(sdev, data); 1111 } 1112 } 1113 EXPORT_SYMBOL(starget_for_each_device); 1114 1115 /** 1116 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) 1117 * @starget: SCSI target pointer 1118 * @lun: SCSI Logical Unit Number 1119 * 1120 * Looks up the scsi_device with the specified @lun for a give 1121 * @starget. The returned scsi_device does not have an additional 1122 * reference. You must hold the host's host_lock over this call and 1123 * any access to the returned scsi_device. 1124 * 1125 * Note: The only reason why drivers would want to use this is because 1126 * they're need to access the device list in irq context. Otherwise you 1127 * really want to use scsi_device_lookup_by_target instead. 1128 **/ 1129 struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, 1130 uint lun) 1131 { 1132 struct scsi_device *sdev; 1133 1134 list_for_each_entry(sdev, &starget->devices, same_target_siblings) { 1135 if (sdev->lun ==lun) 1136 return sdev; 1137 } 1138 1139 return NULL; 1140 } 1141 EXPORT_SYMBOL(__scsi_device_lookup_by_target); 1142 1143 /** 1144 * scsi_device_lookup_by_target - find a device given the target 1145 * @starget: SCSI target pointer 1146 * @lun: SCSI Logical Unit Number 1147 * 1148 * Looks up the scsi_device with the specified @channel, @id, @lun for a 1149 * give host. The returned scsi_device has an additional reference that 1150 * needs to be release with scsi_host_put once you're done with it. 1151 **/ 1152 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 1153 uint lun) 1154 { 1155 struct scsi_device *sdev; 1156 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1157 unsigned long flags; 1158 1159 spin_lock_irqsave(shost->host_lock, flags); 1160 sdev = __scsi_device_lookup_by_target(starget, lun); 1161 if (sdev && scsi_device_get(sdev)) 1162 sdev = NULL; 1163 spin_unlock_irqrestore(shost->host_lock, flags); 1164 1165 return sdev; 1166 } 1167 EXPORT_SYMBOL(scsi_device_lookup_by_target); 1168 1169 /** 1170 * scsi_device_lookup - find a device given the host (UNLOCKED) 1171 * @shost: SCSI host pointer 1172 * @channel: SCSI channel (zero if only one channel) 1173 * @pun: SCSI target number (physical unit number) 1174 * @lun: SCSI Logical Unit Number 1175 * 1176 * Looks up the scsi_device with the specified @channel, @id, @lun for a 1177 * give host. The returned scsi_device does not have an additional reference. 1178 * You must hold the host's host_lock over this call and any access to the 1179 * returned scsi_device. 1180 * 1181 * Note: The only reason why drivers would want to use this is because 1182 * they're need to access the device list in irq context. Otherwise you 1183 * really want to use scsi_device_lookup instead. 1184 **/ 1185 struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, 1186 uint channel, uint id, uint lun) 1187 { 1188 struct scsi_device *sdev; 1189 1190 list_for_each_entry(sdev, &shost->__devices, siblings) { 1191 if (sdev->channel == channel && sdev->id == id && 1192 sdev->lun ==lun) 1193 return sdev; 1194 } 1195 1196 return NULL; 1197 } 1198 EXPORT_SYMBOL(__scsi_device_lookup); 1199 1200 /** 1201 * scsi_device_lookup - find a device given the host 1202 * @shost: SCSI host pointer 1203 * @channel: SCSI channel (zero if only one channel) 1204 * @id: SCSI target number (physical unit number) 1205 * @lun: SCSI Logical Unit Number 1206 * 1207 * Looks up the scsi_device with the specified @channel, @id, @lun for a 1208 * give host. The returned scsi_device has an additional reference that 1209 * needs to be release with scsi_host_put once you're done with it. 1210 **/ 1211 struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost, 1212 uint channel, uint id, uint lun) 1213 { 1214 struct scsi_device *sdev; 1215 unsigned long flags; 1216 1217 spin_lock_irqsave(shost->host_lock, flags); 1218 sdev = __scsi_device_lookup(shost, channel, id, lun); 1219 if (sdev && scsi_device_get(sdev)) 1220 sdev = NULL; 1221 spin_unlock_irqrestore(shost->host_lock, flags); 1222 1223 return sdev; 1224 } 1225 EXPORT_SYMBOL(scsi_device_lookup); 1226 1227 /** 1228 * scsi_device_cancel - cancel outstanding IO to this device 1229 * @sdev: Pointer to struct scsi_device 1230 * @recovery: Boolean instructing function to recover device or not. 1231 * 1232 **/ 1233 int scsi_device_cancel(struct scsi_device *sdev, int recovery) 1234 { 1235 struct scsi_cmnd *scmd; 1236 LIST_HEAD(active_list); 1237 struct list_head *lh, *lh_sf; 1238 unsigned long flags; 1239 1240 scsi_device_set_state(sdev, SDEV_CANCEL); 1241 1242 spin_lock_irqsave(&sdev->list_lock, flags); 1243 list_for_each_entry(scmd, &sdev->cmd_list, list) { 1244 if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) { 1245 /* 1246 * If we are unable to remove the timer, it means 1247 * that the command has already timed out or 1248 * finished. 1249 */ 1250 if (!scsi_delete_timer(scmd)) 1251 continue; 1252 list_add_tail(&scmd->eh_entry, &active_list); 1253 } 1254 } 1255 spin_unlock_irqrestore(&sdev->list_lock, flags); 1256 1257 if (!list_empty(&active_list)) { 1258 list_for_each_safe(lh, lh_sf, &active_list) { 1259 scmd = list_entry(lh, struct scsi_cmnd, eh_entry); 1260 list_del_init(lh); 1261 if (recovery) { 1262 scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD); 1263 } else { 1264 scmd->result = (DID_ABORT << 16); 1265 scsi_finish_command(scmd); 1266 } 1267 } 1268 } 1269 1270 return 0; 1271 } 1272 EXPORT_SYMBOL(scsi_device_cancel); 1273 1274 #ifdef CONFIG_HOTPLUG_CPU 1275 static int scsi_cpu_notify(struct notifier_block *self, 1276 unsigned long action, void *hcpu) 1277 { 1278 int cpu = (unsigned long)hcpu; 1279 1280 switch(action) { 1281 case CPU_DEAD: 1282 /* Drain scsi_done_q. */ 1283 local_irq_disable(); 1284 list_splice_init(&per_cpu(scsi_done_q, cpu), 1285 &__get_cpu_var(scsi_done_q)); 1286 raise_softirq_irqoff(SCSI_SOFTIRQ); 1287 local_irq_enable(); 1288 break; 1289 default: 1290 break; 1291 } 1292 return NOTIFY_OK; 1293 } 1294 1295 static struct notifier_block __devinitdata scsi_cpu_nb = { 1296 .notifier_call = scsi_cpu_notify, 1297 }; 1298 1299 #define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb) 1300 #define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb) 1301 #else 1302 #define register_scsi_cpu() 1303 #define unregister_scsi_cpu() 1304 #endif /* CONFIG_HOTPLUG_CPU */ 1305 1306 MODULE_DESCRIPTION("SCSI core"); 1307 MODULE_LICENSE("GPL"); 1308 1309 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); 1310 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); 1311 1312 static int __init init_scsi(void) 1313 { 1314 int error, i; 1315 1316 error = scsi_init_queue(); 1317 if (error) 1318 return error; 1319 error = scsi_init_procfs(); 1320 if (error) 1321 goto cleanup_queue; 1322 error = scsi_init_devinfo(); 1323 if (error) 1324 goto cleanup_procfs; 1325 error = scsi_init_hosts(); 1326 if (error) 1327 goto cleanup_devlist; 1328 error = scsi_init_sysctl(); 1329 if (error) 1330 goto cleanup_hosts; 1331 error = scsi_sysfs_register(); 1332 if (error) 1333 goto cleanup_sysctl; 1334 1335 for (i = 0; i < NR_CPUS; i++) 1336 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i)); 1337 1338 devfs_mk_dir("scsi"); 1339 open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL); 1340 register_scsi_cpu(); 1341 printk(KERN_NOTICE "SCSI subsystem initialized\n"); 1342 return 0; 1343 1344 cleanup_sysctl: 1345 scsi_exit_sysctl(); 1346 cleanup_hosts: 1347 scsi_exit_hosts(); 1348 cleanup_devlist: 1349 scsi_exit_devinfo(); 1350 cleanup_procfs: 1351 scsi_exit_procfs(); 1352 cleanup_queue: 1353 scsi_exit_queue(); 1354 printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n", 1355 -error); 1356 return error; 1357 } 1358 1359 static void __exit exit_scsi(void) 1360 { 1361 scsi_sysfs_unregister(); 1362 scsi_exit_sysctl(); 1363 scsi_exit_hosts(); 1364 scsi_exit_devinfo(); 1365 devfs_remove("scsi"); 1366 scsi_exit_procfs(); 1367 scsi_exit_queue(); 1368 unregister_scsi_cpu(); 1369 } 1370 1371 subsys_initcall(init_scsi); 1372 module_exit(exit_scsi); 1373