/*
 * scsi.c Copyright (C) 1992 Drew Eckhardt
 *        Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *        Copyright (C) 2002, 2003 Christoph Hellwig
 *
 * generic mid-level SCSI driver
 *	Initial versions: Drew Eckhardt
 *	Subsequent revisions: Eric Youngdale
 *
 * <drew@colorado.edu>
 *
 * Bug correction thanks go to:
 *	Rik Faith <faith@cs.unc.edu>
 *	Tommy Thorn <tthorn>
 *	Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 * add scatter-gather, multiple outstanding requests, and other
 * enhancements.
 *
 * Native multichannel, wide scsi, /proc/scsi and hot plugging
 * support added by Michael Neuffer <mike@i-connect.net>
 *
 * Added request_module("scsi_hostadapter") for kerneld:
 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 * Bjorn Ekwall <bj0rn@blox.se>
 * (changed to kmod)
 *
 * Major improvements to the timeout, abort, and reset processing,
 * as well as performance modifications for large queue depths by
 * Leonard N. Zubkoff <lnz@dandelion.com>
 *
 * Converted cli() code to spinlocks, Ingo Molnar
 *
 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 * out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

static void scsi_done(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)

/*
 * Macro to determine the size of a SCSI command. This macro takes vendor
 * unique commands into account. SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)	(((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
				COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
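
/*
 * Worked example (illustrative only): the top three bits of the opcode
 * select the command group, and groups 0-5 have fixed CDB lengths in
 * the COMMAND_SIZE() table.  READ_10 (opcode 0x28) is group 1, so
 * CDB_SIZE() yields COMMAND_SIZE(0x28) == 10, while a vendor-unique
 * group 6 opcode such as 0xc5 instead trusts whatever length the
 * originator stored in cmd->cmd_len.
 */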

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"Unknown          ",
	"Unknown          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
};
EXPORT_SYMBOL(scsi_device_types);

/*
 * Function:	scsi_allocate_request
 *
 * Purpose:	Allocate a request descriptor.
 *
 * Arguments:	sdev	 - device for which we want a request
 *		gfp_mask - allocation flags passed to the allocator
 *
 * Lock status:	No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:	Pointer to request block.
 */
struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
					   gfp_t gfp_mask)
{
	const int offset = ALIGN(sizeof(struct scsi_request), 4);
	const int size = offset + sizeof(struct request);
	struct scsi_request *sreq;

	sreq = kzalloc(size, gfp_mask);
	if (likely(sreq != NULL)) {
		sreq->sr_request = (struct request *)(((char *)sreq) + offset);
		sreq->sr_device = sdev;
		sreq->sr_host = sdev->host;
		sreq->sr_magic = SCSI_REQ_MAGIC;
		sreq->sr_data_direction = DMA_BIDIRECTIONAL;
	}

	return sreq;
}
EXPORT_SYMBOL(scsi_allocate_request);

void __scsi_release_request(struct scsi_request *sreq)
{
	struct request *req = sreq->sr_request;

	/* unlikely because the tag was usually ended earlier by the
	 * mid-layer. However, for layering reasons ULD's don't end
	 * the tag of commands they generate. */
	if (unlikely(blk_rq_tagged(req))) {
		unsigned long flags;
		struct request_queue *q = req->q;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_queue_end_tag(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	if (likely(sreq->sr_command != NULL)) {
		struct scsi_cmnd *cmd = sreq->sr_command;

		sreq->sr_command = NULL;
		scsi_next_command(cmd);
	}
}

/*
 * Function:	scsi_release_request
 *
 * Purpose:	Release a request descriptor.
 *
 * Arguments:	sreq	- request to release
 *
 * Lock status:	No locks assumed to be held.  This function is SMP-safe.
 */
void scsi_release_request(struct scsi_request *sreq)
{
	__scsi_release_request(sreq);
	kfree(sreq);
}
EXPORT_SYMBOL(scsi_release_request);
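
/*
 * Hypothetical usage sketch (not part of this file): a character-device
 * style caller would pair the two interfaces above roughly like so:
 *
 *	struct scsi_request *sreq;
 *
 *	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *	if (!sreq)
 *		return -ENOMEM;
 *	sreq->sr_data_direction = DMA_FROM_DEVICE;
 *	scsi_wait_req(sreq, cmnd, buffer, bufflen, timeout, retries);
 *	result = sreq->sr_result;
 *	scsi_release_request(sreq);
 *
 * scsi_wait_req() here stands in for whatever issues the request; the
 * important invariant is that every scsi_allocate_request() is matched
 * by exactly one scsi_release_request().
 */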

struct scsi_host_cmd_pool {
	kmem_cache_t	*slab;
	unsigned int	users;
	char		*name;
	unsigned int	slab_flags;
	gfp_t		gfp_mask;
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.name		= "scsi_cmd_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.name		= "scsi_cmd_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

static DEFINE_MUTEX(host_cmd_pool_mutex);

static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
					    gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			       gfp_mask | shost->cmd_pool->gfp_mask);

	if (unlikely(!cmd)) {
		unsigned long flags;

		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (likely(!list_empty(&shost->free_list))) {
			cmd = list_entry(shost->free_list.next,
					 struct scsi_cmnd, list);
			list_del_init(&cmd->list);
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);
	}

	return cmd;
}

/*
 * Function:	scsi_get_command()
 *
 * Purpose:	Allocate and set up a scsi command block
 *
 * Arguments:	dev	 - parent scsi device
 *		gfp_mask - allocator flags
 *
 * Returns:	The allocated scsi command structure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&dev->sdev_gendev))
		return NULL;

	cmd = __scsi_get_command(dev->host, gfp_mask);

	if (likely(cmd != NULL)) {
		unsigned long flags;

		memset(cmd, 0, sizeof(*cmd));
		cmd->device = dev;
		init_timer(&cmd->eh_timeout);
		INIT_LIST_HEAD(&cmd->list);
		spin_lock_irqsave(&dev->list_lock, flags);
		list_add_tail(&cmd->list, &dev->cmd_list);
		spin_unlock_irqrestore(&dev->list_lock, flags);
		cmd->jiffies_at_alloc = jiffies;
	} else
		put_device(&dev->sdev_gendev);

	return cmd;
}
EXPORT_SYMBOL(scsi_get_command);

/*
 * Function:	scsi_put_command()
 *
 * Purpose:	Free a scsi command block
 *
 * Arguments:	cmd	- command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock(&cmd->device->list_lock);
	/* changing locks here, don't need to restore the irq state */
	spin_lock(&shost->free_list_lock);
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		kmem_cache_free(shost->cmd_pool->slab, cmd);

	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
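
/*
 * Illustrative pairing (a sketch, not code from this file): command
 * allocation in the mid-layer always brackets use of the block like
 *
 *	cmd = scsi_get_command(sdev, GFP_ATOMIC);
 *	if (!cmd)
 *		return SCSI_MLQUEUE_HOST_BUSY;	// hypothetical caller policy
 *	...fill in cmd->cmnd, cmd->sc_data_direction, etc...
 *	scsi_put_command(cmd);
 *
 * Note the hidden device reference: scsi_get_command() takes one via
 * get_device() and scsi_put_command() drops it, so the scsi_device
 * cannot vanish while a command allocated against it is outstanding.
 */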

/*
 * Function:	scsi_setup_command_freelist()
 *
 * Purpose:	Setup the command freelist for a scsi host.
 *
 * Arguments:	shost	- host to allocate the freelist for.
 *
 * Returns:	Nothing.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
	struct scsi_host_cmd_pool *pool;
	struct scsi_cmnd *cmd;

	spin_lock_init(&shost->free_list_lock);
	INIT_LIST_HEAD(&shost->free_list);

	/*
	 * Select a command slab for this host and create it if not
	 * yet existent.
	 */
	mutex_lock(&host_cmd_pool_mutex);
	pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
	if (!pool->users) {
		pool->slab = kmem_cache_create(pool->name,
				sizeof(struct scsi_cmnd), 0,
				pool->slab_flags, NULL, NULL);
		if (!pool->slab)
			goto fail;
	}

	pool->users++;
	shost->cmd_pool = pool;
	mutex_unlock(&host_cmd_pool_mutex);

	/*
	 * Get one backup command for this host.
	 */
	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			       GFP_KERNEL | shost->cmd_pool->gfp_mask);
	if (!cmd)
		goto fail2;
	list_add(&cmd->list, &shost->free_list);
	return 0;

 fail2:
	if (!--pool->users)
		kmem_cache_destroy(pool->slab);
	return -ENOMEM;
 fail:
	mutex_unlock(&host_cmd_pool_mutex);
	return -ENOMEM;
}

/*
 * Function:	scsi_destroy_command_freelist()
 *
 * Purpose:	Release the command freelist for a scsi host.
 *
 * Arguments:	shost	- host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		kmem_cache_free(shost->cmd_pool->slab, cmd);
	}

	mutex_lock(&host_cmd_pool_mutex);
	if (!--shost->cmd_pool->users)
		kmem_cache_destroy(shost->cmd_pool->slab);
	mutex_unlock(&host_cmd_pool_mutex);
}

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			sdev = cmd->device;
			sdev_printk(KERN_INFO, sdev, "send ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * spaces to match disposition and cmd->result
			 * output in scsi_log_completion.
			 */
			printk("                 ");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " done = 0x%p, queuecommand 0x%p\n",
					cmd->buffer, cmd->bufflen,
					cmd->done,
					sdev->host->hostt->queuecommand);
			}
		}
	}
}
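
/*
 * A note on the level arithmetic above (a sketch under the assumption
 * that SCSI_LOG_LEVEL() in scsi_logging.h keeps its usual definition):
 * the 32-bit scsi_logging_level word is carved into small bit fields,
 * one per subsystem, so the extraction amounts to
 *
 *	level = (scsi_logging_level >> SCSI_LOG_MLQUEUE_SHIFT)
 *			& ((1 << SCSI_LOG_MLQUEUE_BITS) - 1);
 *
 * With 3-bit fields, for example, writing 4 into the MLQUEUE field
 * enables the most verbose "dump extra junk" case in scsi_log_send().
 */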

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			sdev = cmd->device;
			sdev_printk(KERN_INFO, sdev, "done ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS");
				break;
			case NEEDS_RETRY:
				printk("RETRY  ");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE");
				break;
			case FAILED:
				printk("FAILED ");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT");
				break;
			default:
				printk("UNKNOWN");
			}
			printk(" %8x ", cmd->result);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION) {
				/*
				 * XXX The scsi_print_sense formatting/prefix
				 * doesn't match this function.
				 */
				scsi_print_sense("", cmd);
			}
			if (level > 3) {
				printk(KERN_INFO "scsi host busy %d failed %d\n",
				       sdev->host->host_busy,
				       sdev->host->host_failed);
			}
		}
	}
}
#endif

/*
 * Assign a serial number and pid to the request for error recovery
 * and debugging purposes.  Protected by the Host_Lock of host.
 */
static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	cmd->serial_number = host->cmd_serial_number++;
	if (cmd->serial_number == 0)
		cmd->serial_number = host->cmd_serial_number++;

	cmd->pid = host->cmd_pid++;
	if (cmd->pid == 0)
		cmd->pid = host->cmd_pid++;
}

/*
 * Function:	scsi_dispatch_command
 *
 * Purpose:	Dispatch a command to the low-level driver.
 *
 * Arguments:	cmd - command block we are dispatching.
 *
 * Notes:
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	unsigned long flags = 0;
	unsigned long timeout;
	int rtn = 0;

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		atomic_inc(&cmd->device->iorequest_cnt);
		__scsi_done(cmd);
		/* return 0 (because the command has been processed) */
		goto out;
	}

	/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
	if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
		/*
		 * in SDEV_BLOCK, the command is just put back on the device
		 * queue.  The suspend state has already blocked the queue so
		 * future requests should not occur until the device
		 * transitions out of the suspend state.
		 */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));

		/*
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged on return (it's already stopped)
		 */
		goto out;
	}

	/*
	 * If SCSI-2 or lower, store the LUN value in cmnd.
	 */
	if (cmd->device->scsi_level <= SCSI_2 &&
	    cmd->device->scsi_level != SCSI_UNKNOWN) {
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}
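
	/*
	 * Example of the encoding above (illustrative): for LUN 2 the
	 * three high bits of CDB byte 1 become 010, i.e.
	 *	cmnd[1] = (cmnd[1] & 0x1f) | 0x40;
	 * SCSI-3 and later devices identify the LUN purely from the
	 * transport layer, so the byte is left untouched for them.
	 */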

	/*
	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
	 * we can avoid the drive not being ready.
	 */
	timeout = host->last_reset + MIN_RESET_DELAY;

	if (host->resetting && time_before(jiffies, timeout)) {
		int ticks_remaining = timeout - jiffies;
		/*
		 * NOTE: This may be executed from within an interrupt
		 * handler!  This is bad, but for now, it'll do.  The irq
		 * level of the interrupt handler has been masked out by the
		 * platform dependent interrupt handling code already, so the
		 * sti() here will not cause another call to the SCSI host's
		 * interrupt handler (assuming there is one irq-level per
		 * host).
		 */
		while (--ticks_remaining >= 0)
			mdelay(1 + 999 / HZ);
		host->resetting = 0;
	}

	/*
	 * AK: unlikely race here: for some reason the timer could
	 * expire before the serial number is set up below.
	 */
	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

	scsi_log_send(cmd);

	/*
	 * We will use a queued command if possible, otherwise we will
	 * emulate the queuing and calling of completion function ourselves.
	 */
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3,
				printk("queuecommand : command too long.\n"));
		cmd->result = (DID_ABORT << 16);

		scsi_done(cmd);
		goto out;
	}

	spin_lock_irqsave(host->host_lock, flags);
	scsi_cmd_get_serial(host, cmd);

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
	} else {
		rtn = host->hostt->queuecommand(cmd, scsi_done);
	}
	spin_unlock_irqrestore(host->host_lock, flags);
	if (rtn) {
		if (scsi_delete_timer(cmd)) {
			atomic_inc(&cmd->device->iodone_cnt);
			scsi_queue_insert(cmd,
					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
					  rtn : SCSI_MLQUEUE_HOST_BUSY);
		}
		SCSI_LOG_MLQUEUE(3,
				 printk("queuecommand : request rejected\n"));
	}

 out:
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
	return rtn;
}
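
/*
 * The queuecommand() contract that scsi_dispatch_cmd() relies on, shown
 * as a hypothetical minimal LLDD (a sketch only; foo_* names are made up):
 *
 *	static int foo_queuecommand(struct scsi_cmnd *cmd,
 *				    void (*done)(struct scsi_cmnd *))
 *	{
 *		if (foo_hw_slots_full(cmd->device->host))
 *			return SCSI_MLQUEUE_HOST_BUSY;	// mid-layer requeues
 *		cmd->scsi_done = done;			// call on completion
 *		foo_hw_submit(cmd);
 *		return 0;				// command accepted
 *	}
 *
 * A non-zero return lands in the rtn handling above: the timer is
 * deleted and the command re-queued as device- or host-busy.
 */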

/*
 * Function:	scsi_init_cmd_from_req
 *
 * Purpose:	Initialize a struct scsi_cmnd from a struct scsi_request
 *
 * Arguments:	cmd	- command descriptor.
 *		sreq	- Request from the queue.
 *
 * Lock status:	None needed.
 *
 * Returns:	Nothing.
 *
 * Notes:	Mainly transfer data from the request structure to the
 *		command structure.  The request structure is allocated
 *		using the normal memory allocator, and requests can pile
 *		up to more or less any depth.  The command structure represents
 *		a consumable resource, as these are allocated into a pool
 *		when the SCSI subsystem initializes.  The preallocation is
 *		required so that in low-memory situations a disk I/O request
 *		won't cause the memory manager to try and write out a page.
 *		The request structure is generally used by ioctls and character
 *		devices.
 */
void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
{
	sreq->sr_command = cmd;

	cmd->cmd_len = sreq->sr_cmd_len;
	cmd->use_sg = sreq->sr_use_sg;

	cmd->request = sreq->sr_request;
	memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
	cmd->serial_number = 0;
	cmd->bufflen = sreq->sr_bufflen;
	cmd->buffer = sreq->sr_buffer;
	cmd->retries = 0;
	cmd->allowed = sreq->sr_allowed;
	cmd->done = sreq->sr_done;
	cmd->timeout_per_command = sreq->sr_timeout_per_command;
	cmd->sc_data_direction = sreq->sr_data_direction;
	cmd->sglist_len = sreq->sr_sglist_len;
	cmd->underflow = sreq->sr_underflow;
	cmd->sc_request = sreq;
	memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd));

	/*
	 * Zero the sense buffer.  Some host adapters automatically request
	 * sense on error.  0 is not a valid sense code.
	 */
	memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
	cmd->request_buffer = sreq->sr_buffer;
	cmd->request_bufflen = sreq->sr_bufflen;
	cmd->old_use_sg = cmd->use_sg;
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;

	/*
	 * Clear any stale result; the command has not been run yet.
	 */
	cmd->result = 0;

	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
}

/*
 * Per-CPU I/O completion queue.
 */
static DEFINE_PER_CPU(struct list_head, scsi_done_q);

/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * This function is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * the command to the done queue for further processing.
 *
 * This is the producer of the done queue who enqueues at the tail.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	/*
	 * We don't have to worry about this one timing out any more.
	 * If we are unable to remove the timer, then the command
	 * has already timed out.  In which case, we have no choice but to
	 * let the timeout function run, as we have no idea where in fact
	 * that function could really be.  It might be on another processor,
	 * etc, etc.
	 */
	if (!scsi_delete_timer(cmd))
		return;
	__scsi_done(cmd);
}

/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	BUG_ON(!rq);

	/*
	 * The uptodate/nbytes values don't matter, as we allow partial
	 * completes and thus will check this in the softirq callback
	 */
	rq->completion_data = cmd;
	blk_complete_request(rq);
}
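
/*
 * Completion flow, for orientation (descriptive only): an LLDD calls the
 * done() pointer it was handed (scsi_done above), which cancels the
 * command timer and hands the request to the block layer via
 * blk_complete_request().  The block layer's completion softirq then
 * calls back into the SCSI midlayer, which decides the disposition and
 * ultimately runs scsi_finish_command() below in softirq context rather
 * than in the LLDD's hard-irq context.
 */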

/*
 * Function:	scsi_retry_command
 *
 * Purpose:	Send a command back to the low level to be retried.
 *
 * Notes:	This command is always executed in the context of the
 *		bottom half handler, or the error handler thread. Low
 *		level drivers should not become re-entrant as a result of
 *		this.
 */
int scsi_retry_command(struct scsi_cmnd *cmd)
{
	/*
	 * Restore the SCSI command state.
	 */
	scsi_setup_cmd_retry(cmd);

	/*
	 * Zero the sense information from the last time we tried
	 * this command.
	 */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

	return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
}

/*
 * Function:	scsi_finish_command
 *
 * Purpose:	Pass command off to upper layer for finishing of I/O
 *		request, waking processes that are waiting on results,
 *		etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_request *sreq;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 *
	 * XXX(hch): What about locking?
	 */
	shost->host_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	/*
	 * We can get here with use_sg=0, causing a panic in the upper level
	 */
	cmd->use_sg = cmd->old_use_sg;

	/*
	 * If there is an associated request structure, copy the data over
	 * before we call the completion function.
	 */
	sreq = cmd->sc_request;
	if (sreq) {
		sreq->sr_result = sreq->sr_command->result;
		if (sreq->sr_result) {
			memcpy(sreq->sr_sense_buffer,
			       sreq->sr_command->sense_buffer,
			       sizeof(sreq->sr_sense_buffer));
		}
	}

	cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);

/*
 * Function:	scsi_adjust_queue_depth()
 *
 * Purpose:	Allow low level drivers to tell us to change the queue depth
 *		on a specific SCSI device
 *
 * Arguments:	sdev	- SCSI Device in question
 *		tagged	- Do we use tagged queueing (non-0) or do we treat
 *			  this device as an untagged device (0)
 *		tags	- Number of tags allowed if tagged queueing enabled,
 *			  or number of commands the low level driver can
 *			  queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		the right thing depending on whether or not the device is
 *		currently active and whether or not it even has the
 *		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/* Check to see if the queue is managed by the block layer
	 * if it is, and we fail to adjust the depth, exit */
	if (blk_queue_tagged(sdev->request_queue) &&
	    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
		goto out;

	sdev->queue_depth = tags;
	switch (tagged) {
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		sdev_printk(KERN_WARNING, sdev,
			    "scsi_adjust_queue_depth, bad queue type, "
			    "disabled\n");
	case 0:
		sdev->ordered_tags = sdev->simple_tags = 0;
		sdev->queue_depth = tags;
		break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
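
/*
 * Typical call site, as a hypothetical sketch (foo_* names are made up):
 * an LLDD usually picks the queueing model from its slave_configure hook.
 *
 *	static int foo_slave_configure(struct scsi_device *sdev)
 *	{
 *		if (sdev->tagged_supported)
 *			scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 64);
 *		else
 *			scsi_adjust_queue_depth(sdev, 0,
 *						sdev->host->cmd_per_lun);
 *		return 0;
 *	}
 */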

/*
 * Function:	scsi_track_queue_full()
 *
 * Purpose:	This function will track successive QUEUE_FULL events on a
 *		specific SCSI device to determine if and when there is a
 *		need to adjust the queue depth on the device.
 *
 * Arguments:	sdev	- SCSI Device in question
 *		depth	- Current number of outstanding SCSI commands on
 *			  this device, not counting the one returned as
 *			  QUEUE_FULL.
 *
 * Returns:	0 - No change needed
 *		>0 - Adjust queue depth to this new depth
 *		-1 - Drop back to untagged operation using host->cmd_per_lun
 *		     as the untagged command depth
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		"The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
	if ((jiffies >> 4) == sdev->last_queue_full_time)
		return 0;

	sdev->last_queue_full_time = (jiffies >> 4);
	if (sdev->last_queue_full_depth != depth) {
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
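
/*
 * Sketch of the intended caller (hypothetical, not from this file): an
 * LLDD that sees a QUEUE FULL status in its completion handler reports
 * how many commands were actually in flight at that moment:
 *
 *	if (status_byte(cmd->result) == QUEUE_FULL) {
 *		depth = scsi_track_queue_full(cmd->device,
 *					      foo_active_cmds(cmd->device));
 *		if (depth > 0)
 *			...	// the midlayer already resized the queue
 *	}
 *
 * Note the >>4 time check above: events within the same 16-jiffy window
 * are coalesced, so a burst of QUEUE FULLs counts only once.
 */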

/**
 * scsi_device_get - get an additional reference to a scsi_device
 * @sdev:	device to get a reference to
 *
 * Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
	if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
		return -ENXIO;
	if (!get_device(&sdev->sdev_gendev))
		return -ENXIO;
	if (!try_module_get(sdev->host->hostt->module)) {
		put_device(&sdev->sdev_gendev);
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put - release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Release a reference to the scsi_device and decrements the use count
 * of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
	module_put(sdev->host->hostt->module);
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);

/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
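
/*
 * shost_for_each_device() (scsi_device.h) wraps the iterator above, and
 * the reference juggling is invisible at the call site.  A sketch:
 *
 *	struct scsi_device *sdev;
 *
 *	shost_for_each_device(sdev, shost)
 *		foo_examine(sdev);	// hypothetical per-device work
 *
 * Each pass holds a reference on the current device; breaking out of
 * the loop early leaves one reference held, which the caller must then
 * drop with scsi_device_put(sdev).
 */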

/**
 * starget_for_each_device - helper to walk all devices of a target
 * @starget:	target whose devices we want to iterate over.
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
			     void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(starget_for_each_device);

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
						   uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
		if (sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
						 uint lun)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup_by_target(starget, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any
 * access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
					 uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->channel == channel && sdev->id == id &&
		    sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
				       uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
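
/*
 * Lookup usage sketch (illustrative): the locked variants return with a
 * reference held, so every successful lookup must be balanced:
 *
 *	struct scsi_device *sdev;
 *
 *	sdev = scsi_device_lookup(shost, 0, 1, 0); // channel 0, id 1, lun 0
 *	if (sdev) {
 *		foo_poke(sdev);		// hypothetical use of the device
 *		scsi_device_put(sdev);
 *	}
 *
 * The __-prefixed variants skip the reference and are only safe while
 * the caller holds shost->host_lock.
 */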

/**
 * scsi_device_cancel - cancel outstanding IO to this device
 * @sdev:	Pointer to struct scsi_device
 * @recovery:	Boolean instructing function to recover device or not.
 *
 **/
int scsi_device_cancel(struct scsi_device *sdev, int recovery)
{
	struct scsi_cmnd *scmd;
	LIST_HEAD(active_list);
	struct list_head *lh, *lh_sf;
	unsigned long flags;

	scsi_device_set_state(sdev, SDEV_CANCEL);

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(scmd, &sdev->cmd_list, list) {
		if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) {
			/*
			 * If we are unable to remove the timer, it means
			 * that the command has already timed out or
			 * finished.
			 */
			if (!scsi_delete_timer(scmd))
				continue;
			list_add_tail(&scmd->eh_entry, &active_list);
		}
	}
	spin_unlock_irqrestore(&sdev->list_lock, flags);

	if (!list_empty(&active_list)) {
		list_for_each_safe(lh, lh_sf, &active_list) {
			scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
			list_del_init(lh);
			if (recovery &&
			    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
				scmd->result = (DID_ABORT << 16);
				scsi_finish_command(scmd);
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(scsi_device_cancel);

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
	int error, i;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

static void __exit exit_scsi(void)
{
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);