/*
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *         Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *      <drew@colorado.edu>
 *
 *  Bug correction thanks go to:
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding requests, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

static void scsi_done(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)

/*
 * Macro to determine the size of SCSI command. This macro takes vendor
 * unique commands into account. SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)   (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
                                COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif
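
/*
 * Illustrative sketch, not part of this file: composing a logging level at
 * boot.  SCSI_LOG_MLQUEUE_SHIFT and SCSI_LOG_MLCOMPLETE_SHIFT come from
 * scsi_logging.h; the OR below assumes both fields are still zero.  The
 * runtime "/proc interface" mentioned above is the sysctl registered by
 * scsi_init_sysctl() (historically /proc/sys/dev/scsi/logging_level).
 */
#if 0   /* example only */
static void example_enable_ml_logging(void)
{
        /* level 2: log opcode + command for every queued command ... */
        scsi_logging_level |= 2 << SCSI_LOG_MLQUEUE_SHIFT;
        /* ... and for every completion, not just failures */
        scsi_logging_level |= 2 << SCSI_LOG_MLCOMPLETE_SHIFT;
}
#endif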

const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
        "Direct-Access    ",
        "Sequential-Access",
        "Printer          ",
        "Processor        ",
        "WORM             ",
        "CD-ROM           ",
        "Scanner          ",
        "Optical Device   ",
        "Medium Changer   ",
        "Communications   ",
        "Unknown          ",
        "Unknown          ",
        "RAID             ",
        "Enclosure        ",
        "Direct-Access-RBC",
};
EXPORT_SYMBOL(scsi_device_types);

/*
 * Function:    scsi_allocate_request
 *
 * Purpose:     Allocate a request descriptor.
 *
 * Arguments:   sdev     - device for which we want a request
 *              gfp_mask - allocation flags passed to kzalloc
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:     Pointer to request block.
 */
struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
                                           gfp_t gfp_mask)
{
        const int offset = ALIGN(sizeof(struct scsi_request), 4);
        const int size = offset + sizeof(struct request);
        struct scsi_request *sreq;

        sreq = kzalloc(size, gfp_mask);
        if (likely(sreq != NULL)) {
                sreq->sr_request = (struct request *)(((char *)sreq) + offset);
                sreq->sr_device = sdev;
                sreq->sr_host = sdev->host;
                sreq->sr_magic = SCSI_REQ_MAGIC;
                sreq->sr_data_direction = DMA_BIDIRECTIONAL;
        }

        return sreq;
}
EXPORT_SYMBOL(scsi_allocate_request);

void __scsi_release_request(struct scsi_request *sreq)
{
        struct request *req = sreq->sr_request;

        /* unlikely because the tag was usually ended earlier by the
         * mid-layer. However, for layering reasons ULD's don't end
         * the tag of commands they generate. */
        if (unlikely(blk_rq_tagged(req))) {
                unsigned long flags;
                struct request_queue *q = req->q;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_queue_end_tag(q, req);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        if (likely(sreq->sr_command != NULL)) {
                struct scsi_cmnd *cmd = sreq->sr_command;

                sreq->sr_command = NULL;
                scsi_next_command(cmd);
        }
}

/*
 * Function:    scsi_release_request
 *
 * Purpose:     Release a request descriptor.
 *
 * Arguments:   sreq    - request to release
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 */
void scsi_release_request(struct scsi_request *sreq)
{
        __scsi_release_request(sreq);
        kfree(sreq);
}
EXPORT_SYMBOL(scsi_release_request);
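
/*
 * Illustrative sketch, not part of this file: how an upper-level driver of
 * this era might use the scsi_request API for a simple command.  Assumes
 * the synchronous scsi_wait_req() helper from <scsi/scsi_request.h>;
 * "example_" names are hypothetical and error handling is minimal.
 */
#if 0   /* example only */
static int example_test_unit_ready(struct scsi_device *sdev)
{
        unsigned char scsi_cmd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };
        struct scsi_request *sreq;
        int result;

        sreq = scsi_allocate_request(sdev, GFP_KERNEL);
        if (!sreq)
                return -ENOMEM;

        sreq->sr_data_direction = DMA_NONE;
        scsi_wait_req(sreq, scsi_cmd, NULL, 0, 30 * HZ, 3);

        result = sreq->sr_result;
        scsi_release_request(sreq);
        return result;
}
#endif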

struct scsi_host_cmd_pool {
        kmem_cache_t    *slab;
        unsigned int    users;
        char            *name;
        unsigned int    slab_flags;
        gfp_t           gfp_mask;
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
        .name           = "scsi_cmd_cache",
        .slab_flags     = SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
        .name           = "scsi_cmd_cache(DMA)",
        .slab_flags     = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
        .gfp_mask       = __GFP_DMA,
};

static DEFINE_MUTEX(host_cmd_pool_mutex);

static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
                                            gfp_t gfp_mask)
{
        struct scsi_cmnd *cmd;

        cmd = kmem_cache_alloc(shost->cmd_pool->slab,
                               gfp_mask | shost->cmd_pool->gfp_mask);

        if (unlikely(!cmd)) {
                unsigned long flags;

                spin_lock_irqsave(&shost->free_list_lock, flags);
                if (likely(!list_empty(&shost->free_list))) {
                        cmd = list_entry(shost->free_list.next,
                                         struct scsi_cmnd, list);
                        list_del_init(&cmd->list);
                }
                spin_unlock_irqrestore(&shost->free_list_lock, flags);
        }

        return cmd;
}

/*
 * Function:    scsi_get_command()
 *
 * Purpose:     Allocate and setup a scsi command block
 *
 * Arguments:   dev      - parent scsi device
 *              gfp_mask - allocator flags
 *
 * Returns:     The allocated scsi command structure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
        struct scsi_cmnd *cmd;

        /* Bail if we can't get a reference to the device */
        if (!get_device(&dev->sdev_gendev))
                return NULL;

        cmd = __scsi_get_command(dev->host, gfp_mask);

        if (likely(cmd != NULL)) {
                unsigned long flags;

                memset(cmd, 0, sizeof(*cmd));
                cmd->device = dev;
                init_timer(&cmd->eh_timeout);
                INIT_LIST_HEAD(&cmd->list);
                spin_lock_irqsave(&dev->list_lock, flags);
                list_add_tail(&cmd->list, &dev->cmd_list);
                spin_unlock_irqrestore(&dev->list_lock, flags);
                cmd->jiffies_at_alloc = jiffies;
        } else
                put_device(&dev->sdev_gendev);

        return cmd;
}
EXPORT_SYMBOL(scsi_get_command);

/*
 * Function:    scsi_put_command()
 *
 * Purpose:     Free a scsi command block
 *
 * Arguments:   cmd     - command block to free
 *
 * Returns:     Nothing.
 *
 * Notes:       The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        /* serious error if the command hasn't come from a device list */
        spin_lock_irqsave(&cmd->device->list_lock, flags);
        BUG_ON(list_empty(&cmd->list));
        list_del_init(&cmd->list);
        spin_unlock(&cmd->device->list_lock);
        /* changing locks here, don't need to restore the irq state */
        spin_lock(&shost->free_list_lock);
        if (unlikely(list_empty(&shost->free_list))) {
                list_add(&cmd->list, &shost->free_list);
                cmd = NULL;
        }
        spin_unlock_irqrestore(&shost->free_list_lock, flags);

        if (likely(cmd != NULL))
                kmem_cache_free(shost->cmd_pool->slab, cmd);

        put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
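
/*
 * Illustrative sketch, not part of this file: the scsi_cmnd allocation
 * lifecycle.  The real callers are the request-preparation path in
 * scsi_lib.c and the error handler; the hypothetical caller below only
 * shows the get/put pairing and the device reference it manages.
 */
#if 0   /* example only */
static void example_cmnd_lifecycle(struct scsi_device *sdev)
{
        struct scsi_cmnd *cmd;

        cmd = scsi_get_command(sdev, GFP_KERNEL);
        if (!cmd)
                return;         /* out of memory, or device going away */

        /* ... fill in cmd->cmnd, cmd->sc_data_direction, cmd->done ... */

        /* returns the command and drops the device reference */
        scsi_put_command(cmd);
}
#endif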

/*
 * Function:    scsi_setup_command_freelist()
 *
 * Purpose:     Setup the command freelist for a scsi host.
 *
 * Arguments:   shost   - host to allocate the freelist for.
 *
 * Returns:     0 on success, -ENOMEM if the slab or the backup command
 *              allocation fails.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
        struct scsi_host_cmd_pool *pool;
        struct scsi_cmnd *cmd;

        spin_lock_init(&shost->free_list_lock);
        INIT_LIST_HEAD(&shost->free_list);

        /*
         * Select a command slab for this host and create it if not
         * yet existent.
         */
        mutex_lock(&host_cmd_pool_mutex);
        pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
        if (!pool->users) {
                pool->slab = kmem_cache_create(pool->name,
                                sizeof(struct scsi_cmnd), 0,
                                pool->slab_flags, NULL, NULL);
                if (!pool->slab)
                        goto fail;
        }

        pool->users++;
        shost->cmd_pool = pool;
        mutex_unlock(&host_cmd_pool_mutex);

        /*
         * Get one backup command for this host.
         */
        cmd = kmem_cache_alloc(shost->cmd_pool->slab,
                               GFP_KERNEL | shost->cmd_pool->gfp_mask);
        if (!cmd)
                goto fail2;
        list_add(&cmd->list, &shost->free_list);
        return 0;

 fail2:
        /* retake the mutex: pool->users must not be modified unlocked */
        mutex_lock(&host_cmd_pool_mutex);
        if (!--pool->users)
                kmem_cache_destroy(pool->slab);
        /* fall through to unlock and return */
 fail:
        mutex_unlock(&host_cmd_pool_mutex);
        return -ENOMEM;
}

/*
 * Function:    scsi_destroy_command_freelist()
 *
 * Purpose:     Release the command freelist for a scsi host.
 *
 * Arguments:   shost   - host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
        while (!list_empty(&shost->free_list)) {
                struct scsi_cmnd *cmd;

                cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
                list_del_init(&cmd->list);
                kmem_cache_free(shost->cmd_pool->slab, cmd);
        }

        mutex_lock(&host_cmd_pool_mutex);
        if (!--shost->cmd_pool->users)
                kmem_cache_destroy(shost->cmd_pool->slab);
        mutex_unlock(&host_cmd_pool_mutex);
}

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
        unsigned int level;
        struct scsi_device *sdev;

        /*
         * If ML QUEUE log level is greater than or equal to:
         *
         * 1: nothing (match completion)
         *
         * 2: log opcode + command of all commands
         *
         * 3: same as 2 plus dump cmd address
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
                                       SCSI_LOG_MLQUEUE_BITS);
                if (level > 1) {
                        sdev = cmd->device;
                        sdev_printk(KERN_INFO, sdev, "send ");
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
                         * spaces to match disposition and cmd->result
                         * output in scsi_log_completion.
                         */
                        printk(" ");
                        scsi_print_command(cmd);
                        if (level > 3) {
                                printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
                                       " done = 0x%p, queuecommand 0x%p\n",
                                       cmd->buffer, cmd->bufflen,
                                       cmd->done,
                                       sdev->host->hostt->queuecommand);
                        }
                }
        }
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
        unsigned int level;
        struct scsi_device *sdev;

        /*
         * If ML COMPLETE log level is greater than or equal to:
         *
         * 1: log disposition, result, opcode + command, and conditionally
         * sense data for failures or non SUCCESS dispositions.
         *
         * 2: same as 1 but for all command completions.
         *
         * 3: same as 2 plus dump cmd address
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                       SCSI_LOG_MLCOMPLETE_BITS);
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
                        sdev = cmd->device;
                        sdev_printk(KERN_INFO, sdev, "done ");
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
                         * Dump truncated values, so we usually fit within
                         * 80 chars.
                         */
                        switch (disposition) {
                        case SUCCESS:
                                printk("SUCCESS");
                                break;
                        case NEEDS_RETRY:
                                printk("RETRY  ");
                                break;
                        case ADD_TO_MLQUEUE:
                                printk("MLQUEUE");
                                break;
                        case FAILED:
                                printk("FAILED ");
                                break;
                        case TIMEOUT_ERROR:
                                /*
                                 * If called via scsi_times_out.
                                 */
                                printk("TIMEOUT");
                                break;
                        default:
                                printk("UNKNOWN");
                        }
                        printk(" %8x ", cmd->result);
                        scsi_print_command(cmd);
                        if (status_byte(cmd->result) & CHECK_CONDITION) {
                                /*
                                 * XXX The scsi_print_sense formatting/prefix
                                 * doesn't match this function.
                                 */
                                scsi_print_sense("", cmd);
                        }
                        if (level > 3) {
                                printk(KERN_INFO "scsi host busy %d failed %d\n",
                                       sdev->host->host_busy,
                                       sdev->host->host_failed);
                        }
                }
        }
}
#endif

/*
 * Assign a serial number and pid to the request for error recovery
 * and debugging purposes.  Protected by the host's host_lock.
 */
static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
        cmd->serial_number = host->cmd_serial_number++;
        if (cmd->serial_number == 0)
                cmd->serial_number = host->cmd_serial_number++;

        cmd->pid = host->cmd_pid++;
        if (cmd->pid == 0)
                cmd->pid = host->cmd_pid++;
}

/*
 * Function:    scsi_dispatch_cmd
 *
 * Purpose:     Dispatch a command to the low-level driver.
 *
 * Arguments:   cmd     - command block we are dispatching.
 *
 * Returns:     0 if the command was accepted (or completed with an error),
 *              otherwise the non-zero rejection code returned by the host's
 *              queuecommand method.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        unsigned long flags = 0;
        unsigned long timeout;
        int rtn = 0;

        /* check if the device is still usable */
        if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
                /* in SDEV_DEL we error all commands. DID_NO_CONNECT
                 * returns an immediate error upwards, and signals
                 * that the device is no longer present */
                cmd->result = DID_NO_CONNECT << 16;
                atomic_inc(&cmd->device->iorequest_cnt);
                __scsi_done(cmd);
                /* return 0 (because the command has been processed) */
                goto out;
        }

        /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
        if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
                /*
                 * in SDEV_BLOCK, the command is just put back on the device
                 * queue.  The suspend state has already blocked the queue so
                 * future requests should not occur until the device
                 * transitions out of the suspend state.
                 */
                scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

                SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked\n"));

                /*
                 * NOTE: rtn is still zero here because we don't need the
                 * queue to be plugged on return (it's already stopped)
                 */
                goto out;
        }

        /*
         * If SCSI-2 or lower, store the LUN value in cmnd.
         */
        if (cmd->device->scsi_level <= SCSI_2) {
                cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
                               (cmd->device->lun << 5 & 0xe0);
        }

        /*
         * We will wait MIN_RESET_DELAY clock ticks after the last reset so
         * we can avoid the drive not being ready.
         */
        timeout = host->last_reset + MIN_RESET_DELAY;

        if (host->resetting && time_before(jiffies, timeout)) {
                int ticks_remaining = timeout - jiffies;
                /*
                 * NOTE: This may be executed from within an interrupt
                 * handler!  This is bad, but for now, it'll do.  The irq
                 * level of the interrupt handler has been masked out by the
                 * platform dependent interrupt handling code already, so
                 * spinning here will not cause another call to the SCSI
                 * host's interrupt handler (assuming there is one irq-level
                 * per host).
                 */
                while (--ticks_remaining >= 0)
                        mdelay(1 + 999 / HZ);
                host->resetting = 0;
        }

        /*
         * AK: unlikely race here: for some reason the timer could
         * expire before the serial number is set up below.
         */
        scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

        scsi_log_send(cmd);

        /*
         * We will use a queued command if possible, otherwise we will
         * emulate the queuing and calling of completion function ourselves.
         */
        atomic_inc(&cmd->device->iorequest_cnt);

        /*
         * Before we queue this command, check if the command
         * length exceeds what the host adapter can handle.
         */
        if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
                SCSI_LOG_MLQUEUE(3,
                                 printk("queuecommand : command too long.\n"));
                cmd->result = (DID_ABORT << 16);

                scsi_done(cmd);
                goto out;
        }

        spin_lock_irqsave(host->host_lock, flags);
        scsi_cmd_get_serial(host, cmd);

        if (unlikely(host->shost_state == SHOST_DEL)) {
                cmd->result = (DID_NO_CONNECT << 16);
                scsi_done(cmd);
        } else {
                rtn = host->hostt->queuecommand(cmd, scsi_done);
        }
        spin_unlock_irqrestore(host->host_lock, flags);
        if (rtn) {
                if (scsi_delete_timer(cmd)) {
                        atomic_inc(&cmd->device->iodone_cnt);
                        scsi_queue_insert(cmd,
                                          (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
                                          rtn : SCSI_MLQUEUE_HOST_BUSY);
                }
                SCSI_LOG_MLQUEUE(3,
                                 printk("queuecommand : request rejected\n"));
        }

 out:
        SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmd()\n"));
        return rtn;
}
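
/*
 * Illustrative sketch, not part of this file: the queuecommand() contract
 * that scsi_dispatch_cmd() relies on.  A hypothetical LLDD either accepts
 * the command and returns 0, or rejects it with a SCSI_MLQUEUE_*_BUSY code
 * so the mid-layer requeues it; example_hw_busy()/example_hw_submit() are
 * stand-ins for real hardware access.
 */
#if 0   /* example only */
static int example_queuecommand(struct scsi_cmnd *cmd,
                                void (*done)(struct scsi_cmnd *))
{
        if (example_hw_busy(cmd->device->host))
                return SCSI_MLQUEUE_HOST_BUSY;  /* mid-layer retries later */

        cmd->scsi_done = done;          /* saved for the completion path */
        example_hw_submit(cmd);         /* hardware completion calls done(cmd) */
        return 0;
}
#endif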

/*
 * Function:    scsi_init_cmd_from_req
 *
 * Purpose:     Initialize a struct scsi_cmnd from a struct scsi_request
 *
 * Arguments:   cmd     - command descriptor.
 *              sreq    - Request from the queue.
 *
 * Lock status: None needed.
 *
 * Returns:     Nothing.
 *
 * Notes:       Mainly transfer data from the request structure to the
 *              command structure.  The request structure is allocated
 *              using the normal memory allocator, and requests can pile
 *              up to more or less any depth.  The command structure
 *              represents a consumable resource, as these are allocated
 *              into a pool when the SCSI subsystem initializes.  The
 *              preallocation is required so that in low-memory situations
 *              a disk I/O request won't cause the memory manager to try
 *              and write out a page.  The request structure is generally
 *              used by ioctls and character devices.
 */
void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
{
        sreq->sr_command = cmd;

        cmd->cmd_len = sreq->sr_cmd_len;
        cmd->use_sg = sreq->sr_use_sg;

        cmd->request = sreq->sr_request;
        memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
        cmd->serial_number = 0;
        cmd->bufflen = sreq->sr_bufflen;
        cmd->buffer = sreq->sr_buffer;
        cmd->retries = 0;
        cmd->allowed = sreq->sr_allowed;
        cmd->done = sreq->sr_done;
        cmd->timeout_per_command = sreq->sr_timeout_per_command;
        cmd->sc_data_direction = sreq->sr_data_direction;
        cmd->sglist_len = sreq->sr_sglist_len;
        cmd->underflow = sreq->sr_underflow;
        cmd->sc_request = sreq;
        memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd));

        /*
         * Zero the sense buffer.  Some host adapters automatically request
         * sense on error.  0 is not a valid sense code.
         */
        memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
        cmd->request_buffer = sreq->sr_buffer;
        cmd->request_bufflen = sreq->sr_bufflen;
        cmd->old_use_sg = cmd->use_sg;
        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;

        /*
         * Clear any stale result before the command is (re)issued.
         */
        cmd->result = 0;

        SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
}

/*
 * Per-CPU I/O completion queue.
 */
static DEFINE_PER_CPU(struct list_head, scsi_done_q);

/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * This function is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * the command to the done queue for further processing.
 *
 * This is the producer of the done queue who enqueues at the tail.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
        /*
         * We don't have to worry about this one timing out any more.
         * If we are unable to remove the timer, then the command
         * has already timed out.  In which case, we have no choice but to
         * let the timeout function run, as we have no idea where in fact
         * that function could really be.  It might be on another processor,
         * etc, etc.
         */
        if (!scsi_delete_timer(cmd))
                return;
        __scsi_done(cmd);
}

/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
        struct request *rq = cmd->request;

        /*
         * Set the serial numbers back to zero
         */
        cmd->serial_number = 0;

        atomic_inc(&cmd->device->iodone_cnt);
        if (cmd->result)
                atomic_inc(&cmd->device->ioerr_cnt);

        BUG_ON(!rq);

        /*
         * The uptodate/nbytes values don't matter, as we allow partial
         * completes and thus will check this in the softirq callback
         */
        rq->completion_data = cmd;
        blk_complete_request(rq);
}
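
/*
 * Illustrative sketch, not part of this file: the completion half of the
 * queuecommand() contract.  A hypothetical LLDD interrupt handler fills in
 * cmd->result and invokes the saved done callback (scsi_done above), which
 * kills the timer and defers the rest to the block-layer softirq.
 */
#if 0   /* example only */
static void example_irq_complete(struct scsi_cmnd *cmd, u8 status)
{
        /* host byte DID_OK plus the SCSI status byte from the device */
        cmd->result = (DID_OK << 16) | status;
        cmd->scsi_done(cmd);            /* enqueues for softirq completion */
}
#endif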

/*
 * Function:    scsi_retry_command
 *
 * Purpose:     Send a command back to the low level to be retried.
 *
 * Notes:       This command is always executed in the context of the
 *              bottom half handler, or the error handler thread.  Low
 *              level drivers should not become re-entrant as a result of
 *              this.
 */
int scsi_retry_command(struct scsi_cmnd *cmd)
{
        /*
         * Restore the SCSI command state.
         */
        scsi_setup_cmd_retry(cmd);

        /*
         * Zero the sense information from the last time we tried
         * this command.
         */
        memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

        return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
}

/*
 * Function:    scsi_finish_command
 *
 * Purpose:     Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct Scsi_Host *shost = sdev->host;
        struct scsi_request *sreq;

        scsi_device_unbusy(sdev);

        /*
         * Clear the flags which say that the device/host is no longer
         * capable of accepting new commands.  These are set in scsi_lib.c
         * for both the queue full condition on a device, and for a
         * host full condition on the host.
         *
         * XXX(hch): What about locking?
         */
        shost->host_blocked = 0;
        sdev->device_blocked = 0;

        /*
         * If we have valid sense information, then some kind of recovery
         * must have taken place.  Make a note of this.
         */
        if (SCSI_SENSE_VALID(cmd))
                cmd->result |= (DRIVER_SENSE << 24);

        SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
                                "Notifying upper driver of completion "
                                "(result %x)\n", cmd->result));

        /*
         * We can get here with use_sg=0, causing a panic in the upper level
         */
        cmd->use_sg = cmd->old_use_sg;

        /*
         * If there is an associated request structure, copy the data over
         * before we call the completion function.
         */
        sreq = cmd->sc_request;
        if (sreq) {
                sreq->sr_result = sreq->sr_command->result;
                if (sreq->sr_result) {
                        memcpy(sreq->sr_sense_buffer,
                               sreq->sr_command->sense_buffer,
                               sizeof(sreq->sr_sense_buffer));
                }
        }

        cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);

/*
 * Function:    scsi_adjust_queue_depth()
 *
 * Purpose:     Allow low level drivers to tell us to change the queue depth
 *              on a specific SCSI device
 *
 * Arguments:   sdev    - SCSI Device in question
 *              tagged  - Do we use tagged queueing (non-0) or do we treat
 *                        this device as an untagged device (0)
 *              tags    - Number of tags allowed if tagged queueing enabled,
 *                        or number of commands the low level driver can
 *                        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:     Nothing
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              the right thing depending on whether or not the device is
 *              currently active and whether or not it even has the
 *              command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
        unsigned long flags;

        /*
         * refuse to set tagged depth to an unworkable size
         */
        if (tags <= 0)
                return;

        spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

        /* Check to see if the queue is managed by the block layer
         * if it is, and we fail to adjust the depth, exit */
        if (blk_queue_tagged(sdev->request_queue) &&
            blk_queue_resize_tags(sdev->request_queue, tags) != 0)
                goto out;

        sdev->queue_depth = tags;
        switch (tagged) {
        case MSG_ORDERED_TAG:
                sdev->ordered_tags = 1;
                sdev->simple_tags = 1;
                break;
        case MSG_SIMPLE_TAG:
                sdev->ordered_tags = 0;
                sdev->simple_tags = 1;
                break;
        default:
                sdev_printk(KERN_WARNING, sdev,
                            "scsi_adjust_queue_depth, bad queue type, "
                            "disabled\n");
                /* fall through: disable tagging as for the untagged case */
        case 0:
                sdev->ordered_tags = sdev->simple_tags = 0;
                sdev->queue_depth = tags;
                break;
        }
 out:
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
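
/*
 * Illustrative sketch, not part of this file: a hypothetical LLDD
 * slave_configure() method picking a queue depth.  EXAMPLE_CAN_QUEUE is
 * made up; real drivers derive the limit from hardware resources.
 */
#if 0   /* example only */
#define EXAMPLE_CAN_QUEUE       64

static int example_slave_configure(struct scsi_device *sdev)
{
        if (sdev->tagged_supported)
                scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
                                        EXAMPLE_CAN_QUEUE);
        else
                scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
        return 0;
}
#endif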

/*
 * Function:    scsi_track_queue_full()
 *
 * Purpose:     This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Arguments:   sdev    - SCSI Device in question
 *              depth   - Current number of outstanding SCSI commands on
 *                        this device, not counting the one returned as
 *                        QUEUE_FULL.
 *
 * Returns:     0 - No change needed
 *              >0 - Adjust queue depth to this new depth
 *              -1 - Drop back to untagged operation using host->cmd_per_lun
 *                   as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
        if ((jiffies >> 4) == sdev->last_queue_full_time)
                return 0;

        sdev->last_queue_full_time = (jiffies >> 4);
        if (sdev->last_queue_full_depth != depth) {
                sdev->last_queue_full_count = 1;
                sdev->last_queue_full_depth = depth;
        } else {
                sdev->last_queue_full_count++;
        }

        if (sdev->last_queue_full_count <= 10)
                return 0;
        if (sdev->last_queue_full_depth < 8) {
                /* Drop back to untagged */
                scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
                return -1;
        }

        if (sdev->ordered_tags)
                scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
        else
                scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
        return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
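
/*
 * Illustrative sketch, not part of this file: feeding QUEUE_FULL (TASK SET
 * FULL) completions from a hypothetical LLDD into scsi_track_queue_full();
 * "outstanding" is whatever the driver counts as commands still in flight.
 */
#if 0   /* example only */
static void example_handle_queue_full(struct scsi_cmnd *cmd, int outstanding)
{
        if (status_byte(cmd->result) == QUEUE_FULL)
                scsi_track_queue_full(cmd->device, outstanding - 1);
}
#endif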

/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:	device to get a reference to
 *
 * Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
        if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
                return -ENXIO;
        if (!get_device(&sdev->sdev_gendev))
                return -ENXIO;
        if (!try_module_get(sdev->host->hostt->module)) {
                put_device(&sdev->sdev_gendev);
                return -ENXIO;
        }
        return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Release a reference to the scsi_device and decrements the use count
 * of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
        module_put(sdev->host->hostt->module);
        put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);

/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
                                           struct scsi_device *prev)
{
        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
        struct scsi_device *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (list->next != &shost->__devices) {
                next = list_entry(list->next, struct scsi_device, siblings);
                /* skip devices that we can't get a reference to */
                if (!scsi_device_get(next))
                        break;
                next = NULL;
                list = list->next;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        if (prev)
                scsi_device_put(prev);
        return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:	target whose devices we want to iterate over.
 * @data:	opaque cookie passed to each callback invocation
 * @fn:		callback to run for every device found
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
                             void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(starget_for_each_device);
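
/*
 * Illustrative sketch, not part of this file: counting the LUNs behind one
 * target with starget_for_each_device(); the callback and counter are
 * hypothetical.
 */
#if 0   /* example only */
static void example_count_lun(struct scsi_device *sdev, void *data)
{
        (*(int *)data)++;
}

static int example_luns_on_target(struct scsi_target *starget)
{
        int count = 0;

        starget_for_each_device(starget, &count, example_count_lun);
        return count;
}
#endif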

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
                                                   uint lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
                if (sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
                                                 uint lun)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup_by_target(starget, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any
 * access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
                                         uint channel, uint id, uint lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->channel == channel && sdev->id == id &&
                    sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
                                       uint channel, uint id, uint lun)
{
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup(shost, channel, id, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
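
/*
 * Illustrative sketch, not part of this file: resolving a device by address
 * and dropping the reference when done.  The channel/id/lun values are made
 * up.
 */
#if 0   /* example only */
static void example_poke_device(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        sdev = scsi_device_lookup(shost, 0, 2, 0);
        if (!sdev)
                return;

        /* ... issue commands against sdev ... */

        scsi_device_put(sdev);  /* pairs with the lookup's reference */
}
#endif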

/**
 * scsi_device_cancel - cancel outstanding IO to this device
 * @sdev:	Pointer to struct scsi_device
 * @recovery:	if non-zero, hand active commands to the error handler;
 *		commands the error handler will not take are completed
 *		with DID_ABORT.
 **/
int scsi_device_cancel(struct scsi_device *sdev, int recovery)
{
        struct scsi_cmnd *scmd;
        LIST_HEAD(active_list);
        struct list_head *lh, *lh_sf;
        unsigned long flags;

        scsi_device_set_state(sdev, SDEV_CANCEL);

        spin_lock_irqsave(&sdev->list_lock, flags);
        list_for_each_entry(scmd, &sdev->cmd_list, list) {
                if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) {
                        /*
                         * If we are unable to remove the timer, it means
                         * that the command has already timed out or
                         * finished.
                         */
                        if (!scsi_delete_timer(scmd))
                                continue;
                        list_add_tail(&scmd->eh_entry, &active_list);
                }
        }
        spin_unlock_irqrestore(&sdev->list_lock, flags);

        if (!list_empty(&active_list)) {
                list_for_each_safe(lh, lh_sf, &active_list) {
                        scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
                        list_del_init(lh);
                        if (recovery &&
                            !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
                                scmd->result = (DID_ABORT << 16);
                                scsi_finish_command(scmd);
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL(scsi_device_cancel);

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
        int error, i;

        error = scsi_init_queue();
        if (error)
                return error;
        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        for_each_cpu(i)
                INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);
        return error;
}

static void __exit exit_scsi(void)
{
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        scsi_exit_procfs();
        scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);