xref: /linux/drivers/scsi/scsi_lib.c (revision 87c2ce3b9305b9b723faeedf6e32ef703ec9b33a)
1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9 
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/completion.h>
13 #include <linux/kernel.h>
14 #include <linux/mempool.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/pci.h>
18 #include <linux/delay.h>
19 
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_dbg.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_driver.h>
24 #include <scsi/scsi_eh.h>
25 #include <scsi/scsi_host.h>
26 #include <scsi/scsi_request.h>
27 
28 #include "scsi_priv.h"
29 #include "scsi_logging.h"
30 
31 
32 #define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
33 #define SG_MEMPOOL_SIZE		32
34 
35 struct scsi_host_sg_pool {
36 	size_t		size;
37 	char		*name;
38 	kmem_cache_t	*slab;
39 	mempool_t	*pool;
40 };
41 
42 #if (SCSI_MAX_PHYS_SEGMENTS < 32)
43 #error SCSI_MAX_PHYS_SEGMENTS is too small
44 #endif
45 
46 #define SP(x) { x, "sgpool-" #x }
47 static struct scsi_host_sg_pool scsi_sg_pools[] = {
48 	SP(8),
49 	SP(16),
50 	SP(32),
51 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
52 	SP(64),
53 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
54 	SP(128),
55 #if (SCSI_MAX_PHYS_SEGMENTS > 128)
56 	SP(256),
57 #if (SCSI_MAX_PHYS_SEGMENTS > 256)
58 #error SCSI_MAX_PHYS_SEGMENTS is too large
59 #endif
60 #endif
61 #endif
62 #endif
63 };
64 #undef SP
65 
66 static void scsi_run_queue(struct request_queue *q);
67 
68 /*
69  * Function:	scsi_unprep_request()
70  *
71  * Purpose:	Remove all preparation done for a request, including its
72  *		associated scsi_cmnd, so that it can be requeued.
73  *
74  * Arguments:	req	- request to unprepare
75  *
76  * Lock status:	Assumed that no locks are held upon entry.
77  *
78  * Returns:	Nothing.
79  */
80 static void scsi_unprep_request(struct request *req)
81 {
82 	struct scsi_cmnd *cmd = req->special;
83 
84 	req->flags &= ~REQ_DONTPREP;
85 	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
86 
87 	scsi_put_command(cmd);
88 }
89 
90 /*
91  * Function:    scsi_queue_insert()
92  *
93  * Purpose:     Insert a command in the midlevel queue.
94  *
95  * Arguments:   cmd    - command that we are adding to queue.
96  *              reason - why we are inserting command to queue.
97  *
98  * Lock status: Assumed that lock is not held upon entry.
99  *
100  * Returns:     Nothing.
101  *
102  * Notes:       We do this for one of two cases.  Either the host is busy
103  *              and it cannot accept any more commands for the time being,
104  *              or the device returned QUEUE_FULL and can accept no more
105  *              commands.
106  * Notes:       This could be called either from an interrupt context or a
107  *              normal process context.
108  */
109 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
110 {
111 	struct Scsi_Host *host = cmd->device->host;
112 	struct scsi_device *device = cmd->device;
113 	struct request_queue *q = device->request_queue;
114 	unsigned long flags;
115 
116 	SCSI_LOG_MLQUEUE(1,
117 		 printk("Inserting command %p into mlqueue\n", cmd));
118 
119 	/*
120 	 * Set the appropriate busy bit for the device/host.
121 	 *
122 	 * If the host/device isn't busy, assume that something actually
123 	 * completed, and that we should be able to queue a command now.
124 	 *
125 	 * Note that the prior mid-layer assumption that any host could
126 	 * always queue at least one command is now broken.  The mid-layer
127 	 * will implement a user specifiable stall (see
128 	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
129 	 * if a command is requeued with no other commands outstanding
130 	 * either for the device or for the host.
131 	 */
132 	if (reason == SCSI_MLQUEUE_HOST_BUSY)
133 		host->host_blocked = host->max_host_blocked;
134 	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
135 		device->device_blocked = device->max_device_blocked;
136 
137 	/*
138 	 * Decrement the counters, since these commands are no longer
139 	 * active on the host/device.
140 	 */
141 	scsi_device_unbusy(device);
142 
143 	/*
144 	 * Requeue this command.  It will go before all other commands
145 	 * that are already in the queue.
146 	 *
147 	 * NOTE: there is magic here about the way the queue is plugged if
148 	 * we have no outstanding commands.
149 	 *
150 	 * Although we *don't* plug the queue, we call the request
151 	 * function.  The SCSI request function detects the blocked condition
152 	 * and plugs the queue appropriately.
153          */
154 	spin_lock_irqsave(q->queue_lock, flags);
155 	blk_requeue_request(q, cmd->request);
156 	spin_unlock_irqrestore(q->queue_lock, flags);
157 
158 	scsi_run_queue(q);
159 
160 	return 0;
161 }
162 
163 /*
164  * Function:    scsi_do_req
165  *
166  * Purpose:     Queue a SCSI request
167  *
168  * Arguments:   sreq	  - command descriptor.
169  *              cmnd      - actual SCSI command to be performed.
170  *              buffer    - data buffer.
171  *              bufflen   - size of data buffer.
172  *              done      - completion function to be run.
173  *              timeout   - how long to let it run before timeout.
174  *              retries   - number of retries we allow.
175  *
176  * Lock status: No locks held upon entry.
177  *
178  * Returns:     Nothing.
179  *
180  * Notes:	This function is only used for queueing requests for things
181  *		like ioctls and character device requests - this is because
182  *		we essentially just inject a request into the queue for the
183  *		device.
184  *
185  *		In order to support the scsi_device_quiesce function, we
186  *		now inject requests on the *head* of the device queue
187  *		rather than the tail.
188  */
189 void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
190 		 void *buffer, unsigned bufflen,
191 		 void (*done)(struct scsi_cmnd *),
192 		 int timeout, int retries)
193 {
194 	/*
195 	 * If the upper level driver is reusing these things, then
196 	 * we should release the low-level block now.  Another one will
197 	 * be allocated later when this request is getting queued.
198 	 */
199 	__scsi_release_request(sreq);
200 
201 	/*
202 	 * Our own function scsi_done (which marks the host as not busy,
203 	 * disables the timeout counter, etc) will be called by us or by the
204 	 * scsi_hosts[host].queuecommand() function needs to also call
205 	 * the completion function for the high level driver.
206 	 */
207 	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
208 	sreq->sr_bufflen = bufflen;
209 	sreq->sr_buffer = buffer;
210 	sreq->sr_allowed = retries;
211 	sreq->sr_done = done;
212 	sreq->sr_timeout_per_command = timeout;
213 
214 	if (sreq->sr_cmd_len == 0)
215 		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
216 
217 	/*
218 	 * head injection *required* here otherwise quiesce won't work
219 	 *
220 	 * Because users of this function are apt to reuse requests with no
221 	 * modification, we have to sanitise the request flags here
222 	 */
223 	sreq->sr_request->flags &= ~REQ_DONTPREP;
224 	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
225 		       	   1, sreq);
226 }
227 EXPORT_SYMBOL(scsi_do_req);
228 
229 /**
230  * scsi_execute - insert request and wait for the result
231  * @sdev:	scsi device
232  * @cmd:	scsi command
233  * @data_direction: data direction
234  * @buffer:	data buffer
235  * @bufflen:	len of buffer
236  * @sense:	optional sense buffer
237  * @timeout:	request timeout in seconds
238  * @retries:	number of times to retry request
239  * @flags:	or into request flags;
240  *
241  * returns the req->errors value which is the the scsi_cmnd result
242  * field.
243  **/
244 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
245 		 int data_direction, void *buffer, unsigned bufflen,
246 		 unsigned char *sense, int timeout, int retries, int flags)
247 {
248 	struct request *req;
249 	int write = (data_direction == DMA_TO_DEVICE);
250 	int ret = DRIVER_ERROR << 24;
251 
252 	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
253 
254 	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
255 					buffer, bufflen, __GFP_WAIT))
256 		goto out;
257 
258 	req->cmd_len = COMMAND_SIZE(cmd[0]);
259 	memcpy(req->cmd, cmd, req->cmd_len);
260 	req->sense = sense;
261 	req->sense_len = 0;
262 	req->retries = retries;
263 	req->timeout = timeout;
264 	req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
265 
266 	/*
267 	 * head injection *required* here otherwise quiesce won't work
268 	 */
269 	blk_execute_rq(req->q, NULL, req, 1);
270 
271 	ret = req->errors;
272  out:
273 	blk_put_request(req);
274 
275 	return ret;
276 }
277 EXPORT_SYMBOL(scsi_execute);
278 
279 
280 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
281 		     int data_direction, void *buffer, unsigned bufflen,
282 		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
283 {
284 	char *sense = NULL;
285 	int result;
286 
287 	if (sshdr) {
288 		sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
289 		if (!sense)
290 			return DRIVER_ERROR << 24;
291 		memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
292 	}
293 	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
294 				  sense, timeout, retries, 0);
295 	if (sshdr)
296 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
297 
298 	kfree(sense);
299 	return result;
300 }
301 EXPORT_SYMBOL(scsi_execute_req);
302 
303 struct scsi_io_context {
304 	void *data;
305 	void (*done)(void *data, char *sense, int result, int resid);
306 	char sense[SCSI_SENSE_BUFFERSIZE];
307 };
308 
309 static kmem_cache_t *scsi_io_context_cache;
310 
311 static void scsi_end_async(struct request *req, int uptodate)
312 {
313 	struct scsi_io_context *sioc = req->end_io_data;
314 
315 	if (sioc->done)
316 		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
317 
318 	kmem_cache_free(scsi_io_context_cache, sioc);
319 	__blk_put_request(req->q, req);
320 }
321 
322 static int scsi_merge_bio(struct request *rq, struct bio *bio)
323 {
324 	struct request_queue *q = rq->q;
325 
326 	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
327 	if (rq_data_dir(rq) == WRITE)
328 		bio->bi_rw |= (1 << BIO_RW);
329 	blk_queue_bounce(q, &bio);
330 
331 	if (!rq->bio)
332 		blk_rq_bio_prep(q, rq, bio);
333 	else if (!q->back_merge_fn(q, rq, bio))
334 		return -EINVAL;
335 	else {
336 		rq->biotail->bi_next = bio;
337 		rq->biotail = bio;
338 		rq->hard_nr_sectors += bio_sectors(bio);
339 		rq->nr_sectors = rq->hard_nr_sectors;
340 	}
341 
342 	return 0;
343 }
344 
345 static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
346 {
347 	if (bio->bi_size)
348 		return 1;
349 
350 	bio_put(bio);
351 	return 0;
352 }
353 
354 /**
355  * scsi_req_map_sg - map a scatterlist into a request
356  * @rq:		request to fill
357  * @sg:		scatterlist
358  * @nsegs:	number of elements
359  * @bufflen:	len of buffer
360  * @gfp:	memory allocation flags
361  *
362  * scsi_req_map_sg maps a scatterlist into a request so that the
363  * request can be sent to the block layer. We do not trust the scatterlist
364  * sent to use, as some ULDs use that struct to only organize the pages.
365  */
366 static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
367 			   int nsegs, unsigned bufflen, gfp_t gfp)
368 {
369 	struct request_queue *q = rq->q;
370 	int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
371 	unsigned int data_len = 0, len, bytes, off;
372 	struct page *page;
373 	struct bio *bio = NULL;
374 	int i, err, nr_vecs = 0;
375 
376 	for (i = 0; i < nsegs; i++) {
377 		page = sgl[i].page;
378 		off = sgl[i].offset;
379 		len = sgl[i].length;
380 		data_len += len;
381 
382 		while (len > 0) {
383 			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
384 
385 			if (!bio) {
386 				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
387 				nr_pages -= nr_vecs;
388 
389 				bio = bio_alloc(gfp, nr_vecs);
390 				if (!bio) {
391 					err = -ENOMEM;
392 					goto free_bios;
393 				}
394 				bio->bi_end_io = scsi_bi_endio;
395 			}
396 
397 			if (bio_add_pc_page(q, bio, page, bytes, off) !=
398 			    bytes) {
399 				bio_put(bio);
400 				err = -EINVAL;
401 				goto free_bios;
402 			}
403 
404 			if (bio->bi_vcnt >= nr_vecs) {
405 				err = scsi_merge_bio(rq, bio);
406 				if (err) {
407 					bio_endio(bio, bio->bi_size, 0);
408 					goto free_bios;
409 				}
410 				bio = NULL;
411 			}
412 
413 			page++;
414 			len -= bytes;
415 			off = 0;
416 		}
417 	}
418 
419 	rq->buffer = rq->data = NULL;
420 	rq->data_len = data_len;
421 	return 0;
422 
423 free_bios:
424 	while ((bio = rq->bio) != NULL) {
425 		rq->bio = bio->bi_next;
426 		/*
427 		 * call endio instead of bio_put incase it was bounced
428 		 */
429 		bio_endio(bio, bio->bi_size, 0);
430 	}
431 
432 	return err;
433 }
434 
435 /**
436  * scsi_execute_async - insert request
437  * @sdev:	scsi device
438  * @cmd:	scsi command
439  * @data_direction: data direction
440  * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
441  * @bufflen:	len of buffer
442  * @use_sg:	if buffer is a scatterlist this is the number of elements
443  * @timeout:	request timeout in seconds
444  * @retries:	number of times to retry request
445  * @flags:	or into request flags
446  **/
447 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
448 		       int data_direction, void *buffer, unsigned bufflen,
449 		       int use_sg, int timeout, int retries, void *privdata,
450 		       void (*done)(void *, char *, int, int), gfp_t gfp)
451 {
452 	struct request *req;
453 	struct scsi_io_context *sioc;
454 	int err = 0;
455 	int write = (data_direction == DMA_TO_DEVICE);
456 
457 	sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
458 	if (!sioc)
459 		return DRIVER_ERROR << 24;
460 	memset(sioc, 0, sizeof(*sioc));
461 
462 	req = blk_get_request(sdev->request_queue, write, gfp);
463 	if (!req)
464 		goto free_sense;
465 	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
466 
467 	if (use_sg)
468 		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
469 	else if (bufflen)
470 		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
471 
472 	if (err)
473 		goto free_req;
474 
475 	req->cmd_len = COMMAND_SIZE(cmd[0]);
476 	memcpy(req->cmd, cmd, req->cmd_len);
477 	req->sense = sioc->sense;
478 	req->sense_len = 0;
479 	req->timeout = timeout;
480 	req->retries = retries;
481 	req->end_io_data = sioc;
482 
483 	sioc->data = privdata;
484 	sioc->done = done;
485 
486 	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
487 	return 0;
488 
489 free_req:
490 	blk_put_request(req);
491 free_sense:
492 	kfree(sioc);
493 	return DRIVER_ERROR << 24;
494 }
495 EXPORT_SYMBOL_GPL(scsi_execute_async);
496 
497 /*
498  * Function:    scsi_init_cmd_errh()
499  *
500  * Purpose:     Initialize cmd fields related to error handling.
501  *
502  * Arguments:   cmd	- command that is ready to be queued.
503  *
504  * Returns:     Nothing
505  *
506  * Notes:       This function has the job of initializing a number of
507  *              fields related to error handling.   Typically this will
508  *              be called once for each command, as required.
509  */
510 static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
511 {
512 	cmd->serial_number = 0;
513 
514 	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
515 
516 	if (cmd->cmd_len == 0)
517 		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
518 
519 	/*
520 	 * We need saved copies of a number of fields - this is because
521 	 * error handling may need to overwrite these with different values
522 	 * to run different commands, and once error handling is complete,
523 	 * we will need to restore these values prior to running the actual
524 	 * command.
525 	 */
526 	cmd->old_use_sg = cmd->use_sg;
527 	cmd->old_cmd_len = cmd->cmd_len;
528 	cmd->sc_old_data_direction = cmd->sc_data_direction;
529 	cmd->old_underflow = cmd->underflow;
530 	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
531 	cmd->buffer = cmd->request_buffer;
532 	cmd->bufflen = cmd->request_bufflen;
533 
534 	return 1;
535 }
536 
537 /*
538  * Function:   scsi_setup_cmd_retry()
539  *
540  * Purpose:    Restore the command state for a retry
541  *
542  * Arguments:  cmd	- command to be restored
543  *
544  * Returns:    Nothing
545  *
546  * Notes:      Immediately prior to retrying a command, we need
547  *             to restore certain fields that we saved above.
548  */
549 void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
550 {
551 	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
552 	cmd->request_buffer = cmd->buffer;
553 	cmd->request_bufflen = cmd->bufflen;
554 	cmd->use_sg = cmd->old_use_sg;
555 	cmd->cmd_len = cmd->old_cmd_len;
556 	cmd->sc_data_direction = cmd->sc_old_data_direction;
557 	cmd->underflow = cmd->old_underflow;
558 }
559 
560 void scsi_device_unbusy(struct scsi_device *sdev)
561 {
562 	struct Scsi_Host *shost = sdev->host;
563 	unsigned long flags;
564 
565 	spin_lock_irqsave(shost->host_lock, flags);
566 	shost->host_busy--;
567 	if (unlikely(scsi_host_in_recovery(shost) &&
568 		     shost->host_failed))
569 		scsi_eh_wakeup(shost);
570 	spin_unlock(shost->host_lock);
571 	spin_lock(sdev->request_queue->queue_lock);
572 	sdev->device_busy--;
573 	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
574 }
575 
576 /*
577  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
578  * and call blk_run_queue for all the scsi_devices on the target -
579  * including current_sdev first.
580  *
581  * Called with *no* scsi locks held.
582  */
583 static void scsi_single_lun_run(struct scsi_device *current_sdev)
584 {
585 	struct Scsi_Host *shost = current_sdev->host;
586 	struct scsi_device *sdev, *tmp;
587 	struct scsi_target *starget = scsi_target(current_sdev);
588 	unsigned long flags;
589 
590 	spin_lock_irqsave(shost->host_lock, flags);
591 	starget->starget_sdev_user = NULL;
592 	spin_unlock_irqrestore(shost->host_lock, flags);
593 
594 	/*
595 	 * Call blk_run_queue for all LUNs on the target, starting with
596 	 * current_sdev. We race with others (to set starget_sdev_user),
597 	 * but in most cases, we will be first. Ideally, each LU on the
598 	 * target would get some limited time or requests on the target.
599 	 */
600 	blk_run_queue(current_sdev->request_queue);
601 
602 	spin_lock_irqsave(shost->host_lock, flags);
603 	if (starget->starget_sdev_user)
604 		goto out;
605 	list_for_each_entry_safe(sdev, tmp, &starget->devices,
606 			same_target_siblings) {
607 		if (sdev == current_sdev)
608 			continue;
609 		if (scsi_device_get(sdev))
610 			continue;
611 
612 		spin_unlock_irqrestore(shost->host_lock, flags);
613 		blk_run_queue(sdev->request_queue);
614 		spin_lock_irqsave(shost->host_lock, flags);
615 
616 		scsi_device_put(sdev);
617 	}
618  out:
619 	spin_unlock_irqrestore(shost->host_lock, flags);
620 }
621 
622 /*
623  * Function:	scsi_run_queue()
624  *
625  * Purpose:	Select a proper request queue to serve next
626  *
627  * Arguments:	q	- last request's queue
628  *
629  * Returns:     Nothing
630  *
631  * Notes:	The previous command was completely finished, start
632  *		a new one if possible.
633  */
634 static void scsi_run_queue(struct request_queue *q)
635 {
636 	struct scsi_device *sdev = q->queuedata;
637 	struct Scsi_Host *shost = sdev->host;
638 	unsigned long flags;
639 
640 	if (sdev->single_lun)
641 		scsi_single_lun_run(sdev);
642 
643 	spin_lock_irqsave(shost->host_lock, flags);
644 	while (!list_empty(&shost->starved_list) &&
645 	       !shost->host_blocked && !shost->host_self_blocked &&
646 		!((shost->can_queue > 0) &&
647 		  (shost->host_busy >= shost->can_queue))) {
648 		/*
649 		 * As long as shost is accepting commands and we have
650 		 * starved queues, call blk_run_queue. scsi_request_fn
651 		 * drops the queue_lock and can add us back to the
652 		 * starved_list.
653 		 *
654 		 * host_lock protects the starved_list and starved_entry.
655 		 * scsi_request_fn must get the host_lock before checking
656 		 * or modifying starved_list or starved_entry.
657 		 */
658 		sdev = list_entry(shost->starved_list.next,
659 					  struct scsi_device, starved_entry);
660 		list_del_init(&sdev->starved_entry);
661 		spin_unlock_irqrestore(shost->host_lock, flags);
662 
663 		blk_run_queue(sdev->request_queue);
664 
665 		spin_lock_irqsave(shost->host_lock, flags);
666 		if (unlikely(!list_empty(&sdev->starved_entry)))
667 			/*
668 			 * sdev lost a race, and was put back on the
669 			 * starved list. This is unlikely but without this
670 			 * in theory we could loop forever.
671 			 */
672 			break;
673 	}
674 	spin_unlock_irqrestore(shost->host_lock, flags);
675 
676 	blk_run_queue(q);
677 }
678 
679 /*
680  * Function:	scsi_requeue_command()
681  *
682  * Purpose:	Handle post-processing of completed commands.
683  *
684  * Arguments:	q	- queue to operate on
685  *		cmd	- command that may need to be requeued.
686  *
687  * Returns:	Nothing
688  *
689  * Notes:	After command completion, there may be blocks left
690  *		over which weren't finished by the previous command
691  *		this can be for a number of reasons - the main one is
692  *		I/O errors in the middle of the request, in which case
693  *		we need to request the blocks that come after the bad
694  *		sector.
695  * Notes:	Upon return, cmd is a stale pointer.
696  */
697 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
698 {
699 	struct request *req = cmd->request;
700 	unsigned long flags;
701 
702 	scsi_unprep_request(req);
703 	spin_lock_irqsave(q->queue_lock, flags);
704 	blk_requeue_request(q, req);
705 	spin_unlock_irqrestore(q->queue_lock, flags);
706 
707 	scsi_run_queue(q);
708 }
709 
710 void scsi_next_command(struct scsi_cmnd *cmd)
711 {
712 	struct scsi_device *sdev = cmd->device;
713 	struct request_queue *q = sdev->request_queue;
714 
715 	/* need to hold a reference on the device before we let go of the cmd */
716 	get_device(&sdev->sdev_gendev);
717 
718 	scsi_put_command(cmd);
719 	scsi_run_queue(q);
720 
721 	/* ok to remove device now */
722 	put_device(&sdev->sdev_gendev);
723 }
724 
725 void scsi_run_host_queues(struct Scsi_Host *shost)
726 {
727 	struct scsi_device *sdev;
728 
729 	shost_for_each_device(sdev, shost)
730 		scsi_run_queue(sdev->request_queue);
731 }
732 
733 /*
734  * Function:    scsi_end_request()
735  *
736  * Purpose:     Post-processing of completed commands (usually invoked at end
737  *		of upper level post-processing and scsi_io_completion).
738  *
739  * Arguments:   cmd	 - command that is complete.
740  *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
741  *              bytes    - number of bytes of completed I/O
742  *		requeue  - indicates whether we should requeue leftovers.
743  *
744  * Lock status: Assumed that lock is not held upon entry.
745  *
746  * Returns:     cmd if requeue required, NULL otherwise.
747  *
748  * Notes:       This is called for block device requests in order to
749  *              mark some number of sectors as complete.
750  *
751  *		We are guaranteeing that the request queue will be goosed
752  *		at some point during this call.
753  * Notes:	If cmd was requeued, upon return it will be a stale pointer.
754  */
755 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
756 					  int bytes, int requeue)
757 {
758 	request_queue_t *q = cmd->device->request_queue;
759 	struct request *req = cmd->request;
760 	unsigned long flags;
761 
762 	/*
763 	 * If there are blocks left over at the end, set up the command
764 	 * to queue the remainder of them.
765 	 */
766 	if (end_that_request_chunk(req, uptodate, bytes)) {
767 		int leftover = (req->hard_nr_sectors << 9);
768 
769 		if (blk_pc_request(req))
770 			leftover = req->data_len;
771 
772 		/* kill remainder if no retrys */
773 		if (!uptodate && blk_noretry_request(req))
774 			end_that_request_chunk(req, 0, leftover);
775 		else {
776 			if (requeue) {
777 				/*
778 				 * Bleah.  Leftovers again.  Stick the
779 				 * leftovers in the front of the
780 				 * queue, and goose the queue again.
781 				 */
782 				scsi_requeue_command(q, cmd);
783 				cmd = NULL;
784 			}
785 			return cmd;
786 		}
787 	}
788 
789 	add_disk_randomness(req->rq_disk);
790 
791 	spin_lock_irqsave(q->queue_lock, flags);
792 	if (blk_rq_tagged(req))
793 		blk_queue_end_tag(q, req);
794 	end_that_request_last(req, uptodate);
795 	spin_unlock_irqrestore(q->queue_lock, flags);
796 
797 	/*
798 	 * This will goose the queue request function at the end, so we don't
799 	 * need to worry about launching another command.
800 	 */
801 	scsi_next_command(cmd);
802 	return NULL;
803 }
804 
805 static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
806 {
807 	struct scsi_host_sg_pool *sgp;
808 	struct scatterlist *sgl;
809 
810 	BUG_ON(!cmd->use_sg);
811 
812 	switch (cmd->use_sg) {
813 	case 1 ... 8:
814 		cmd->sglist_len = 0;
815 		break;
816 	case 9 ... 16:
817 		cmd->sglist_len = 1;
818 		break;
819 	case 17 ... 32:
820 		cmd->sglist_len = 2;
821 		break;
822 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
823 	case 33 ... 64:
824 		cmd->sglist_len = 3;
825 		break;
826 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
827 	case 65 ... 128:
828 		cmd->sglist_len = 4;
829 		break;
830 #if (SCSI_MAX_PHYS_SEGMENTS  > 128)
831 	case 129 ... 256:
832 		cmd->sglist_len = 5;
833 		break;
834 #endif
835 #endif
836 #endif
837 	default:
838 		return NULL;
839 	}
840 
841 	sgp = scsi_sg_pools + cmd->sglist_len;
842 	sgl = mempool_alloc(sgp->pool, gfp_mask);
843 	return sgl;
844 }
845 
846 static void scsi_free_sgtable(struct scatterlist *sgl, int index)
847 {
848 	struct scsi_host_sg_pool *sgp;
849 
850 	BUG_ON(index >= SG_MEMPOOL_NR);
851 
852 	sgp = scsi_sg_pools + index;
853 	mempool_free(sgl, sgp->pool);
854 }
855 
856 /*
857  * Function:    scsi_release_buffers()
858  *
859  * Purpose:     Completion processing for block device I/O requests.
860  *
861  * Arguments:   cmd	- command that we are bailing.
862  *
863  * Lock status: Assumed that no lock is held upon entry.
864  *
865  * Returns:     Nothing
866  *
867  * Notes:       In the event that an upper level driver rejects a
868  *		command, we must release resources allocated during
869  *		the __init_io() function.  Primarily this would involve
870  *		the scatter-gather table, and potentially any bounce
871  *		buffers.
872  */
873 static void scsi_release_buffers(struct scsi_cmnd *cmd)
874 {
875 	struct request *req = cmd->request;
876 
877 	/*
878 	 * Free up any indirection buffers we allocated for DMA purposes.
879 	 */
880 	if (cmd->use_sg)
881 		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
882 	else if (cmd->request_buffer != req->buffer)
883 		kfree(cmd->request_buffer);
884 
885 	/*
886 	 * Zero these out.  They now point to freed memory, and it is
887 	 * dangerous to hang onto the pointers.
888 	 */
889 	cmd->buffer  = NULL;
890 	cmd->bufflen = 0;
891 	cmd->request_buffer = NULL;
892 	cmd->request_bufflen = 0;
893 }
894 
895 /*
896  * Function:    scsi_io_completion()
897  *
898  * Purpose:     Completion processing for block device I/O requests.
899  *
900  * Arguments:   cmd   - command that is finished.
901  *
902  * Lock status: Assumed that no lock is held upon entry.
903  *
904  * Returns:     Nothing
905  *
906  * Notes:       This function is matched in terms of capabilities to
907  *              the function that created the scatter-gather list.
908  *              In other words, if there are no bounce buffers
909  *              (the normal case for most drivers), we don't need
910  *              the logic to deal with cleaning up afterwards.
911  *
912  *		We must do one of several things here:
913  *
914  *		a) Call scsi_end_request.  This will finish off the
915  *		   specified number of sectors.  If we are done, the
916  *		   command block will be released, and the queue
917  *		   function will be goosed.  If we are not done, then
918  *		   scsi_end_request will directly goose the queue.
919  *
920  *		b) We can just use scsi_requeue_command() here.  This would
921  *		   be used if we just wanted to retry, for example.
922  */
923 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
924 			unsigned int block_bytes)
925 {
926 	int result = cmd->result;
927 	int this_count = cmd->bufflen;
928 	request_queue_t *q = cmd->device->request_queue;
929 	struct request *req = cmd->request;
930 	int clear_errors = 1;
931 	struct scsi_sense_hdr sshdr;
932 	int sense_valid = 0;
933 	int sense_deferred = 0;
934 
935 	/*
936 	 * Free up any indirection buffers we allocated for DMA purposes.
937 	 * For the case of a READ, we need to copy the data out of the
938 	 * bounce buffer and into the real buffer.
939 	 */
940 	if (cmd->use_sg)
941 		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
942 	else if (cmd->buffer != req->buffer) {
943 		if (rq_data_dir(req) == READ) {
944 			unsigned long flags;
945 			char *to = bio_kmap_irq(req->bio, &flags);
946 			memcpy(to, cmd->buffer, cmd->bufflen);
947 			bio_kunmap_irq(to, &flags);
948 		}
949 		kfree(cmd->buffer);
950 	}
951 
952 	if (result) {
953 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
954 		if (sense_valid)
955 			sense_deferred = scsi_sense_is_deferred(&sshdr);
956 	}
957 	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
958 		req->errors = result;
959 		if (result) {
960 			clear_errors = 0;
961 			if (sense_valid && req->sense) {
962 				/*
963 				 * SG_IO wants current and deferred errors
964 				 */
965 				int len = 8 + cmd->sense_buffer[7];
966 
967 				if (len > SCSI_SENSE_BUFFERSIZE)
968 					len = SCSI_SENSE_BUFFERSIZE;
969 				memcpy(req->sense, cmd->sense_buffer,  len);
970 				req->sense_len = len;
971 			}
972 		} else
973 			req->data_len = cmd->resid;
974 	}
975 
976 	/*
977 	 * Zero these out.  They now point to freed memory, and it is
978 	 * dangerous to hang onto the pointers.
979 	 */
980 	cmd->buffer  = NULL;
981 	cmd->bufflen = 0;
982 	cmd->request_buffer = NULL;
983 	cmd->request_bufflen = 0;
984 
985 	/*
986 	 * Next deal with any sectors which we were able to correctly
987 	 * handle.
988 	 */
989 	if (good_bytes >= 0) {
990 		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
991 					      req->nr_sectors, good_bytes));
992 		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
993 
994 		if (clear_errors)
995 			req->errors = 0;
996 		/*
997 		 * If multiple sectors are requested in one buffer, then
998 		 * they will have been finished off by the first command.
999 		 * If not, then we have a multi-buffer command.
1000 		 *
1001 		 * If block_bytes != 0, it means we had a medium error
1002 		 * of some sort, and that we want to mark some number of
1003 		 * sectors as not uptodate.  Thus we want to inhibit
1004 		 * requeueing right here - we will requeue down below
1005 		 * when we handle the bad sectors.
1006 		 */
1007 
1008 		/*
1009 		 * If the command completed without error, then either
1010 		 * finish off the rest of the command, or start a new one.
1011 		 */
1012 		if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
1013 			return;
1014 	}
1015 	/*
1016 	 * Now, if we were good little boys and girls, Santa left us a request
1017 	 * sense buffer.  We can extract information from this, so we
1018 	 * can choose a block to remap, etc.
1019 	 */
1020 	if (sense_valid && !sense_deferred) {
1021 		switch (sshdr.sense_key) {
1022 		case UNIT_ATTENTION:
1023 			if (cmd->device->removable) {
1024 				/* detected disc change.  set a bit
1025 				 * and quietly refuse further access.
1026 				 */
1027 				cmd->device->changed = 1;
1028 				scsi_end_request(cmd, 0,
1029 						this_count, 1);
1030 				return;
1031 			} else {
1032 				/*
1033 				* Must have been a power glitch, or a
1034 				* bus reset.  Could not have been a
1035 				* media change, so we just retry the
1036 				* request and see what happens.
1037 				*/
1038 				scsi_requeue_command(q, cmd);
1039 				return;
1040 			}
1041 			break;
1042 		case ILLEGAL_REQUEST:
1043 			/*
1044 		 	* If we had an ILLEGAL REQUEST returned, then we may
1045 		 	* have performed an unsupported command.  The only
1046 		 	* thing this should be would be a ten byte read where
1047 			* only a six byte read was supported.  Also, on a
1048 			* system where READ CAPACITY failed, we may have read
1049 			* past the end of the disk.
1050 		 	*/
1051 			if ((cmd->device->use_10_for_rw &&
1052 			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
1053 			    (cmd->cmnd[0] == READ_10 ||
1054 			     cmd->cmnd[0] == WRITE_10)) {
1055 				cmd->device->use_10_for_rw = 0;
1056 				/*
1057 				 * This will cause a retry with a 6-byte
1058 				 * command.
1059 				 */
1060 				scsi_requeue_command(q, cmd);
1061 				result = 0;
1062 			} else {
1063 				scsi_end_request(cmd, 0, this_count, 1);
1064 				return;
1065 			}
1066 			break;
1067 		case NOT_READY:
1068 			/*
1069 			 * If the device is in the process of becoming ready,
1070 			 * retry.
1071 			 */
1072 			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
1073 				scsi_requeue_command(q, cmd);
1074 				return;
1075 			}
1076 			if (!(req->flags & REQ_QUIET))
1077 				scmd_printk(KERN_INFO, cmd,
1078 					   "Device not ready.\n");
1079 			scsi_end_request(cmd, 0, this_count, 1);
1080 			return;
1081 		case VOLUME_OVERFLOW:
1082 			if (!(req->flags & REQ_QUIET)) {
1083 				scmd_printk(KERN_INFO, cmd,
1084 					   "Volume overflow, CDB: ");
1085 				__scsi_print_command(cmd->data_cmnd);
1086 				scsi_print_sense("", cmd);
1087 			}
1088 			scsi_end_request(cmd, 0, block_bytes, 1);
1089 			return;
1090 		default:
1091 			break;
1092 		}
1093 	}			/* driver byte != 0 */
1094 	if (host_byte(result) == DID_RESET) {
1095 		/*
1096 		 * Third party bus reset or reset for error
1097 		 * recovery reasons.  Just retry the request
1098 		 * and see what happens.
1099 		 */
1100 		scsi_requeue_command(q, cmd);
1101 		return;
1102 	}
1103 	if (result) {
1104 		if (!(req->flags & REQ_QUIET)) {
1105 			scmd_printk(KERN_INFO, cmd,
1106 				   "SCSI error: return code = 0x%x\n", result);
1107 
1108 			if (driver_byte(result) & DRIVER_SENSE)
1109 				scsi_print_sense("", cmd);
1110 		}
1111 		/*
1112 		 * Mark a single buffer as not uptodate.  Queue the remainder.
1113 		 * We sometimes get this cruft in the event that a medium error
1114 		 * isn't properly reported.
1115 		 */
1116 		block_bytes = req->hard_cur_sectors << 9;
1117 		if (!block_bytes)
1118 			block_bytes = req->data_len;
1119 		scsi_end_request(cmd, 0, block_bytes, 1);
1120 	}
1121 }
1122 EXPORT_SYMBOL(scsi_io_completion);
1123 
1124 /*
1125  * Function:    scsi_init_io()
1126  *
1127  * Purpose:     SCSI I/O initialize function.
1128  *
1129  * Arguments:   cmd   - Command descriptor we wish to initialize
1130  *
1131  * Returns:     0 on success
1132  *		BLKPREP_DEFER if the failure is retryable
1133  *		BLKPREP_KILL if the failure is fatal
1134  */
1135 static int scsi_init_io(struct scsi_cmnd *cmd)
1136 {
1137 	struct request     *req = cmd->request;
1138 	struct scatterlist *sgpnt;
1139 	int		   count;
1140 
1141 	/*
1142 	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
1143 	 */
1144 	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
1145 		cmd->request_bufflen = req->data_len;
1146 		cmd->request_buffer = req->data;
1147 		req->buffer = req->data;
1148 		cmd->use_sg = 0;
1149 		return 0;
1150 	}
1151 
1152 	/*
1153 	 * we used to not use scatter-gather for single segment request,
1154 	 * but now we do (it makes highmem I/O easier to support without
1155 	 * kmapping pages)
1156 	 */
1157 	cmd->use_sg = req->nr_phys_segments;
1158 
1159 	/*
1160 	 * if sg table allocation fails, requeue request later.
1161 	 */
1162 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
1163 	if (unlikely(!sgpnt)) {
1164 		scsi_unprep_request(req);
1165 		return BLKPREP_DEFER;
1166 	}
1167 
1168 	cmd->request_buffer = (char *) sgpnt;
1169 	cmd->request_bufflen = req->nr_sectors << 9;
1170 	if (blk_pc_request(req))
1171 		cmd->request_bufflen = req->data_len;
1172 	req->buffer = NULL;
1173 
1174 	/*
1175 	 * Next, walk the list, and fill in the addresses and sizes of
1176 	 * each segment.
1177 	 */
1178 	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
1179 
1180 	/*
1181 	 * mapped well, send it off
1182 	 */
1183 	if (likely(count <= cmd->use_sg)) {
1184 		cmd->use_sg = count;
1185 		return 0;
1186 	}
1187 
1188 	printk(KERN_ERR "Incorrect number of segments after building list\n");
1189 	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
1190 	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
1191 			req->current_nr_sectors);
1192 
1193 	/* release the command and kill it */
1194 	scsi_release_buffers(cmd);
1195 	scsi_put_command(cmd);
1196 	return BLKPREP_KILL;
1197 }
1198 
1199 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1200 			       sector_t *error_sector)
1201 {
1202 	struct scsi_device *sdev = q->queuedata;
1203 	struct scsi_driver *drv;
1204 
1205 	if (sdev->sdev_state != SDEV_RUNNING)
1206 		return -ENXIO;
1207 
1208 	drv = *(struct scsi_driver **) disk->private_data;
1209 	if (drv->issue_flush)
1210 		return drv->issue_flush(&sdev->sdev_gendev, error_sector);
1211 
1212 	return -EOPNOTSUPP;
1213 }
1214 
1215 static void scsi_generic_done(struct scsi_cmnd *cmd)
1216 {
1217 	BUG_ON(!blk_pc_request(cmd->request));
1218 	/*
1219 	 * This will complete the whole command with uptodate=1 so
1220 	 * as far as the block layer is concerned the command completed
1221 	 * successfully. Since this is a REQ_BLOCK_PC command the
1222 	 * caller should check the request's errors value
1223 	 */
1224 	scsi_io_completion(cmd, cmd->bufflen, 0);
1225 }
1226 
1227 void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
1228 {
1229 	struct request *req = cmd->request;
1230 
1231 	BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
1232 	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1233 	cmd->cmd_len = req->cmd_len;
1234 	if (!req->data_len)
1235 		cmd->sc_data_direction = DMA_NONE;
1236 	else if (rq_data_dir(req) == WRITE)
1237 		cmd->sc_data_direction = DMA_TO_DEVICE;
1238 	else
1239 		cmd->sc_data_direction = DMA_FROM_DEVICE;
1240 
1241 	cmd->transfersize = req->data_len;
1242 	cmd->allowed = req->retries;
1243 	cmd->timeout_per_command = req->timeout;
1244 }
1245 EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
1246 
1247 static int scsi_prep_fn(struct request_queue *q, struct request *req)
1248 {
1249 	struct scsi_device *sdev = q->queuedata;
1250 	struct scsi_cmnd *cmd;
1251 	int specials_only = 0;
1252 
1253 	/*
1254 	 * Just check to see if the device is online.  If it isn't, we
1255 	 * refuse to process any commands.  The device must be brought
1256 	 * online before trying any recovery commands
1257 	 */
1258 	if (unlikely(!scsi_device_online(sdev))) {
1259 		sdev_printk(KERN_ERR, sdev,
1260 			    "rejecting I/O to offline device\n");
1261 		goto kill;
1262 	}
1263 	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1264 		/* OK, we're not in a running state don't prep
1265 		 * user commands */
1266 		if (sdev->sdev_state == SDEV_DEL) {
1267 			/* Device is fully deleted, no commands
1268 			 * at all allowed down */
1269 			sdev_printk(KERN_ERR, sdev,
1270 				    "rejecting I/O to dead device\n");
1271 			goto kill;
1272 		}
1273 		/* OK, we only allow special commands (i.e. not
1274 		 * user initiated ones */
1275 		specials_only = sdev->sdev_state;
1276 	}
1277 
1278 	/*
1279 	 * Find the actual device driver associated with this command.
1280 	 * The SPECIAL requests are things like character device or
1281 	 * ioctls, which did not originate from ll_rw_blk.  Note that
1282 	 * the special field is also used to indicate the cmd for
1283 	 * the remainder of a partially fulfilled request that can
1284 	 * come up when there is a medium error.  We have to treat
1285 	 * these two cases differently.  We differentiate by looking
1286 	 * at request->cmd, as this tells us the real story.
1287 	 */
1288 	if (req->flags & REQ_SPECIAL && req->special) {
1289 		struct scsi_request *sreq = req->special;
1290 
1291 		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
1292 			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
1293 			if (unlikely(!cmd))
1294 				goto defer;
1295 			scsi_init_cmd_from_req(cmd, sreq);
1296 		} else
1297 			cmd = req->special;
1298 	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1299 
1300 		if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
1301 			if(specials_only == SDEV_QUIESCE ||
1302 					specials_only == SDEV_BLOCK)
1303 				goto defer;
1304 
1305 			sdev_printk(KERN_ERR, sdev,
1306 				    "rejecting I/O to device being removed\n");
1307 			goto kill;
1308 		}
1309 
1310 
1311 		/*
1312 		 * Now try and find a command block that we can use.
1313 		 */
1314 		if (!req->special) {
1315 			cmd = scsi_get_command(sdev, GFP_ATOMIC);
1316 			if (unlikely(!cmd))
1317 				goto defer;
1318 		} else
1319 			cmd = req->special;
1320 
1321 		/* pull a tag out of the request if we have one */
1322 		cmd->tag = req->tag;
1323 	} else {
1324 		blk_dump_rq_flags(req, "SCSI bad req");
1325 		goto kill;
1326 	}
1327 
1328 	/* note the overloading of req->special.  When the tag
1329 	 * is active it always means cmd.  If the tag goes
1330 	 * back for re-queueing, it may be reset */
1331 	req->special = cmd;
1332 	cmd->request = req;
1333 
1334 	/*
1335 	 * FIXME: drop the lock here because the functions below
1336 	 * expect to be called without the queue lock held.  Also,
1337 	 * previously, we dequeued the request before dropping the
1338 	 * lock.  We hope REQ_STARTED prevents anything untoward from
1339 	 * happening now.
1340 	 */
1341 	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1342 		struct scsi_driver *drv;
1343 		int ret;
1344 
1345 		/*
1346 		 * This will do a couple of things:
1347 		 *  1) Fill in the actual SCSI command.
1348 		 *  2) Fill in any other upper-level specific fields
1349 		 * (timeout).
1350 		 *
1351 		 * If this returns 0, it means that the request failed
1352 		 * (reading past end of disk, reading offline device,
1353 		 * etc).   This won't actually talk to the device, but
1354 		 * some kinds of consistency checking may cause the
1355 		 * request to be rejected immediately.
1356 		 */
1357 
1358 		/*
1359 		 * This sets up the scatter-gather table (allocating if
1360 		 * required).
1361 		 */
1362 		ret = scsi_init_io(cmd);
1363 		switch(ret) {
1364 			/* For BLKPREP_KILL/DEFER the cmd was released */
1365 		case BLKPREP_KILL:
1366 			goto kill;
1367 		case BLKPREP_DEFER:
1368 			goto defer;
1369 		}
1370 
1371 		/*
1372 		 * Initialize the actual SCSI command for this request.
1373 		 */
1374 		if (req->rq_disk) {
1375 			drv = *(struct scsi_driver **)req->rq_disk->private_data;
1376 			if (unlikely(!drv->init_command(cmd))) {
1377 				scsi_release_buffers(cmd);
1378 				scsi_put_command(cmd);
1379 				goto kill;
1380 			}
1381 		} else {
1382 			scsi_setup_blk_pc_cmnd(cmd);
1383 			cmd->done = scsi_generic_done;
1384 		}
1385 	}
1386 
1387 	/*
1388 	 * The request is now prepped, no need to come back here
1389 	 */
1390 	req->flags |= REQ_DONTPREP;
1391 	return BLKPREP_OK;
1392 
1393  defer:
1394 	/* If we defer, the elv_next_request() returns NULL, but the
1395 	 * queue must be restarted, so we plug here if no returning
1396 	 * command will automatically do that. */
1397 	if (sdev->device_busy == 0)
1398 		blk_plug_device(q);
1399 	return BLKPREP_DEFER;
1400  kill:
1401 	req->errors = DID_NO_CONNECT << 16;
1402 	return BLKPREP_KILL;
1403 }
1404 
1405 /*
1406  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1407  * return 0.
1408  *
1409  * Called with the queue_lock held.
1410  */
1411 static inline int scsi_dev_queue_ready(struct request_queue *q,
1412 				  struct scsi_device *sdev)
1413 {
1414 	if (sdev->device_busy >= sdev->queue_depth)
1415 		return 0;
1416 	if (sdev->device_busy == 0 && sdev->device_blocked) {
1417 		/*
1418 		 * unblock after device_blocked iterates to zero
1419 		 */
1420 		if (--sdev->device_blocked == 0) {
1421 			SCSI_LOG_MLQUEUE(3,
1422 				   sdev_printk(KERN_INFO, sdev,
1423 				   "unblocking device at zero depth\n"));
1424 		} else {
1425 			blk_plug_device(q);
1426 			return 0;
1427 		}
1428 	}
1429 	if (sdev->device_blocked)
1430 		return 0;
1431 
1432 	return 1;
1433 }
1434 
1435 /*
1436  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1437  * return 0. We must end up running the queue again whenever 0 is
1438  * returned, else IO can hang.
1439  *
1440  * Called with host_lock held.
1441  */
1442 static inline int scsi_host_queue_ready(struct request_queue *q,
1443 				   struct Scsi_Host *shost,
1444 				   struct scsi_device *sdev)
1445 {
1446 	if (scsi_host_in_recovery(shost))
1447 		return 0;
1448 	if (shost->host_busy == 0 && shost->host_blocked) {
1449 		/*
1450 		 * unblock after host_blocked iterates to zero
1451 		 */
1452 		if (--shost->host_blocked == 0) {
1453 			SCSI_LOG_MLQUEUE(3,
1454 				printk("scsi%d unblocking host at zero depth\n",
1455 					shost->host_no));
1456 		} else {
1457 			blk_plug_device(q);
1458 			return 0;
1459 		}
1460 	}
1461 	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1462 	    shost->host_blocked || shost->host_self_blocked) {
1463 		if (list_empty(&sdev->starved_entry))
1464 			list_add_tail(&sdev->starved_entry, &shost->starved_list);
1465 		return 0;
1466 	}
1467 
1468 	/* We're OK to process the command, so we can't be starved */
1469 	if (!list_empty(&sdev->starved_entry))
1470 		list_del_init(&sdev->starved_entry);
1471 
1472 	return 1;
1473 }
1474 
1475 /*
1476  * Kill a request for a dead device
1477  */
1478 static void scsi_kill_request(struct request *req, request_queue_t *q)
1479 {
1480 	struct scsi_cmnd *cmd = req->special;
1481 
1482 	blkdev_dequeue_request(req);
1483 
1484 	if (unlikely(cmd == NULL)) {
1485 		printk(KERN_CRIT "impossible request in %s.\n",
1486 				 __FUNCTION__);
1487 		BUG();
1488 	}
1489 
1490 	scsi_init_cmd_errh(cmd);
1491 	cmd->result = DID_NO_CONNECT << 16;
1492 	atomic_inc(&cmd->device->iorequest_cnt);
1493 	__scsi_done(cmd);
1494 }
1495 
1496 static void scsi_softirq_done(struct request *rq)
1497 {
1498 	struct scsi_cmnd *cmd = rq->completion_data;
1499 	unsigned long wait_for = cmd->allowed * cmd->timeout_per_command;
1500 	int disposition;
1501 
1502 	INIT_LIST_HEAD(&cmd->eh_entry);
1503 
1504 	disposition = scsi_decide_disposition(cmd);
1505 	if (disposition != SUCCESS &&
1506 	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1507 		sdev_printk(KERN_ERR, cmd->device,
1508 			    "timing out command, waited %lus\n",
1509 			    wait_for/HZ);
1510 		disposition = SUCCESS;
1511 	}
1512 
1513 	scsi_log_completion(cmd, disposition);
1514 
1515 	switch (disposition) {
1516 		case SUCCESS:
1517 			scsi_finish_command(cmd);
1518 			break;
1519 		case NEEDS_RETRY:
1520 			scsi_retry_command(cmd);
1521 			break;
1522 		case ADD_TO_MLQUEUE:
1523 			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1524 			break;
1525 		default:
1526 			if (!scsi_eh_scmd_add(cmd, 0))
1527 				scsi_finish_command(cmd);
1528 	}
1529 }
1530 
1531 /*
1532  * Function:    scsi_request_fn()
1533  *
1534  * Purpose:     Main strategy routine for SCSI.
1535  *
1536  * Arguments:   q       - Pointer to actual queue.
1537  *
1538  * Returns:     Nothing
1539  *
1540  * Lock status: IO request lock assumed to be held when called.
1541  */
1542 static void scsi_request_fn(struct request_queue *q)
1543 {
1544 	struct scsi_device *sdev = q->queuedata;
1545 	struct Scsi_Host *shost;
1546 	struct scsi_cmnd *cmd;
1547 	struct request *req;
1548 
1549 	if (!sdev) {
1550 		printk("scsi: killing requests for dead queue\n");
1551 		while ((req = elv_next_request(q)) != NULL)
1552 			scsi_kill_request(req, q);
1553 		return;
1554 	}
1555 
1556 	if(!get_device(&sdev->sdev_gendev))
1557 		/* We must be tearing the block queue down already */
1558 		return;
1559 
1560 	/*
1561 	 * To start with, we keep looping until the queue is empty, or until
1562 	 * the host is no longer able to accept any more requests.
1563 	 */
1564 	shost = sdev->host;
1565 	while (!blk_queue_plugged(q)) {
1566 		int rtn;
1567 		/*
1568 		 * get next queueable request.  We do this early to make sure
1569 		 * that the request is fully prepared even if we cannot
1570 		 * accept it.
1571 		 */
1572 		req = elv_next_request(q);
1573 		if (!req || !scsi_dev_queue_ready(q, sdev))
1574 			break;
1575 
1576 		if (unlikely(!scsi_device_online(sdev))) {
1577 			sdev_printk(KERN_ERR, sdev,
1578 				    "rejecting I/O to offline device\n");
1579 			scsi_kill_request(req, q);
1580 			continue;
1581 		}
1582 
1583 
1584 		/*
1585 		 * Remove the request from the request list.
1586 		 */
1587 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1588 			blkdev_dequeue_request(req);
1589 		sdev->device_busy++;
1590 
1591 		spin_unlock(q->queue_lock);
1592 		cmd = req->special;
1593 		if (unlikely(cmd == NULL)) {
1594 			printk(KERN_CRIT "impossible request in %s.\n"
1595 					 "please mail a stack trace to "
1596 					 "linux-scsi@vger.kernel.org",
1597 					 __FUNCTION__);
1598 			BUG();
1599 		}
1600 		spin_lock(shost->host_lock);
1601 
1602 		if (!scsi_host_queue_ready(q, shost, sdev))
1603 			goto not_ready;
1604 		if (sdev->single_lun) {
1605 			if (scsi_target(sdev)->starget_sdev_user &&
1606 			    scsi_target(sdev)->starget_sdev_user != sdev)
1607 				goto not_ready;
1608 			scsi_target(sdev)->starget_sdev_user = sdev;
1609 		}
1610 		shost->host_busy++;
1611 
1612 		/*
1613 		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1614 		 *		take the lock again.
1615 		 */
1616 		spin_unlock_irq(shost->host_lock);
1617 
1618 		/*
1619 		 * Finally, initialize any error handling parameters, and set up
1620 		 * the timers for timeouts.
1621 		 */
1622 		scsi_init_cmd_errh(cmd);
1623 
1624 		/*
1625 		 * Dispatch the command to the low-level driver.
1626 		 */
1627 		rtn = scsi_dispatch_cmd(cmd);
1628 		spin_lock_irq(q->queue_lock);
1629 		if(rtn) {
1630 			/* we're refusing the command; because of
1631 			 * the way locks get dropped, we need to
1632 			 * check here if plugging is required */
1633 			if(sdev->device_busy == 0)
1634 				blk_plug_device(q);
1635 
1636 			break;
1637 		}
1638 	}
1639 
1640 	goto out;
1641 
1642  not_ready:
1643 	spin_unlock_irq(shost->host_lock);
1644 
1645 	/*
1646 	 * lock q, handle tag, requeue req, and decrement device_busy. We
1647 	 * must return with queue_lock held.
1648 	 *
1649 	 * Decrementing device_busy without checking it is OK, as all such
1650 	 * cases (host limits or settings) should run the queue at some
1651 	 * later time.
1652 	 */
1653 	spin_lock_irq(q->queue_lock);
1654 	blk_requeue_request(q, req);
1655 	sdev->device_busy--;
1656 	if(sdev->device_busy == 0)
1657 		blk_plug_device(q);
1658  out:
1659 	/* must be careful here...if we trigger the ->remove() function
1660 	 * we cannot be holding the q lock */
1661 	spin_unlock_irq(q->queue_lock);
1662 	put_device(&sdev->sdev_gendev);
1663 	spin_lock_irq(q->queue_lock);
1664 }
1665 
1666 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1667 {
1668 	struct device *host_dev;
1669 	u64 bounce_limit = 0xffffffff;
1670 
1671 	if (shost->unchecked_isa_dma)
1672 		return BLK_BOUNCE_ISA;
1673 	/*
1674 	 * Platforms with virtual-DMA translation
1675 	 * hardware have no practical limit.
1676 	 */
1677 	if (!PCI_DMA_BUS_IS_PHYS)
1678 		return BLK_BOUNCE_ANY;
1679 
1680 	host_dev = scsi_get_device(shost);
1681 	if (host_dev && host_dev->dma_mask)
1682 		bounce_limit = *host_dev->dma_mask;
1683 
1684 	return bounce_limit;
1685 }
1686 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1687 
1688 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1689 {
1690 	struct Scsi_Host *shost = sdev->host;
1691 	struct request_queue *q;
1692 
1693 	q = blk_init_queue(scsi_request_fn, NULL);
1694 	if (!q)
1695 		return NULL;
1696 
1697 	blk_queue_prep_rq(q, scsi_prep_fn);
1698 
1699 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
1700 	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1701 	blk_queue_max_sectors(q, shost->max_sectors);
1702 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1703 	blk_queue_segment_boundary(q, shost->dma_boundary);
1704 	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1705 	blk_queue_softirq_done(q, scsi_softirq_done);
1706 
1707 	if (!shost->use_clustering)
1708 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1709 	return q;
1710 }
1711 
1712 void scsi_free_queue(struct request_queue *q)
1713 {
1714 	blk_cleanup_queue(q);
1715 }
1716 
1717 /*
1718  * Function:    scsi_block_requests()
1719  *
1720  * Purpose:     Utility function used by low-level drivers to prevent further
1721  *		commands from being queued to the device.
1722  *
1723  * Arguments:   shost       - Host in question
1724  *
1725  * Returns:     Nothing
1726  *
1727  * Lock status: No locks are assumed held.
1728  *
1729  * Notes:       There is no timer nor any other means by which the requests
1730  *		get unblocked other than the low-level driver calling
1731  *		scsi_unblock_requests().
1732  */
1733 void scsi_block_requests(struct Scsi_Host *shost)
1734 {
1735 	shost->host_self_blocked = 1;
1736 }
1737 EXPORT_SYMBOL(scsi_block_requests);
1738 
1739 /*
1740  * Function:    scsi_unblock_requests()
1741  *
1742  * Purpose:     Utility function used by low-level drivers to allow further
1743  *		commands from being queued to the device.
1744  *
1745  * Arguments:   shost       - Host in question
1746  *
1747  * Returns:     Nothing
1748  *
1749  * Lock status: No locks are assumed held.
1750  *
1751  * Notes:       There is no timer nor any other means by which the requests
1752  *		get unblocked other than the low-level driver calling
1753  *		scsi_unblock_requests().
1754  *
1755  *		This is done as an API function so that changes to the
1756  *		internals of the scsi mid-layer won't require wholesale
1757  *		changes to drivers that use this feature.
1758  */
1759 void scsi_unblock_requests(struct Scsi_Host *shost)
1760 {
1761 	shost->host_self_blocked = 0;
1762 	scsi_run_host_queues(shost);
1763 }
1764 EXPORT_SYMBOL(scsi_unblock_requests);
1765 
1766 int __init scsi_init_queue(void)
1767 {
1768 	int i;
1769 
1770 	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
1771 					sizeof(struct scsi_io_context),
1772 					0, 0, NULL, NULL);
1773 	if (!scsi_io_context_cache) {
1774 		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
1775 		return -ENOMEM;
1776 	}
1777 
1778 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1779 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1780 		int size = sgp->size * sizeof(struct scatterlist);
1781 
1782 		sgp->slab = kmem_cache_create(sgp->name, size, 0,
1783 				SLAB_HWCACHE_ALIGN, NULL, NULL);
1784 		if (!sgp->slab) {
1785 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1786 					sgp->name);
1787 		}
1788 
1789 		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1790 				mempool_alloc_slab, mempool_free_slab,
1791 				sgp->slab);
1792 		if (!sgp->pool) {
1793 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1794 					sgp->name);
1795 		}
1796 	}
1797 
1798 	return 0;
1799 }
1800 
1801 void scsi_exit_queue(void)
1802 {
1803 	int i;
1804 
1805 	kmem_cache_destroy(scsi_io_context_cache);
1806 
1807 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1808 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1809 		mempool_destroy(sgp->pool);
1810 		kmem_cache_destroy(sgp->slab);
1811 	}
1812 }
1813 /**
1814  *	scsi_mode_sense - issue a mode sense, falling back from 10 to
1815  *		six bytes if necessary.
1816  *	@sdev:	SCSI device to be queried
1817  *	@dbd:	set if mode sense will allow block descriptors to be returned
1818  *	@modepage: mode page being requested
1819  *	@buffer: request buffer (may not be smaller than eight bytes)
1820  *	@len:	length of request buffer.
1821  *	@timeout: command timeout
1822  *	@retries: number of retries before failing
1823  *	@data: returns a structure abstracting the mode header data
1824  *	@sense: place to put sense data (or NULL if no sense to be collected).
1825  *		must be SCSI_SENSE_BUFFERSIZE big.
1826  *
1827  *	Returns zero if unsuccessful, or the header offset (either 4
1828  *	or 8 depending on whether a six or ten byte command was
1829  *	issued) if successful.
1830  **/
1831 int
1832 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1833 		  unsigned char *buffer, int len, int timeout, int retries,
1834 		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
1835 	unsigned char cmd[12];
1836 	int use_10_for_ms;
1837 	int header_length;
1838 	int result;
1839 	struct scsi_sense_hdr my_sshdr;
1840 
1841 	memset(data, 0, sizeof(*data));
1842 	memset(&cmd[0], 0, 12);
1843 	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
1844 	cmd[2] = modepage;
1845 
1846 	/* caller might not be interested in sense, but we need it */
1847 	if (!sshdr)
1848 		sshdr = &my_sshdr;
1849 
1850  retry:
1851 	use_10_for_ms = sdev->use_10_for_ms;
1852 
1853 	if (use_10_for_ms) {
1854 		if (len < 8)
1855 			len = 8;
1856 
1857 		cmd[0] = MODE_SENSE_10;
1858 		cmd[8] = len;
1859 		header_length = 8;
1860 	} else {
1861 		if (len < 4)
1862 			len = 4;
1863 
1864 		cmd[0] = MODE_SENSE;
1865 		cmd[4] = len;
1866 		header_length = 4;
1867 	}
1868 
1869 	memset(buffer, 0, len);
1870 
1871 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1872 				  sshdr, timeout, retries);
1873 
1874 	/* This code looks awful: what it's doing is making sure an
1875 	 * ILLEGAL REQUEST sense return identifies the actual command
1876 	 * byte as the problem.  MODE_SENSE commands can return
1877 	 * ILLEGAL REQUEST if the code page isn't supported */
1878 
1879 	if (use_10_for_ms && !scsi_status_is_good(result) &&
1880 	    (driver_byte(result) & DRIVER_SENSE)) {
1881 		if (scsi_sense_valid(sshdr)) {
1882 			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1883 			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1884 				/*
1885 				 * Invalid command operation code
1886 				 */
1887 				sdev->use_10_for_ms = 0;
1888 				goto retry;
1889 			}
1890 		}
1891 	}
1892 
1893 	if(scsi_status_is_good(result)) {
1894 		data->header_length = header_length;
1895 		if(use_10_for_ms) {
1896 			data->length = buffer[0]*256 + buffer[1] + 2;
1897 			data->medium_type = buffer[2];
1898 			data->device_specific = buffer[3];
1899 			data->longlba = buffer[4] & 0x01;
1900 			data->block_descriptor_length = buffer[6]*256
1901 				+ buffer[7];
1902 		} else {
1903 			data->length = buffer[0] + 1;
1904 			data->medium_type = buffer[1];
1905 			data->device_specific = buffer[2];
1906 			data->block_descriptor_length = buffer[3];
1907 		}
1908 	}
1909 
1910 	return result;
1911 }
1912 EXPORT_SYMBOL(scsi_mode_sense);
1913 
1914 int
1915 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1916 {
1917 	char cmd[] = {
1918 		TEST_UNIT_READY, 0, 0, 0, 0, 0,
1919 	};
1920 	struct scsi_sense_hdr sshdr;
1921 	int result;
1922 
1923 	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1924 				  timeout, retries);
1925 
1926 	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1927 
1928 		if ((scsi_sense_valid(&sshdr)) &&
1929 		    ((sshdr.sense_key == UNIT_ATTENTION) ||
1930 		     (sshdr.sense_key == NOT_READY))) {
1931 			sdev->changed = 1;
1932 			result = 0;
1933 		}
1934 	}
1935 	return result;
1936 }
1937 EXPORT_SYMBOL(scsi_test_unit_ready);
1938 
1939 /**
1940  *	scsi_device_set_state - Take the given device through the device
1941  *		state model.
1942  *	@sdev:	scsi device to change the state of.
1943  *	@state:	state to change to.
1944  *
1945  *	Returns zero if unsuccessful or an error if the requested
1946  *	transition is illegal.
1947  **/
1948 int
1949 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1950 {
1951 	enum scsi_device_state oldstate = sdev->sdev_state;
1952 
1953 	if (state == oldstate)
1954 		return 0;
1955 
1956 	switch (state) {
1957 	case SDEV_CREATED:
1958 		/* There are no legal states that come back to
1959 		 * created.  This is the manually initialised start
1960 		 * state */
1961 		goto illegal;
1962 
1963 	case SDEV_RUNNING:
1964 		switch (oldstate) {
1965 		case SDEV_CREATED:
1966 		case SDEV_OFFLINE:
1967 		case SDEV_QUIESCE:
1968 		case SDEV_BLOCK:
1969 			break;
1970 		default:
1971 			goto illegal;
1972 		}
1973 		break;
1974 
1975 	case SDEV_QUIESCE:
1976 		switch (oldstate) {
1977 		case SDEV_RUNNING:
1978 		case SDEV_OFFLINE:
1979 			break;
1980 		default:
1981 			goto illegal;
1982 		}
1983 		break;
1984 
1985 	case SDEV_OFFLINE:
1986 		switch (oldstate) {
1987 		case SDEV_CREATED:
1988 		case SDEV_RUNNING:
1989 		case SDEV_QUIESCE:
1990 		case SDEV_BLOCK:
1991 			break;
1992 		default:
1993 			goto illegal;
1994 		}
1995 		break;
1996 
1997 	case SDEV_BLOCK:
1998 		switch (oldstate) {
1999 		case SDEV_CREATED:
2000 		case SDEV_RUNNING:
2001 			break;
2002 		default:
2003 			goto illegal;
2004 		}
2005 		break;
2006 
2007 	case SDEV_CANCEL:
2008 		switch (oldstate) {
2009 		case SDEV_CREATED:
2010 		case SDEV_RUNNING:
2011 		case SDEV_OFFLINE:
2012 		case SDEV_BLOCK:
2013 			break;
2014 		default:
2015 			goto illegal;
2016 		}
2017 		break;
2018 
2019 	case SDEV_DEL:
2020 		switch (oldstate) {
2021 		case SDEV_CANCEL:
2022 			break;
2023 		default:
2024 			goto illegal;
2025 		}
2026 		break;
2027 
2028 	}
2029 	sdev->sdev_state = state;
2030 	return 0;
2031 
2032  illegal:
2033 	SCSI_LOG_ERROR_RECOVERY(1,
2034 				sdev_printk(KERN_ERR, sdev,
2035 					    "Illegal state transition %s->%s\n",
2036 					    scsi_device_state_name(oldstate),
2037 					    scsi_device_state_name(state))
2038 				);
2039 	return -EINVAL;
2040 }
2041 EXPORT_SYMBOL(scsi_device_set_state);
2042 
2043 /**
2044  *	scsi_device_quiesce - Block user issued commands.
2045  *	@sdev:	scsi device to quiesce.
2046  *
2047  *	This works by trying to transition to the SDEV_QUIESCE state
2048  *	(which must be a legal transition).  When the device is in this
2049  *	state, only special requests will be accepted, all others will
2050  *	be deferred.  Since special requests may also be requeued requests,
2051  *	a successful return doesn't guarantee the device will be
2052  *	totally quiescent.
2053  *
2054  *	Must be called with user context, may sleep.
2055  *
2056  *	Returns zero if unsuccessful or an error if not.
2057  **/
2058 int
2059 scsi_device_quiesce(struct scsi_device *sdev)
2060 {
2061 	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2062 	if (err)
2063 		return err;
2064 
2065 	scsi_run_queue(sdev->request_queue);
2066 	while (sdev->device_busy) {
2067 		msleep_interruptible(200);
2068 		scsi_run_queue(sdev->request_queue);
2069 	}
2070 	return 0;
2071 }
2072 EXPORT_SYMBOL(scsi_device_quiesce);
2073 
2074 /**
2075  *	scsi_device_resume - Restart user issued commands to a quiesced device.
2076  *	@sdev:	scsi device to resume.
2077  *
2078  *	Moves the device from quiesced back to running and restarts the
2079  *	queues.
2080  *
2081  *	Must be called with user context, may sleep.
2082  **/
2083 void
2084 scsi_device_resume(struct scsi_device *sdev)
2085 {
2086 	if(scsi_device_set_state(sdev, SDEV_RUNNING))
2087 		return;
2088 	scsi_run_queue(sdev->request_queue);
2089 }
2090 EXPORT_SYMBOL(scsi_device_resume);
2091 
2092 static void
2093 device_quiesce_fn(struct scsi_device *sdev, void *data)
2094 {
2095 	scsi_device_quiesce(sdev);
2096 }
2097 
2098 void
2099 scsi_target_quiesce(struct scsi_target *starget)
2100 {
2101 	starget_for_each_device(starget, NULL, device_quiesce_fn);
2102 }
2103 EXPORT_SYMBOL(scsi_target_quiesce);
2104 
2105 static void
2106 device_resume_fn(struct scsi_device *sdev, void *data)
2107 {
2108 	scsi_device_resume(sdev);
2109 }
2110 
2111 void
2112 scsi_target_resume(struct scsi_target *starget)
2113 {
2114 	starget_for_each_device(starget, NULL, device_resume_fn);
2115 }
2116 EXPORT_SYMBOL(scsi_target_resume);
2117 
2118 /**
2119  * scsi_internal_device_block - internal function to put a device
2120  *				temporarily into the SDEV_BLOCK state
2121  * @sdev:	device to block
2122  *
2123  * Block request made by scsi lld's to temporarily stop all
2124  * scsi commands on the specified device.  Called from interrupt
2125  * or normal process context.
2126  *
2127  * Returns zero if successful or error if not
2128  *
2129  * Notes:
2130  *	This routine transitions the device to the SDEV_BLOCK state
2131  *	(which must be a legal transition).  When the device is in this
2132  *	state, all commands are deferred until the scsi lld reenables
2133  *	the device with scsi_device_unblock or device_block_tmo fires.
2134  *	This routine assumes the host_lock is held on entry.
2135  **/
2136 int
2137 scsi_internal_device_block(struct scsi_device *sdev)
2138 {
2139 	request_queue_t *q = sdev->request_queue;
2140 	unsigned long flags;
2141 	int err = 0;
2142 
2143 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2144 	if (err)
2145 		return err;
2146 
2147 	/*
2148 	 * The device has transitioned to SDEV_BLOCK.  Stop the
2149 	 * block layer from calling the midlayer with this device's
2150 	 * request queue.
2151 	 */
2152 	spin_lock_irqsave(q->queue_lock, flags);
2153 	blk_stop_queue(q);
2154 	spin_unlock_irqrestore(q->queue_lock, flags);
2155 
2156 	return 0;
2157 }
2158 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2159 
2160 /**
2161  * scsi_internal_device_unblock - resume a device after a block request
2162  * @sdev:	device to resume
2163  *
2164  * Called by scsi lld's or the midlayer to restart the device queue
2165  * for the previously suspended scsi device.  Called from interrupt or
2166  * normal process context.
2167  *
2168  * Returns zero if successful or error if not.
2169  *
2170  * Notes:
2171  *	This routine transitions the device to the SDEV_RUNNING state
2172  *	(which must be a legal transition) allowing the midlayer to
2173  *	goose the queue for this device.  This routine assumes the
2174  *	host_lock is held upon entry.
2175  **/
2176 int
2177 scsi_internal_device_unblock(struct scsi_device *sdev)
2178 {
2179 	request_queue_t *q = sdev->request_queue;
2180 	int err;
2181 	unsigned long flags;
2182 
2183 	/*
2184 	 * Try to transition the scsi device to SDEV_RUNNING
2185 	 * and goose the device queue if successful.
2186 	 */
2187 	err = scsi_device_set_state(sdev, SDEV_RUNNING);
2188 	if (err)
2189 		return err;
2190 
2191 	spin_lock_irqsave(q->queue_lock, flags);
2192 	blk_start_queue(q);
2193 	spin_unlock_irqrestore(q->queue_lock, flags);
2194 
2195 	return 0;
2196 }
2197 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2198 
2199 static void
2200 device_block(struct scsi_device *sdev, void *data)
2201 {
2202 	scsi_internal_device_block(sdev);
2203 }
2204 
2205 static int
2206 target_block(struct device *dev, void *data)
2207 {
2208 	if (scsi_is_target_device(dev))
2209 		starget_for_each_device(to_scsi_target(dev), NULL,
2210 					device_block);
2211 	return 0;
2212 }
2213 
2214 void
2215 scsi_target_block(struct device *dev)
2216 {
2217 	if (scsi_is_target_device(dev))
2218 		starget_for_each_device(to_scsi_target(dev), NULL,
2219 					device_block);
2220 	else
2221 		device_for_each_child(dev, NULL, target_block);
2222 }
2223 EXPORT_SYMBOL_GPL(scsi_target_block);
2224 
2225 static void
2226 device_unblock(struct scsi_device *sdev, void *data)
2227 {
2228 	scsi_internal_device_unblock(sdev);
2229 }
2230 
2231 static int
2232 target_unblock(struct device *dev, void *data)
2233 {
2234 	if (scsi_is_target_device(dev))
2235 		starget_for_each_device(to_scsi_target(dev), NULL,
2236 					device_unblock);
2237 	return 0;
2238 }
2239 
2240 void
2241 scsi_target_unblock(struct device *dev)
2242 {
2243 	if (scsi_is_target_device(dev))
2244 		starget_for_each_device(to_scsi_target(dev), NULL,
2245 					device_unblock);
2246 	else
2247 		device_for_each_child(dev, NULL, target_unblock);
2248 }
2249 EXPORT_SYMBOL_GPL(scsi_target_unblock);
2250