xref: /linux/drivers/scsi/scsi_lib.c (revision 776cfebb430c7b22c208b1b17add97f354d97cab)
1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9 
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/completion.h>
13 #include <linux/kernel.h>
14 #include <linux/mempool.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/pci.h>
18 #include <linux/delay.h>
19 
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_dbg.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_driver.h>
24 #include <scsi/scsi_eh.h>
25 #include <scsi/scsi_host.h>
26 #include <scsi/scsi_request.h>
27 
28 #include "scsi_priv.h"
29 #include "scsi_logging.h"
30 
31 
32 #define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
33 #define SG_MEMPOOL_SIZE		32
34 
35 struct scsi_host_sg_pool {
36 	size_t		size;
37 	char		*name;
38 	kmem_cache_t	*slab;
39 	mempool_t	*pool;
40 };
41 
42 #if (SCSI_MAX_PHYS_SEGMENTS < 32)
43 #error SCSI_MAX_PHYS_SEGMENTS is too small
44 #endif
45 
46 #define SP(x) { x, "sgpool-" #x }
47 struct scsi_host_sg_pool scsi_sg_pools[] = {
48 	SP(8),
49 	SP(16),
50 	SP(32),
51 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
52 	SP(64),
53 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
54 	SP(128),
55 #if (SCSI_MAX_PHYS_SEGMENTS > 128)
56 	SP(256),
57 #if (SCSI_MAX_PHYS_SEGMENTS > 256)
58 #error SCSI_MAX_PHYS_SEGMENTS is too large
59 #endif
60 #endif
61 #endif
62 #endif
63 };
64 #undef SP
65 
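/*
 * Worked example of the SP() macro above: SP(8) expands to
 * { 8, "sgpool-8" } ("sgpool-" #x relies on adjacent string literal
 * concatenation), i.e. an 8-entry scatterlist size class whose slab
 * and mempool are created under that name by scsi_init_queue() below.
 */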
66 
67 /*
68  * Function:    scsi_insert_special_req()
69  *
70  * Purpose:     Insert pre-formed request into request queue.
71  *
72  * Arguments:   sreq	- request that is ready to be queued.
73  *              at_head	- boolean.  True if we should insert at head
74  *                        of queue, false if we should insert at tail.
75  *
76  * Lock status: Assumed that lock is not held upon entry.
77  *
78  * Returns:     Zero, always.
79  *
80  * Notes:       This function is called from character device and from
81  *              ioctl types of functions where the caller knows exactly
82  *              what SCSI command needs to be issued.   The idea is that
83  *              we merely inject the command into the queue (at the head
84  *              for now), and then call the queue request function to actually
85  *              process it.
86  */
87 int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
88 {
89 	/*
90 	 * Because users of this function are apt to reuse requests with no
91 	 * modification, we have to sanitise the request flags here
92 	 */
93 	sreq->sr_request->flags &= ~REQ_DONTPREP;
94 	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
95 		       	   at_head, sreq, 0);
96 	return 0;
97 }
98 
99 /*
100  * Function:    scsi_queue_insert()
101  *
102  * Purpose:     Insert a command in the midlevel queue.
103  *
104  * Arguments:   cmd    - command that we are adding to queue.
105  *              reason - why we are inserting command to queue.
106  *
107  * Lock status: Assumed that lock is not held upon entry.
108  *
109  * Returns:     Zero, always.
110  *
111  * Notes:       We do this for one of two cases.  Either the host is busy
112  *              and it cannot accept any more commands for the time being,
113  *              or the device returned QUEUE_FULL and can accept no more
114  *              commands.
115  * Notes:       This could be called either from an interrupt context or a
116  *              normal process context.
117  */
118 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
119 {
120 	struct Scsi_Host *host = cmd->device->host;
121 	struct scsi_device *device = cmd->device;
122 
123 	SCSI_LOG_MLQUEUE(1,
124 		 printk("Inserting command %p into mlqueue\n", cmd));
125 
126 	/*
127 	 * We are inserting the command into the ml queue.  First, we
128 	 * cancel the timer, so it doesn't time out.
129 	 */
130 	scsi_delete_timer(cmd);
131 
132 	/*
133 	 * Next, set the appropriate busy bit for the device/host.
134 	 *
135 	 * If the host/device isn't busy, assume that something actually
136 	 * completed, and that we should be able to queue a command now.
137 	 *
138 	 * Note that the prior mid-layer assumption that any host could
139 	 * always queue at least one command is now broken.  The mid-layer
140 	 * will implement a user specifiable stall (see
141 	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
142 	 * if a command is requeued with no other commands outstanding
143 	 * either for the device or for the host.
144 	 */
145 	if (reason == SCSI_MLQUEUE_HOST_BUSY)
146 		host->host_blocked = host->max_host_blocked;
147 	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
148 		device->device_blocked = device->max_device_blocked;
149 
150 	/*
151 	 * Register the fact that we own the thing for now.
152 	 */
153 	cmd->state = SCSI_STATE_MLQUEUE;
154 	cmd->owner = SCSI_OWNER_MIDLEVEL;
155 
156 	/*
157 	 * Decrement the counters, since these commands are no longer
158 	 * active on the host/device.
159 	 */
160 	scsi_device_unbusy(device);
161 
162 	/*
163 	 * Insert this command at the head of the queue for its device.
164 	 * It will go before all other commands that are already in the queue.
165 	 *
166 	 * NOTE: there is magic here about the way the queue is plugged if
167 	 * we have no outstanding commands.
168 	 *
169 	 * Although this *doesn't* plug the queue, it does call the request
170 	 * function.  The SCSI request function detects the blocked condition
171 	 * and plugs the queue appropriately.
172 	 */
173 	blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
174 	return 0;
175 }
176 
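/*
 * Illustrative sketch (not code from this file): the usual way into
 * scsi_queue_insert() is scsi_dispatch_cmd() reacting to a low-level
 * driver refusing a command from its queuecommand() handler, e.g.:
 *
 *	static int my_queuecommand(struct scsi_cmnd *cmd,
 *				   void (*done)(struct scsi_cmnd *))
 *	{
 *		if (adapter_fifo_full())
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *	}
 *
 * (adapter_fifo_full() is a hypothetical helper.)  The mid-layer then
 * requeues the command with scsi_queue_insert(cmd, SCSI_MLQUEUE_HOST_BUSY)
 * and stalls the host per max_host_blocked, as described above.
 */
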
177 /*
178  * Function:    scsi_do_req
179  *
180  * Purpose:     Queue a SCSI request
181  *
182  * Arguments:   sreq	  - command descriptor.
183  *              cmnd      - actual SCSI command to be performed.
184  *              buffer    - data buffer.
185  *              bufflen   - size of data buffer.
186  *              done      - completion function to be run.
187  *              timeout   - how long to let it run before timeout.
188  *              retries   - number of retries we allow.
189  *
190  * Lock status: No locks held upon entry.
191  *
192  * Returns:     Nothing.
193  *
194  * Notes:	This function is only used for queueing requests for things
195  *		like ioctls and character device requests - this is because
196  *		we essentially just inject a request into the queue for the
197  *		device.
198  *
199  *		In order to support the scsi_device_quiesce function, we
200  *		now inject requests on the *head* of the device queue
201  *		rather than the tail.
202  */
203 void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
204 		 void *buffer, unsigned bufflen,
205 		 void (*done)(struct scsi_cmnd *),
206 		 int timeout, int retries)
207 {
208 	/*
209 	 * If the upper level driver is reusing these things, then
210 	 * we should release the low-level block now.  Another one will
211 	 * be allocated later when this request is getting queued.
212 	 */
213 	__scsi_release_request(sreq);
214 
215 	/*
216 	 * Our own function scsi_done (which marks the host as not busy,
217 	 * disables the timeout counter, etc) will be called either by us
218 	 * or via the scsi_hosts[host].queuecommand() path; scsi_done in
219 	 * turn invokes the completion function supplied by the high level driver.
220 	 */
221 	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
222 	sreq->sr_bufflen = bufflen;
223 	sreq->sr_buffer = buffer;
224 	sreq->sr_allowed = retries;
225 	sreq->sr_done = done;
226 	sreq->sr_timeout_per_command = timeout;
227 
228 	if (sreq->sr_cmd_len == 0)
229 		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
230 
231 	/*
232 	 * head injection *required* here otherwise quiesce won't work
233 	 */
234 	scsi_insert_special_req(sreq, 1);
235 }
236 EXPORT_SYMBOL(scsi_do_req);
237 
238 static void scsi_wait_done(struct scsi_cmnd *cmd)
239 {
240 	struct request *req = cmd->request;
241 	struct request_queue *q = cmd->device->request_queue;
242 	unsigned long flags;
243 
244 	req->rq_status = RQ_SCSI_DONE;	/* Busy, but indicate request done */
245 
246 	spin_lock_irqsave(q->queue_lock, flags);
247 	if (blk_rq_tagged(req))
248 		blk_queue_end_tag(q, req);
249 	spin_unlock_irqrestore(q->queue_lock, flags);
250 
251 	if (req->waiting)
252 		complete(req->waiting);
253 }
254 
255 /* This is the end routine we get to if a command was never attached
256  * to the request.  Simply complete the request without changing
257  * rq_status; this will cause a DRIVER_ERROR. */
258 static void scsi_wait_req_end_io(struct request *req)
259 {
260 	BUG_ON(!req->waiting);
261 
262 	complete(req->waiting);
263 }
264 
265 void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
266 		   unsigned bufflen, int timeout, int retries)
267 {
268 	DECLARE_COMPLETION(wait);
269 
270 	sreq->sr_request->waiting = &wait;
271 	sreq->sr_request->rq_status = RQ_SCSI_BUSY;
272 	sreq->sr_request->end_io = scsi_wait_req_end_io;
273 	scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
274 			timeout, retries);
275 	wait_for_completion(&wait);
276 	sreq->sr_request->waiting = NULL;
277 	if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
278 		sreq->sr_result |= (DRIVER_ERROR << 24);
279 
280 	__scsi_release_request(sreq);
281 }
282 EXPORT_SYMBOL(scsi_wait_req);
283 
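/*
 * Illustrative sketch of the synchronous path above, mirroring what
 * scsi_test_unit_ready() later in this file actually does (the timeout
 * and retry counts here are arbitrary):
 *
 *	struct scsi_request *sreq;
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int result;
 *
 *	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *	if (!sreq)
 *		return -ENOMEM;
 *	sreq->sr_data_direction = DMA_NONE;
 *	scsi_wait_req(sreq, cmd, NULL, 0, 30 * HZ, 3);
 *	result = sreq->sr_result;
 *	scsi_release_request(sreq);
 */
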
284 /*
285  * Function:    scsi_init_cmd_errh()
286  *
287  * Purpose:     Initialize cmd fields related to error handling.
288  *
289  * Arguments:   cmd	- command that is ready to be queued.
290  *
291  * Returns:     1, always.
292  *
293  * Notes:       This function has the job of initializing a number of
294  *              fields related to error handling.   Typically this will
295  *              be called once for each command, as required.
296  */
297 static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
298 {
299 	cmd->owner = SCSI_OWNER_MIDLEVEL;
300 	cmd->serial_number = 0;
301 	cmd->abort_reason = 0;
302 
303 	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
304 
305 	if (cmd->cmd_len == 0)
306 		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
307 
308 	/*
309 	 * We need saved copies of a number of fields - this is because
310 	 * error handling may need to overwrite these with different values
311 	 * to run different commands, and once error handling is complete,
312 	 * we will need to restore these values prior to running the actual
313 	 * command.
314 	 */
315 	cmd->old_use_sg = cmd->use_sg;
316 	cmd->old_cmd_len = cmd->cmd_len;
317 	cmd->sc_old_data_direction = cmd->sc_data_direction;
318 	cmd->old_underflow = cmd->underflow;
319 	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
320 	cmd->buffer = cmd->request_buffer;
321 	cmd->bufflen = cmd->request_bufflen;
322 	cmd->abort_reason = 0;
323 
324 	return 1;
325 }
326 
327 /*
328  * Function:   scsi_setup_cmd_retry()
329  *
330  * Purpose:    Restore the command state for a retry
331  *
332  * Arguments:  cmd	- command to be restored
333  *
334  * Returns:    Nothing
335  *
336  * Notes:      Immediately prior to retrying a command, we need
337  *             to restore certain fields that we saved above.
338  */
339 void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
340 {
341 	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
342 	cmd->request_buffer = cmd->buffer;
343 	cmd->request_bufflen = cmd->bufflen;
344 	cmd->use_sg = cmd->old_use_sg;
345 	cmd->cmd_len = cmd->old_cmd_len;
346 	cmd->sc_data_direction = cmd->sc_old_data_direction;
347 	cmd->underflow = cmd->old_underflow;
348 }
349 
350 void scsi_device_unbusy(struct scsi_device *sdev)
351 {
352 	struct Scsi_Host *shost = sdev->host;
353 	unsigned long flags;
354 
355 	spin_lock_irqsave(shost->host_lock, flags);
356 	shost->host_busy--;
357 	if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
358 		     shost->host_failed))
359 		scsi_eh_wakeup(shost);
360 	spin_unlock(shost->host_lock);
361 	spin_lock(sdev->request_queue->queue_lock);
362 	sdev->device_busy--;
363 	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
364 }
365 
366 /*
367  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
368  * and call blk_run_queue for all the scsi_devices on the target -
369  * including current_sdev first.
370  *
371  * Called with *no* scsi locks held.
372  */
373 static void scsi_single_lun_run(struct scsi_device *current_sdev)
374 {
375 	struct Scsi_Host *shost = current_sdev->host;
376 	struct scsi_device *sdev, *tmp;
377 	struct scsi_target *starget = scsi_target(current_sdev);
378 	unsigned long flags;
379 
380 	spin_lock_irqsave(shost->host_lock, flags);
381 	starget->starget_sdev_user = NULL;
382 	spin_unlock_irqrestore(shost->host_lock, flags);
383 
384 	/*
385 	 * Call blk_run_queue for all LUNs on the target, starting with
386 	 * current_sdev. We race with others (to set starget_sdev_user),
387 	 * but in most cases, we will be first. Ideally, each LU on the
388 	 * target would get some limited time or requests on the target.
389 	 */
390 	blk_run_queue(current_sdev->request_queue);
391 
392 	spin_lock_irqsave(shost->host_lock, flags);
393 	if (starget->starget_sdev_user)
394 		goto out;
395 	list_for_each_entry_safe(sdev, tmp, &starget->devices,
396 			same_target_siblings) {
397 		if (sdev == current_sdev)
398 			continue;
399 		if (scsi_device_get(sdev))
400 			continue;
401 
402 		spin_unlock_irqrestore(shost->host_lock, flags);
403 		blk_run_queue(sdev->request_queue);
404 		spin_lock_irqsave(shost->host_lock, flags);
405 
406 		scsi_device_put(sdev);
407 	}
408  out:
409 	spin_unlock_irqrestore(shost->host_lock, flags);
410 }
411 
412 /*
413  * Function:	scsi_run_queue()
414  *
415  * Purpose:	Select a proper request queue to serve next
416  *
417  * Arguments:	q	- last request's queue
418  *
419  * Returns:     Nothing
420  *
421  * Notes:	The previous command was completely finished, start
422  *		a new one if possible.
423  */
424 static void scsi_run_queue(struct request_queue *q)
425 {
426 	struct scsi_device *sdev = q->queuedata;
427 	struct Scsi_Host *shost = sdev->host;
428 	unsigned long flags;
429 
430 	if (sdev->single_lun)
431 		scsi_single_lun_run(sdev);
432 
433 	spin_lock_irqsave(shost->host_lock, flags);
434 	while (!list_empty(&shost->starved_list) &&
435 	       !shost->host_blocked && !shost->host_self_blocked &&
436 		!((shost->can_queue > 0) &&
437 		  (shost->host_busy >= shost->can_queue))) {
438 		/*
439 		 * As long as shost is accepting commands and we have
440 		 * starved queues, call blk_run_queue. scsi_request_fn
441 		 * drops the queue_lock and can add us back to the
442 		 * starved_list.
443 		 *
444 		 * host_lock protects the starved_list and starved_entry.
445 		 * scsi_request_fn must get the host_lock before checking
446 		 * or modifying starved_list or starved_entry.
447 		 */
448 		sdev = list_entry(shost->starved_list.next,
449 					  struct scsi_device, starved_entry);
450 		list_del_init(&sdev->starved_entry);
451 		spin_unlock_irqrestore(shost->host_lock, flags);
452 
453 		blk_run_queue(sdev->request_queue);
454 
455 		spin_lock_irqsave(shost->host_lock, flags);
456 		if (unlikely(!list_empty(&sdev->starved_entry)))
457 			/*
458 			 * sdev lost a race, and was put back on the
459 			 * starved list. This is unlikely but without this
460 			 * in theory we could loop forever.
461 			 */
462 			break;
463 	}
464 	spin_unlock_irqrestore(shost->host_lock, flags);
465 
466 	blk_run_queue(q);
467 }
468 
469 /*
470  * Function:	scsi_requeue_command()
471  *
472  * Purpose:	Handle post-processing of completed commands.
473  *
474  * Arguments:	q	- queue to operate on
475  *		cmd	- command that may need to be requeued.
476  *
477  * Returns:	Nothing
478  *
479  * Notes:	After command completion, there may be blocks left
480  *		over which weren't finished by the previous command;
481  *		this can be for a number of reasons - the main one is
482  *		I/O errors in the middle of the request, in which case
483  *		we need to request the blocks that come after the bad
484  *		sector.
485  */
486 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
487 {
488 	cmd->request->flags &= ~REQ_DONTPREP;
489 	blk_insert_request(q, cmd->request, 1, cmd, 1);
490 
491 	scsi_run_queue(q);
492 }
493 
494 void scsi_next_command(struct scsi_cmnd *cmd)
495 {
496 	struct request_queue *q = cmd->device->request_queue;
497 
498 	scsi_put_command(cmd);
499 	scsi_run_queue(q);
500 }
501 
502 void scsi_run_host_queues(struct Scsi_Host *shost)
503 {
504 	struct scsi_device *sdev;
505 
506 	shost_for_each_device(sdev, shost)
507 		scsi_run_queue(sdev->request_queue);
508 }
509 
510 /*
511  * Function:    scsi_end_request()
512  *
513  * Purpose:     Post-processing of completed commands (usually invoked at end
514  *		of upper level post-processing and scsi_io_completion).
515  *
516  * Arguments:   cmd	 - command that is complete.
517  *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
518  *              bytes    - number of bytes of completed I/O
519  *		requeue  - indicates whether we should requeue leftovers.
520  *
521  * Lock status: Assumed that lock is not held upon entry.
522  *
523  * Returns:     cmd if requeue done or required, NULL otherwise
524  *
525  * Notes:       This is called for block device requests in order to
526  *              mark some number of sectors as complete.
527  *
528  *		We are guaranteeing that the request queue will be goosed
529  *		at some point during this call.
530  */
531 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
532 					  int bytes, int requeue)
533 {
534 	request_queue_t *q = cmd->device->request_queue;
535 	struct request *req = cmd->request;
536 	unsigned long flags;
537 
538 	/*
539 	 * If there are blocks left over at the end, set up the command
540 	 * to queue the remainder of them.
541 	 */
542 	if (end_that_request_chunk(req, uptodate, bytes)) {
543 		int leftover = (req->hard_nr_sectors << 9);
544 
545 		if (blk_pc_request(req))
546 			leftover = req->data_len;
547 
548 		/* kill remainder if no retries */
549 		if (!uptodate && blk_noretry_request(req))
550 			end_that_request_chunk(req, 0, leftover);
551 		else {
552 			if (requeue)
553 				/*
554 				 * Bleah.  Leftovers again.  Stick the
555 				 * leftovers in the front of the
556 				 * queue, and goose the queue again.
557 				 */
558 				scsi_requeue_command(q, cmd);
559 
560 			return cmd;
561 		}
562 	}
563 
564 	add_disk_randomness(req->rq_disk);
565 
566 	spin_lock_irqsave(q->queue_lock, flags);
567 	if (blk_rq_tagged(req))
568 		blk_queue_end_tag(q, req);
569 	end_that_request_last(req);
570 	spin_unlock_irqrestore(q->queue_lock, flags);
571 
572 	/*
573 	 * This will goose the queue request function at the end, so we don't
574 	 * need to worry about launching another command.
575 	 */
576 	scsi_next_command(cmd);
577 	return NULL;
578 }
579 
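/*
 * Worked example for scsi_end_request() above (a sketch of the logic,
 * not extra behaviour): suppose a 4096 byte READ completes only its
 * first 2048 bytes.  scsi_end_request(cmd, 1, 2048, 1) then sees
 * end_that_request_chunk() report leftover work, requeues the request
 * at the head of the queue via scsi_requeue_command(), and returns cmd
 * so the caller knows the command is still in flight for the remainder.
 */
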
580 static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
581 {
582 	struct scsi_host_sg_pool *sgp;
583 	struct scatterlist *sgl;
584 
585 	BUG_ON(!cmd->use_sg);
586 
587 	switch (cmd->use_sg) {
588 	case 1 ... 8:
589 		cmd->sglist_len = 0;
590 		break;
591 	case 9 ... 16:
592 		cmd->sglist_len = 1;
593 		break;
594 	case 17 ... 32:
595 		cmd->sglist_len = 2;
596 		break;
597 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
598 	case 33 ... 64:
599 		cmd->sglist_len = 3;
600 		break;
601 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
602 	case 65 ... 128:
603 		cmd->sglist_len = 4;
604 		break;
605 #if (SCSI_MAX_PHYS_SEGMENTS  > 128)
606 	case 129 ... 256:
607 		cmd->sglist_len = 5;
608 		break;
609 #endif
610 #endif
611 #endif
612 	default:
613 		return NULL;
614 	}
615 
616 	sgp = scsi_sg_pools + cmd->sglist_len;
617 	sgl = mempool_alloc(sgp->pool, gfp_mask);
618 	if (sgl)
619 		memset(sgl, 0, sgp->size);
620 	return sgl;
621 }
622 
623 static void scsi_free_sgtable(struct scatterlist *sgl, int index)
624 {
625 	struct scsi_host_sg_pool *sgp;
626 
627 	BUG_ON(index >= SG_MEMPOOL_NR);
628 
629 	sgp = scsi_sg_pools + index;
630 	mempool_free(sgl, sgp->pool);
631 }
632 
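/*
 * Worked example for the two helpers above: a command with
 * cmd->use_sg == 24 falls in the "17 ... 32" case of
 * scsi_alloc_sgtable(), so sglist_len becomes 2 and the table is
 * served from the "sgpool-32" mempool; the matching release is
 * scsi_free_sgtable(sgl, 2), normally written as
 * scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len).
 */
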
633 /*
634  * Function:    scsi_release_buffers()
635  *
636  * Purpose:     Free resources allocated when the command's I/O was set up.
637  *
638  * Arguments:   cmd	- command that we are bailing.
639  *
640  * Lock status: Assumed that no lock is held upon entry.
641  *
642  * Returns:     Nothing
643  *
644  * Notes:       In the event that an upper level driver rejects a
645  *		command, we must release resources allocated during
646  *		the __init_io() function.  Primarily this would involve
647  *		the scatter-gather table, and potentially any bounce
648  *		buffers.
649  */
650 static void scsi_release_buffers(struct scsi_cmnd *cmd)
651 {
652 	struct request *req = cmd->request;
653 
654 	/*
655 	 * Free up any indirection buffers we allocated for DMA purposes.
656 	 */
657 	if (cmd->use_sg)
658 		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
659 	else if (cmd->request_buffer != req->buffer)
660 		kfree(cmd->request_buffer);
661 
662 	/*
663 	 * Zero these out.  They now point to freed memory, and it is
664 	 * dangerous to hang onto the pointers.
665 	 */
666 	cmd->buffer  = NULL;
667 	cmd->bufflen = 0;
668 	cmd->request_buffer = NULL;
669 	cmd->request_bufflen = 0;
670 }
671 
672 /*
673  * Function:    scsi_io_completion()
674  *
675  * Purpose:     Completion processing for block device I/O requests.
676  *
677  * Arguments:   cmd   - command that is finished.
678  *
679  * Lock status: Assumed that no lock is held upon entry.
680  *
681  * Returns:     Nothing
682  *
683  * Notes:       This function is matched in terms of capabilities to
684  *              the function that created the scatter-gather list.
685  *              In other words, if there are no bounce buffers
686  *              (the normal case for most drivers), we don't need
687  *              the logic to deal with cleaning up afterwards.
688  *
689  *		We must do one of several things here:
690  *
691  *		a) Call scsi_end_request.  This will finish off the
692  *		   specified number of sectors.  If we are done, the
693  *		   command block will be released, and the queue
694  *		   function will be goosed.  If we are not done, then
695  *		   scsi_end_request will directly goose the queue.
696  *
697  *		b) We can just use scsi_requeue_command() here.  This would
698  *		   be used if we just wanted to retry, for example.
699  */
700 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
701 			unsigned int block_bytes)
702 {
703 	int result = cmd->result;
704 	int this_count = cmd->bufflen;
705 	request_queue_t *q = cmd->device->request_queue;
706 	struct request *req = cmd->request;
707 	int clear_errors = 1;
708 	struct scsi_sense_hdr sshdr;
709 	int sense_valid = 0;
710 	int sense_deferred = 0;
711 
712 	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
713 		return;
714 
715 	/*
716 	 * Free up any indirection buffers we allocated for DMA purposes.
717 	 * For the case of a READ, we need to copy the data out of the
718 	 * bounce buffer and into the real buffer.
719 	 */
720 	if (cmd->use_sg)
721 		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
722 	else if (cmd->buffer != req->buffer) {
723 		if (rq_data_dir(req) == READ) {
724 			unsigned long flags;
725 			char *to = bio_kmap_irq(req->bio, &flags);
726 			memcpy(to, cmd->buffer, cmd->bufflen);
727 			bio_kunmap_irq(to, &flags);
728 		}
729 		kfree(cmd->buffer);
730 	}
731 
732 	if (result) {
733 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
734 		if (sense_valid)
735 			sense_deferred = scsi_sense_is_deferred(&sshdr);
736 	}
737 	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
738 		req->errors = result;
739 		if (result) {
740 			clear_errors = 0;
741 			if (sense_valid && req->sense) {
742 				/*
743 				 * SG_IO wants current and deferred errors
744 				 */
745 				int len = 8 + cmd->sense_buffer[7];
746 
747 				if (len > SCSI_SENSE_BUFFERSIZE)
748 					len = SCSI_SENSE_BUFFERSIZE;
749 				memcpy(req->sense, cmd->sense_buffer,  len);
750 				req->sense_len = len;
751 			}
752 		} else
753 			req->data_len = cmd->resid;
754 	}
755 
756 	/*
757 	 * Zero these out.  They now point to freed memory, and it is
758 	 * dangerous to hang onto the pointers.
759 	 */
760 	cmd->buffer  = NULL;
761 	cmd->bufflen = 0;
762 	cmd->request_buffer = NULL;
763 	cmd->request_bufflen = 0;
764 
765 	/*
766 	 * Next deal with any sectors which we were able to correctly
767 	 * handle.
768 	 */
769 	if (good_bytes >= 0) {
770 		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
771 					      req->nr_sectors, good_bytes));
772 		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
773 
774 		if (clear_errors)
775 			req->errors = 0;
776 		/*
777 		 * If multiple sectors are requested in one buffer, then
778 		 * they will have been finished off by the first command.
779 		 * If not, then we have a multi-buffer command.
780 		 *
781 		 * If block_bytes != 0, it means we had a medium error
782 		 * of some sort, and that we want to mark some number of
783 		 * sectors as not uptodate.  Thus we want to inhibit
784 		 * requeueing right here - we will requeue down below
785 		 * when we handle the bad sectors.
786 		 */
787 		cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
788 
789 		/*
790 		 * If the command completed without error, then either finish off the
791 		 * rest of the command, or start a new one.
792 		 */
793 		if (result == 0 || cmd == NULL ) {
794 			return;
795 		}
796 	}
797 	/*
798 	 * Now, if we were good little boys and girls, Santa left us a request
799 	 * sense buffer.  We can extract information from this, so we
800 	 * can choose a block to remap, etc.
801 	 */
802 	if (sense_valid && !sense_deferred) {
803 		switch (sshdr.sense_key) {
804 		case UNIT_ATTENTION:
805 			if (cmd->device->removable) {
806 				/* detected disc change.  set a bit
807 				 * and quietly refuse further access.
808 				 */
809 				cmd->device->changed = 1;
810 				cmd = scsi_end_request(cmd, 0,
811 						this_count, 1);
812 				return;
813 			} else {
814 				/*
815 				 * Must have been a power glitch, or a
816 				 * bus reset.  Could not have been a
817 				 * media change, so we just retry the
818 				 * request and see what happens.
819 				 */
820 				scsi_requeue_command(q, cmd);
821 				return;
822 			}
823 			break;
824 		case ILLEGAL_REQUEST:
825 			/*
826 			 * If we had an ILLEGAL REQUEST returned, then we may
827 			 * have performed an unsupported command.  The only
828 			 * thing this should be would be a ten byte read where
829 			 * only a six byte read was supported.  Also, on a
830 			 * system where READ CAPACITY failed, we may have read
831 			 * past the end of the disk.
832 			 */
833 			if (cmd->device->use_10_for_rw &&
834 			    (cmd->cmnd[0] == READ_10 ||
835 			     cmd->cmnd[0] == WRITE_10)) {
836 				cmd->device->use_10_for_rw = 0;
837 				/*
838 				 * This will cause a retry with a 6-byte
839 				 * command.
840 				 */
841 				scsi_requeue_command(q, cmd);
842 				result = 0;
843 			} else {
844 				cmd = scsi_end_request(cmd, 0, this_count, 1);
845 				return;
846 			}
847 			break;
848 		case NOT_READY:
849 			/*
850 			 * If the device is in the process of becoming ready,
851 			 * retry.
852 			 */
853 			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
854 				scsi_requeue_command(q, cmd);
855 				return;
856 			}
857 			printk(KERN_INFO "Device %s not ready.\n",
858 			       req->rq_disk ? req->rq_disk->disk_name : "");
859 			cmd = scsi_end_request(cmd, 0, this_count, 1);
860 			return;
861 		case VOLUME_OVERFLOW:
862 			printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
863 			       cmd->device->host->host_no,
864 			       (int)cmd->device->channel,
865 			       (int)cmd->device->id, (int)cmd->device->lun);
866 			__scsi_print_command(cmd->data_cmnd);
867 			scsi_print_sense("", cmd);
868 			cmd = scsi_end_request(cmd, 0, block_bytes, 1);
869 			return;
870 		default:
871 			break;
872 		}
873 	}			/* sense_valid && !sense_deferred */
874 	if (host_byte(result) == DID_RESET) {
875 		/*
876 		 * Third party bus reset or reset for error
877 		 * recovery reasons.  Just retry the request
878 		 * and see what happens.
879 		 */
880 		scsi_requeue_command(q, cmd);
881 		return;
882 	}
883 	if (result) {
884 		printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
885 		       "= 0x%x\n", cmd->device->host->host_no,
886 		       cmd->device->channel,
887 		       cmd->device->id,
888 		       cmd->device->lun, result);
889 
890 		if (driver_byte(result) & DRIVER_SENSE)
891 			scsi_print_sense("", cmd);
892 		/*
893 		 * Mark a single buffer as not uptodate.  Queue the remainder.
894 		 * We sometimes get this cruft in the event that a medium error
895 		 * isn't properly reported.
896 		 */
897 		block_bytes = req->hard_cur_sectors << 9;
898 		if (!block_bytes)
899 			block_bytes = req->data_len;
900 		cmd = scsi_end_request(cmd, 0, block_bytes, 1);
901 	}
902 }
903 EXPORT_SYMBOL(scsi_io_completion);
904 
905 /*
906  * Function:    scsi_init_io()
907  *
908  * Purpose:     SCSI I/O initialize function.
909  *
910  * Arguments:   cmd   - Command descriptor we wish to initialize
911  *
912  * Returns:     0 on success
913  *		BLKPREP_DEFER if the failure is retryable
914  *		BLKPREP_KILL if the failure is fatal
915  */
916 static int scsi_init_io(struct scsi_cmnd *cmd)
917 {
918 	struct request     *req = cmd->request;
919 	struct scatterlist *sgpnt;
920 	int		   count;
921 
922 	/*
923 	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
924 	 */
925 	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
926 		cmd->request_bufflen = req->data_len;
927 		cmd->request_buffer = req->data;
928 		req->buffer = req->data;
929 		cmd->use_sg = 0;
930 		return 0;
931 	}
932 
933 	/*
934 	 * we used to not use scatter-gather for single segment requests,
935 	 * but now we do (it makes highmem I/O easier to support without
936 	 * kmapping pages)
937 	 */
938 	cmd->use_sg = req->nr_phys_segments;
939 
940 	/*
941 	 * if sg table allocation fails, requeue request later.
942 	 */
943 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
944 	if (unlikely(!sgpnt)) {
945 		req->flags |= REQ_SPECIAL;
946 		return BLKPREP_DEFER;
947 	}
948 
949 	cmd->request_buffer = (char *) sgpnt;
950 	cmd->request_bufflen = req->nr_sectors << 9;
951 	if (blk_pc_request(req))
952 		cmd->request_bufflen = req->data_len;
953 	req->buffer = NULL;
954 
955 	/*
956 	 * Next, walk the list, and fill in the addresses and sizes of
957 	 * each segment.
958 	 */
959 	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
960 
961 	/*
962 	 * mapped well, send it off
963 	 */
964 	if (likely(count <= cmd->use_sg)) {
965 		cmd->use_sg = count;
966 		return 0;
967 	}
968 
969 	printk(KERN_ERR "Incorrect number of segments after building list\n");
970 	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
971 	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
972 			req->current_nr_sectors);
973 
974 	/* release the command and kill it */
975 	scsi_release_buffers(cmd);
976 	scsi_put_command(cmd);
977 	return BLKPREP_KILL;
978 }
979 
980 static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
981 {
982 	struct scsi_device *sdev = q->queuedata;
983 	struct scsi_driver *drv;
984 
985 	if (sdev->sdev_state == SDEV_RUNNING) {
986 		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
987 
988 		if (drv->prepare_flush)
989 			return drv->prepare_flush(q, rq);
990 	}
991 
992 	return 0;
993 }
994 
995 static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
996 {
997 	struct scsi_device *sdev = q->queuedata;
998 	struct request *flush_rq = rq->end_io_data;
999 	struct scsi_driver *drv;
1000 
1001 	if (flush_rq->errors) {
1002 		printk(KERN_ERR "scsi: barrier error, disabling flush support\n");
1003 		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
1004 	}
1005 
1006 	if (sdev->sdev_state == SDEV_RUNNING) {
1007 		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
1008 		drv->end_flush(q, rq);
1009 	}
1010 }
1011 
1012 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1013 			       sector_t *error_sector)
1014 {
1015 	struct scsi_device *sdev = q->queuedata;
1016 	struct scsi_driver *drv;
1017 
1018 	if (sdev->sdev_state != SDEV_RUNNING)
1019 		return -ENXIO;
1020 
1021 	drv = *(struct scsi_driver **) disk->private_data;
1022 	if (drv->issue_flush)
1023 		return drv->issue_flush(&sdev->sdev_gendev, error_sector);
1024 
1025 	return -EOPNOTSUPP;
1026 }
1027 
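/*
 * Summary of the prep_rq_fn contract that scsi_prep_fn() below
 * implements for the block layer (general block-layer semantics, not
 * anything specific to this file): BLKPREP_OK means the request now
 * carries a ready-to-issue command in req->special, BLKPREP_DEFER
 * means resources were temporarily unavailable and the request should
 * be retried later, and BLKPREP_KILL means the request must be
 * failed outright.
 */
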
1028 static int scsi_prep_fn(struct request_queue *q, struct request *req)
1029 {
1030 	struct scsi_device *sdev = q->queuedata;
1031 	struct scsi_cmnd *cmd;
1032 	int specials_only = 0;
1033 
1034 	/*
1035 	 * Just check to see if the device is online.  If it isn't, we
1036 	 * refuse to process any commands.  The device must be brought
1037 	 * online before trying any recovery commands
1038 	 */
1039 	if (unlikely(!scsi_device_online(sdev))) {
1040 		printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1041 		       sdev->host->host_no, sdev->id, sdev->lun);
1042 		return BLKPREP_KILL;
1043 	}
1044 	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1045 		/* OK, we're not in a running state; don't prep
1046 		 * user commands */
1047 		if (sdev->sdev_state == SDEV_DEL) {
1048 			/* Device is fully deleted, no commands
1049 			 * at all allowed down */
1050 			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
1051 			       sdev->host->host_no, sdev->id, sdev->lun);
1052 			return BLKPREP_KILL;
1053 		}
1054 		/* OK, we only allow special commands (i.e. not
1055 		 * user initiated ones) */
1056 		specials_only = sdev->sdev_state;
1057 	}
1058 
1059 	/*
1060 	 * Find the actual device driver associated with this command.
1061 	 * The SPECIAL requests are things like character device or
1062 	 * ioctls, which did not originate from ll_rw_blk.  Note that
1063 	 * the special field is also used to indicate the cmd for
1064 	 * the remainder of a partially fulfilled request that can
1065 	 * come up when there is a medium error.  We have to treat
1066 	 * these two cases differently.  We differentiate by looking
1067 	 * at request->flags, as this tells us the real story.
1068 	 */
1069 	if (req->flags & REQ_SPECIAL) {
1070 		struct scsi_request *sreq = req->special;
1071 
1072 		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
1073 			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
1074 			if (unlikely(!cmd))
1075 				goto defer;
1076 			scsi_init_cmd_from_req(cmd, sreq);
1077 		} else
1078 			cmd = req->special;
1079 	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1080 
1081 		if(unlikely(specials_only)) {
1082 			if(specials_only == SDEV_QUIESCE ||
1083 					specials_only == SDEV_BLOCK)
1084 				return BLKPREP_DEFER;
1085 
1086 			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
1087 			       sdev->host->host_no, sdev->id, sdev->lun);
1088 			return BLKPREP_KILL;
1089 		}
1090 
1091 
1092 		/*
1093 		 * Now try and find a command block that we can use.
1094 		 */
1095 		if (!req->special) {
1096 			cmd = scsi_get_command(sdev, GFP_ATOMIC);
1097 			if (unlikely(!cmd))
1098 				goto defer;
1099 		} else
1100 			cmd = req->special;
1101 
1102 		/* pull a tag out of the request if we have one */
1103 		cmd->tag = req->tag;
1104 	} else {
1105 		blk_dump_rq_flags(req, "SCSI bad req");
1106 		return BLKPREP_KILL;
1107 	}
1108 
1109 	/* note the overloading of req->special.  When the tag
1110 	 * is active it always means cmd.  If the tag goes
1111 	 * back for re-queueing, it may be reset */
1112 	req->special = cmd;
1113 	cmd->request = req;
1114 
1115 	/*
1116 	 * FIXME: drop the lock here because the functions below
1117 	 * expect to be called without the queue lock held.  Also,
1118 	 * previously, we dequeued the request before dropping the
1119 	 * lock.  We hope REQ_STARTED prevents anything untoward from
1120 	 * happening now.
1121 	 */
1122 	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1123 		struct scsi_driver *drv;
1124 		int ret;
1125 
1126 		/*
1127 		 * This will do a couple of things:
1128 		 *  1) Fill in the actual SCSI command.
1129 		 *  2) Fill in any other upper-level specific fields
1130 		 * (timeout).
1131 		 *
1132 		 * If this returns 0, it means that the request failed
1133 		 * (reading past end of disk, reading offline device,
1134 		 * etc).   This won't actually talk to the device, but
1135 		 * some kinds of consistency checking may cause the
1136 		 * request to be rejected immediately.
1137 		 */
1138 
1139 		/*
1140 		 * This sets up the scatter-gather table (allocating if
1141 		 * required).
1142 		 */
1143 		ret = scsi_init_io(cmd);
1144 		if (ret)	/* BLKPREP_KILL return also releases the command */
1145 			return ret;
1146 
1147 		/*
1148 		 * Initialize the actual SCSI command for this request.
1149 		 */
1150 		drv = *(struct scsi_driver **)req->rq_disk->private_data;
1151 		if (unlikely(!drv->init_command(cmd))) {
1152 			scsi_release_buffers(cmd);
1153 			scsi_put_command(cmd);
1154 			return BLKPREP_KILL;
1155 		}
1156 	}
1157 
1158 	/*
1159 	 * The request is now prepped, no need to come back here
1160 	 */
1161 	req->flags |= REQ_DONTPREP;
1162 	return BLKPREP_OK;
1163 
1164  defer:
1165 	/* If we defer, elv_next_request() returns NULL, but the queue
1166 	 * must be restarted eventually, so plug it here in case no
1167 	 * completing command comes back to restart it for us. */
1168 	if (sdev->device_busy == 0)
1169 		blk_plug_device(q);
1170 	return BLKPREP_DEFER;
1171 }
1172 
1173 /*
1174  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1175  * return 0.
1176  *
1177  * Called with the queue_lock held.
1178  */
1179 static inline int scsi_dev_queue_ready(struct request_queue *q,
1180 				  struct scsi_device *sdev)
1181 {
1182 	if (sdev->device_busy >= sdev->queue_depth)
1183 		return 0;
1184 	if (sdev->device_busy == 0 && sdev->device_blocked) {
1185 		/*
1186 		 * unblock after device_blocked iterates to zero
1187 		 */
1188 		if (--sdev->device_blocked == 0) {
1189 			SCSI_LOG_MLQUEUE(3,
1190 				printk("scsi%d (%d:%d) unblocking device at"
1191 				       " zero depth\n", sdev->host->host_no,
1192 				       sdev->id, sdev->lun));
1193 		} else {
1194 			blk_plug_device(q);
1195 			return 0;
1196 		}
1197 	}
1198 	if (sdev->device_blocked)
1199 		return 0;
1200 
1201 	return 1;
1202 }
1203 
1204 /*
1205  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1206  * return 0. We must end up running the queue again whenever 0 is
1207  * returned, else IO can hang.
1208  *
1209  * Called with host_lock held.
1210  */
1211 static inline int scsi_host_queue_ready(struct request_queue *q,
1212 				   struct Scsi_Host *shost,
1213 				   struct scsi_device *sdev)
1214 {
1215 	if (test_bit(SHOST_RECOVERY, &shost->shost_state))
1216 		return 0;
1217 	if (shost->host_busy == 0 && shost->host_blocked) {
1218 		/*
1219 		 * unblock after host_blocked iterates to zero
1220 		 */
1221 		if (--shost->host_blocked == 0) {
1222 			SCSI_LOG_MLQUEUE(3,
1223 				printk("scsi%d unblocking host at zero depth\n",
1224 					shost->host_no));
1225 		} else {
1226 			blk_plug_device(q);
1227 			return 0;
1228 		}
1229 	}
1230 	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1231 	    shost->host_blocked || shost->host_self_blocked) {
1232 		if (list_empty(&sdev->starved_entry))
1233 			list_add_tail(&sdev->starved_entry, &shost->starved_list);
1234 		return 0;
1235 	}
1236 
1237 	/* We're OK to process the command, so we can't be starved */
1238 	if (!list_empty(&sdev->starved_entry))
1239 		list_del_init(&sdev->starved_entry);
1240 
1241 	return 1;
1242 }
1243 
1244 /*
1245  * Kill requests for a dead device
1246  */
1247 static void scsi_kill_requests(request_queue_t *q)
1248 {
1249 	struct request *req;
1250 
1251 	while ((req = elv_next_request(q)) != NULL) {
1252 		blkdev_dequeue_request(req);
1253 		req->flags |= REQ_QUIET;
1254 		while (end_that_request_first(req, 0, req->nr_sectors))
1255 			;
1256 		end_that_request_last(req);
1257 	}
1258 }
1259 
1260 /*
1261  * Function:    scsi_request_fn()
1262  *
1263  * Purpose:     Main strategy routine for SCSI.
1264  *
1265  * Arguments:   q       - Pointer to actual queue.
1266  *
1267  * Returns:     Nothing
1268  *
1269  * Lock status: IO request lock assumed to be held when called.
1270  */
1271 static void scsi_request_fn(struct request_queue *q)
1272 {
1273 	struct scsi_device *sdev = q->queuedata;
1274 	struct Scsi_Host *shost;
1275 	struct scsi_cmnd *cmd;
1276 	struct request *req;
1277 
1278 	if (!sdev) {
1279 		printk(KERN_ERR "scsi: killing requests for dead queue\n");
1280 		scsi_kill_requests(q);
1281 		return;
1282 	}
1283 
1284 	if(!get_device(&sdev->sdev_gendev))
1285 		/* We must be tearing the block queue down already */
1286 		return;
1287 
1288 	/*
1289 	 * To start with, we keep looping until the queue is empty, or until
1290 	 * the host is no longer able to accept any more requests.
1291 	 */
1292 	shost = sdev->host;
1293 	while (!blk_queue_plugged(q)) {
1294 		int rtn;
1295 		/*
1296 		 * get next queueable request.  We do this early to make sure
1297 		 * that the request is fully prepared even if we cannot
1298 		 * accept it.
1299 		 */
1300 		req = elv_next_request(q);
1301 		if (!req || !scsi_dev_queue_ready(q, sdev))
1302 			break;
1303 
1304 		if (unlikely(!scsi_device_online(sdev))) {
1305 			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1306 			       sdev->host->host_no, sdev->id, sdev->lun);
1307 			blkdev_dequeue_request(req);
1308 			req->flags |= REQ_QUIET;
1309 			while (end_that_request_first(req, 0, req->nr_sectors))
1310 				;
1311 			end_that_request_last(req);
1312 			continue;
1313 		}
1314 
1315 
1316 		/*
1317 		 * Remove the request from the request list.
1318 		 */
1319 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1320 			blkdev_dequeue_request(req);
1321 		sdev->device_busy++;
1322 
1323 		spin_unlock(q->queue_lock);
1324 		spin_lock(shost->host_lock);
1325 
1326 		if (!scsi_host_queue_ready(q, shost, sdev))
1327 			goto not_ready;
1328 		if (sdev->single_lun) {
1329 			if (scsi_target(sdev)->starget_sdev_user &&
1330 			    scsi_target(sdev)->starget_sdev_user != sdev)
1331 				goto not_ready;
1332 			scsi_target(sdev)->starget_sdev_user = sdev;
1333 		}
1334 		shost->host_busy++;
1335 
1336 		/*
1337 		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1338 		 *		take the lock again.
1339 		 */
1340 		spin_unlock_irq(shost->host_lock);
1341 
1342 		cmd = req->special;
1343 		if (unlikely(cmd == NULL)) {
1344 			printk(KERN_CRIT "impossible request in %s.\n"
1345 					 "please mail a stack trace to "
1346 					 "linux-scsi@vger.kernel.org",
1347 					 __FUNCTION__);
1348 			BUG();
1349 		}
1350 
1351 		/*
1352 		 * Finally, initialize any error handling parameters, and set up
1353 		 * the timers for timeouts.
1354 		 */
1355 		scsi_init_cmd_errh(cmd);
1356 
1357 		/*
1358 		 * Dispatch the command to the low-level driver.
1359 		 */
1360 		rtn = scsi_dispatch_cmd(cmd);
1361 		spin_lock_irq(q->queue_lock);
1362 		if(rtn) {
1363 			/* we're refusing the command; because of
1364 			 * the way locks get dropped, we need to
1365 			 * check here if plugging is required */
1366 			if(sdev->device_busy == 0)
1367 				blk_plug_device(q);
1368 
1369 			break;
1370 		}
1371 	}
1372 
1373 	goto out;
1374 
1375  not_ready:
1376 	spin_unlock_irq(shost->host_lock);
1377 
1378 	/*
1379 	 * lock q, handle tag, requeue req, and decrement device_busy. We
1380 	 * must return with queue_lock held.
1381 	 *
1382 	 * Decrementing device_busy without checking it is OK, as all such
1383 	 * cases (host limits or settings) should run the queue at some
1384 	 * later time.
1385 	 */
1386 	spin_lock_irq(q->queue_lock);
1387 	blk_requeue_request(q, req);
1388 	sdev->device_busy--;
1389 	if(sdev->device_busy == 0)
1390 		blk_plug_device(q);
1391  out:
1392 	/* must be careful here...if we trigger the ->remove() function
1393 	 * we cannot be holding the q lock */
1394 	spin_unlock_irq(q->queue_lock);
1395 	put_device(&sdev->sdev_gendev);
1396 	spin_lock_irq(q->queue_lock);
1397 }
1398 
1399 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1400 {
1401 	struct device *host_dev;
1402 	u64 bounce_limit = 0xffffffff;
1403 
1404 	if (shost->unchecked_isa_dma)
1405 		return BLK_BOUNCE_ISA;
1406 	/*
1407 	 * Platforms with virtual-DMA translation
1408 	 * hardware have no practical limit.
1409 	 */
1410 	if (!PCI_DMA_BUS_IS_PHYS)
1411 		return BLK_BOUNCE_ANY;
1412 
1413 	host_dev = scsi_get_device(shost);
1414 	if (host_dev && host_dev->dma_mask)
1415 		bounce_limit = *host_dev->dma_mask;
1416 
1417 	return bounce_limit;
1418 }
1419 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1420 
1421 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1422 {
1423 	struct Scsi_Host *shost = sdev->host;
1424 	struct request_queue *q;
1425 
1426 	q = blk_init_queue(scsi_request_fn, NULL);
1427 	if (!q)
1428 		return NULL;
1429 
1430 	blk_queue_prep_rq(q, scsi_prep_fn);
1431 
1432 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
1433 	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1434 	blk_queue_max_sectors(q, shost->max_sectors);
1435 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1436 	blk_queue_segment_boundary(q, shost->dma_boundary);
1437 	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1438 
1439 	/*
1440 	 * ordered tags are superior to flush ordering
1441 	 */
1442 	if (shost->ordered_tag)
1443 		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1444 	else if (shost->ordered_flush) {
1445 		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1446 		q->prepare_flush_fn = scsi_prepare_flush_fn;
1447 		q->end_flush_fn = scsi_end_flush_fn;
1448 	}
1449 
1450 	if (!shost->use_clustering)
1451 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1452 	return q;
1453 }
1454 
1455 void scsi_free_queue(struct request_queue *q)
1456 {
1457 	blk_cleanup_queue(q);
1458 }
1459 
1460 /*
1461  * Function:    scsi_block_requests()
1462  *
1463  * Purpose:     Utility function used by low-level drivers to prevent further
1464  *		commands from being queued to the device.
1465  *
1466  * Arguments:   shost       - Host in question
1467  *
1468  * Returns:     Nothing
1469  *
1470  * Lock status: No locks are assumed held.
1471  *
1472  * Notes:       There is no timer nor any other means by which the requests
1473  *		get unblocked other than the low-level driver calling
1474  *		scsi_unblock_requests().
1475  */
1476 void scsi_block_requests(struct Scsi_Host *shost)
1477 {
1478 	shost->host_self_blocked = 1;
1479 }
1480 EXPORT_SYMBOL(scsi_block_requests);
1481 
1482 /*
1483  * Function:    scsi_unblock_requests()
1484  *
1485  * Purpose:     Utility function used by low-level drivers to allow further
1486  *		commands to be queued to the device.
1487  *
1488  * Arguments:   shost       - Host in question
1489  *
1490  * Returns:     Nothing
1491  *
1492  * Lock status: No locks are assumed held.
1493  *
1494  * Notes:       There is no timer nor any other means by which the requests
1495  *		get unblocked other than the low-level driver calling
1496  *		scsi_unblock_requests().
1497  *
1498  *		This is done as an API function so that changes to the
1499  *		internals of the scsi mid-layer won't require wholesale
1500  *		changes to drivers that use this feature.
1501  */
1502 void scsi_unblock_requests(struct Scsi_Host *shost)
1503 {
1504 	shost->host_self_blocked = 0;
1505 	scsi_run_host_queues(shost);
1506 }
1507 EXPORT_SYMBOL(scsi_unblock_requests);
1508 
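/*
 * Illustrative sketch (assumed low-level driver usage): a driver that
 * must stop the flow of new commands around, say, a controller reset
 * might do
 *
 *	scsi_block_requests(shost);
 *	my_reset_hardware(adapter);
 *	scsi_unblock_requests(shost);
 *
 * where my_reset_hardware() is a hypothetical helper.  As the notes
 * above stress, nothing unblocks the host automatically; forgetting
 * the second call leaves the host wedged.
 */
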
1509 int __init scsi_init_queue(void)
1510 {
1511 	int i;
1512 
1513 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1514 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1515 		int size = sgp->size * sizeof(struct scatterlist);
1516 
1517 		sgp->slab = kmem_cache_create(sgp->name, size, 0,
1518 				SLAB_HWCACHE_ALIGN, NULL, NULL);
1519 		if (!sgp->slab) {
1520 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1521 					sgp->name);
1522 		}
1523 
1524 		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1525 				mempool_alloc_slab, mempool_free_slab,
1526 				sgp->slab);
1527 		if (!sgp->pool) {
1528 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1529 					sgp->name);
1530 		}
1531 	}
1532 
1533 	return 0;
1534 }
1535 
1536 void scsi_exit_queue(void)
1537 {
1538 	int i;
1539 
1540 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1541 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1542 		mempool_destroy(sgp->pool);
1543 		kmem_cache_destroy(sgp->slab);
1544 	}
1545 }
1546 /**
1547  *	__scsi_mode_sense - issue a mode sense, falling back from ten to
1548  *		six bytes if necessary.
1549  *	@sreq:	SCSI request to fill in with the MODE_SENSE
1550  *	@dbd:	DBD bit: set to disable block descriptors in the returned data
1551  *	@modepage: mode page being requested
1552  *	@buffer: request buffer (may not be smaller than eight bytes)
1553  *	@len:	length of request buffer.
1554  *	@timeout: command timeout
1555  *	@retries: number of retries before failing
1556  *	@data: returns a structure abstracting the mode header data
1557  *
1558  *	Returns the command's result code: zero on success, non-zero
1559  *	otherwise.  On success, @data->header_length holds the header
1560  *	offset (4 or 8, for a six or ten byte command respectively).
1561  **/
1562 int
1563 __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1564 		  unsigned char *buffer, int len, int timeout, int retries,
1565 		  struct scsi_mode_data *data) {
1566 	unsigned char cmd[12];
1567 	int use_10_for_ms;
1568 	int header_length;
1569 
1570 	memset(data, 0, sizeof(*data));
1571 	memset(&cmd[0], 0, 12);
1572 	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
1573 	cmd[2] = modepage;
1574 
1575  retry:
1576 	use_10_for_ms = sreq->sr_device->use_10_for_ms;
1577 
1578 	if (use_10_for_ms) {
1579 		if (len < 8)
1580 			len = 8;
1581 
1582 		cmd[0] = MODE_SENSE_10;
1583 		cmd[8] = len;
1584 		header_length = 8;
1585 	} else {
1586 		if (len < 4)
1587 			len = 4;
1588 
1589 		cmd[0] = MODE_SENSE;
1590 		cmd[4] = len;
1591 		header_length = 4;
1592 	}
1593 
1594 	sreq->sr_cmd_len = 0;
1595 	memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
1596 	sreq->sr_data_direction = DMA_FROM_DEVICE;
1597 
1598 	memset(buffer, 0, len);
1599 
1600 	scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);
1601 
1602 	/* This code looks awful: what it's doing is making sure an
1603 	 * ILLEGAL REQUEST sense return identifies the actual command
1604 	 * byte as the problem.  MODE_SENSE commands can return
1605 	 * ILLEGAL REQUEST if the mode page isn't supported */
1606 
1607 	if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
1608 	    (driver_byte(sreq->sr_result) & DRIVER_SENSE)) {
1609 		struct scsi_sense_hdr sshdr;
1610 
1611 		if (scsi_request_normalize_sense(sreq, &sshdr)) {
1612 			if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
1613 			    (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
1614 				/*
1615 				 * Invalid command operation code
1616 				 */
1617 				sreq->sr_device->use_10_for_ms = 0;
1618 				goto retry;
1619 			}
1620 		}
1621 	}
1622 
1623 	if(scsi_status_is_good(sreq->sr_result)) {
1624 		data->header_length = header_length;
1625 		if(use_10_for_ms) {
1626 			data->length = buffer[0]*256 + buffer[1] + 2;
1627 			data->medium_type = buffer[2];
1628 			data->device_specific = buffer[3];
1629 			data->longlba = buffer[4] & 0x01;
1630 			data->block_descriptor_length = buffer[6]*256
1631 				+ buffer[7];
1632 		} else {
1633 			data->length = buffer[0] + 1;
1634 			data->medium_type = buffer[1];
1635 			data->device_specific = buffer[2];
1636 			data->block_descriptor_length = buffer[3];
1637 		}
1638 	}
1639 
1640 	return sreq->sr_result;
1641 }
1642 EXPORT_SYMBOL(__scsi_mode_sense);
1643 
1644 /**
1645  *	scsi_mode_sense - issue a mode sense, falling back from ten to
1646  *		six bytes if necessary.
1647  *	@sdev:	scsi device to send command to.
1648  *	@dbd:	set if mode sense will disable block descriptors in the return
1649  *	@modepage: mode page being requested
1650  *	@buffer: request buffer (may not be smaller than eight bytes)
1651  *	@len:	length of request buffer.
1652  *	@timeout: command timeout
1653  *	@retries: number of retries before failing
1654  *
1655  *	Returns the command's result code: zero on success, non-zero
1656  *	otherwise (or -1 if no request could be allocated).  On success,
1657  *	@data->header_length holds the header offset (4 or 8).
1658  **/
1659 int
1660 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1661 		unsigned char *buffer, int len, int timeout, int retries,
1662 		struct scsi_mode_data *data)
1663 {
1664 	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1665 	int ret;
1666 
1667 	if (!sreq)
1668 		return -1;
1669 
1670 	ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
1671 				timeout, retries, data);
1672 
1673 	scsi_release_request(sreq);
1674 
1675 	return ret;
1676 }
1677 EXPORT_SYMBOL(scsi_mode_sense);
1678 
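/*
 * Illustrative sketch (assumed usage): fetch the caching mode page
 * (page code 0x08) and locate the start of the page data using the
 * header and block descriptor lengths returned in @data:
 *
 *	struct scsi_mode_data data;
 *	unsigned char buffer[128];
 *
 *	if (scsi_status_is_good(scsi_mode_sense(sdev, 0, 0x08, buffer,
 *						sizeof(buffer), 30 * HZ,
 *						3, &data))) {
 *		unsigned char *page = buffer + data.header_length +
 *				data.block_descriptor_length;
 *		...
 *	}
 */
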
1679 int
1680 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1681 {
1682 	struct scsi_request *sreq;
1683 	char cmd[] = {
1684 		TEST_UNIT_READY, 0, 0, 0, 0, 0,
1685 	};
1686 	int result;
1687 
1688 	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1689 	if (!sreq)
1690 		return -ENOMEM;
1691 
1692 	sreq->sr_data_direction = DMA_NONE;
1693 	scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
1694 
1695 	if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) {
1696 		struct scsi_sense_hdr sshdr;
1697 
1698 		if ((scsi_request_normalize_sense(sreq, &sshdr)) &&
1699 		    ((sshdr.sense_key == UNIT_ATTENTION) ||
1700 		     (sshdr.sense_key == NOT_READY))) {
1701 			sdev->changed = 1;
1702 			sreq->sr_result = 0;
1703 		}
1704 	}
1705 	result = sreq->sr_result;
1706 	scsi_release_request(sreq);
1707 	return result;
1708 }
1709 EXPORT_SYMBOL(scsi_test_unit_ready);
1710 
1711 /**
1712  *	scsi_device_set_state - Take the given device through the device
1713  *		state model.
1714  *	@sdev:	scsi device to change the state of.
1715  *	@state:	state to change to.
1716  *
1717  *	Returns zero if successful, or -EINVAL if the requested
1718  *	transition is illegal.
1719  **/
1720 int
1721 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1722 {
1723 	enum scsi_device_state oldstate = sdev->sdev_state;
1724 
1725 	if (state == oldstate)
1726 		return 0;
1727 
1728 	switch (state) {
1729 	case SDEV_CREATED:
1730 		/* There are no legal states that come back to
1731 		 * created.  This is the manually initialised start
1732 		 * state */
1733 		goto illegal;
1734 
1735 	case SDEV_RUNNING:
1736 		switch (oldstate) {
1737 		case SDEV_CREATED:
1738 		case SDEV_OFFLINE:
1739 		case SDEV_QUIESCE:
1740 		case SDEV_BLOCK:
1741 			break;
1742 		default:
1743 			goto illegal;
1744 		}
1745 		break;
1746 
1747 	case SDEV_QUIESCE:
1748 		switch (oldstate) {
1749 		case SDEV_RUNNING:
1750 		case SDEV_OFFLINE:
1751 			break;
1752 		default:
1753 			goto illegal;
1754 		}
1755 		break;
1756 
1757 	case SDEV_OFFLINE:
1758 		switch (oldstate) {
1759 		case SDEV_CREATED:
1760 		case SDEV_RUNNING:
1761 		case SDEV_QUIESCE:
1762 		case SDEV_BLOCK:
1763 			break;
1764 		default:
1765 			goto illegal;
1766 		}
1767 		break;
1768 
1769 	case SDEV_BLOCK:
1770 		switch (oldstate) {
1771 		case SDEV_CREATED:
1772 		case SDEV_RUNNING:
1773 			break;
1774 		default:
1775 			goto illegal;
1776 		}
1777 		break;
1778 
1779 	case SDEV_CANCEL:
1780 		switch (oldstate) {
1781 		case SDEV_CREATED:
1782 		case SDEV_RUNNING:
1783 		case SDEV_OFFLINE:
1784 		case SDEV_BLOCK:
1785 			break;
1786 		default:
1787 			goto illegal;
1788 		}
1789 		break;
1790 
1791 	case SDEV_DEL:
1792 		switch (oldstate) {
1793 		case SDEV_CANCEL:
1794 			break;
1795 		default:
1796 			goto illegal;
1797 		}
1798 		break;
1799 
1800 	}
1801 	sdev->sdev_state = state;
1802 	return 0;
1803 
1804  illegal:
1805 	SCSI_LOG_ERROR_RECOVERY(1,
1806 				dev_printk(KERN_ERR, &sdev->sdev_gendev,
1807 					   "Illegal state transition %s->%s\n",
1808 					   scsi_device_state_name(oldstate),
1809 					   scsi_device_state_name(state))
1810 				);
1811 	return -EINVAL;
1812 }
1813 EXPORT_SYMBOL(scsi_device_set_state);
1814 
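/*
 * Summary of the legal transitions encoded above, read as
 * "current state -> states reachable from it":
 *
 *	SDEV_CREATED -> RUNNING, OFFLINE, BLOCK, CANCEL
 *	SDEV_RUNNING -> QUIESCE, OFFLINE, BLOCK, CANCEL
 *	SDEV_QUIESCE -> RUNNING, OFFLINE
 *	SDEV_OFFLINE -> RUNNING, QUIESCE, CANCEL
 *	SDEV_BLOCK   -> RUNNING, OFFLINE, CANCEL
 *	SDEV_CANCEL  -> DEL
 *
 * Nothing transitions back to SDEV_CREATED, and every other
 * combination is rejected with -EINVAL.
 */
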
1815 /**
1816  *	scsi_device_quiesce - Block user issued commands.
1817  *	@sdev:	scsi device to quiesce.
1818  *
1819  *	This works by trying to transition to the SDEV_QUIESCE state
1820  *	(which must be a legal transition).  When the device is in this
1821  *	state, only special requests will be accepted, all others will
1822  *	be deferred.  Since special requests may also be requeued requests,
1823  *	a successful return doesn't guarantee the device will be
1824  *	totally quiescent.
1825  *
1826  *	Must be called with user context, may sleep.
1827  *
1828  *	Returns zero if successful, or an error code if not.
1829  **/
1830 int
1831 scsi_device_quiesce(struct scsi_device *sdev)
1832 {
1833 	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1834 	if (err)
1835 		return err;
1836 
1837 	scsi_run_queue(sdev->request_queue);
1838 	while (sdev->device_busy) {
1839 		msleep_interruptible(200);
1840 		scsi_run_queue(sdev->request_queue);
1841 	}
1842 	return 0;
1843 }
1844 EXPORT_SYMBOL(scsi_device_quiesce);
1845 
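/*
 * Illustrative sketch (assumed usage, in user context): quiesce a
 * device around an operation that must not compete with user I/O,
 * then resume it:
 *
 *	if (scsi_device_quiesce(sdev))
 *		return;			(transition was refused)
 *	do_sensitive_operation(sdev);	(hypothetical helper)
 *	scsi_device_resume(sdev);
 */
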
1846 /**
1847  *	scsi_device_resume - Restart user issued commands to a quiesced device.
1848  *	@sdev:	scsi device to resume.
1849  *
1850  *	Moves the device from quiesced back to running and restarts the
1851  *	queues.
1852  *
1853  *	Must be called with user context, may sleep.
1854  **/
1855 void
1856 scsi_device_resume(struct scsi_device *sdev)
1857 {
1858 	if(scsi_device_set_state(sdev, SDEV_RUNNING))
1859 		return;
1860 	scsi_run_queue(sdev->request_queue);
1861 }
1862 EXPORT_SYMBOL(scsi_device_resume);
1863 
1864 static void
1865 device_quiesce_fn(struct scsi_device *sdev, void *data)
1866 {
1867 	scsi_device_quiesce(sdev);
1868 }
1869 
1870 void
1871 scsi_target_quiesce(struct scsi_target *starget)
1872 {
1873 	starget_for_each_device(starget, NULL, device_quiesce_fn);
1874 }
1875 EXPORT_SYMBOL(scsi_target_quiesce);
1876 
1877 static void
1878 device_resume_fn(struct scsi_device *sdev, void *data)
1879 {
1880 	scsi_device_resume(sdev);
1881 }
1882 
1883 void
1884 scsi_target_resume(struct scsi_target *starget)
1885 {
1886 	starget_for_each_device(starget, NULL, device_resume_fn);
1887 }
1888 EXPORT_SYMBOL(scsi_target_resume);
1889 
1890 /**
1891  * scsi_internal_device_block - internal function to put a device
1892  *				temporarily into the SDEV_BLOCK state
1893  * @sdev:	device to block
1894  *
1895  * Block request made by scsi lld's to temporarily stop all
1896  * scsi commands on the specified device.  Called from interrupt
1897  * or normal process context.
1898  *
1899  * Returns zero if successful or error if not
1900  *
1901  * Notes:
1902  *	This routine transitions the device to the SDEV_BLOCK state
1903  *	(which must be a legal transition).  When the device is in this
1904  *	state, all commands are deferred until the scsi lld reenables
1905  *	the device with scsi_device_unblock or device_block_tmo fires.
1906  *	This routine assumes the host_lock is held on entry.
1907  **/
1908 int
1909 scsi_internal_device_block(struct scsi_device *sdev)
1910 {
1911 	request_queue_t *q = sdev->request_queue;
1912 	unsigned long flags;
1913 	int err = 0;
1914 
1915 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
1916 	if (err)
1917 		return err;
1918 
1919 	/*
1920 	 * The device has transitioned to SDEV_BLOCK.  Stop the
1921 	 * block layer from calling the midlayer with this device's
1922 	 * request queue.
1923 	 */
1924 	spin_lock_irqsave(q->queue_lock, flags);
1925 	blk_stop_queue(q);
1926 	spin_unlock_irqrestore(q->queue_lock, flags);
1927 
1928 	return 0;
1929 }
1930 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
1931 
1932 /**
1933  * scsi_internal_device_unblock - resume a device after a block request
1934  * @sdev:	device to resume
1935  *
1936  * Called by scsi lld's or the midlayer to restart the device queue
1937  * for the previously suspended scsi device.  Called from interrupt or
1938  * normal process context.
1939  *
1940  * Returns zero if successful or error if not.
1941  *
1942  * Notes:
1943  *	This routine transitions the device to the SDEV_RUNNING state
1944  *	(which must be a legal transition) allowing the midlayer to
1945  *	goose the queue for this device.  This routine assumes the
1946  *	host_lock is held upon entry.
1947  **/
1948 int
1949 scsi_internal_device_unblock(struct scsi_device *sdev)
1950 {
1951 	request_queue_t *q = sdev->request_queue;
1952 	int err;
1953 	unsigned long flags;
1954 
1955 	/*
1956 	 * Try to transition the scsi device to SDEV_RUNNING
1957 	 * and goose the device queue if successful.
1958 	 */
1959 	err = scsi_device_set_state(sdev, SDEV_RUNNING);
1960 	if (err)
1961 		return err;
1962 
1963 	spin_lock_irqsave(q->queue_lock, flags);
1964 	blk_start_queue(q);
1965 	spin_unlock_irqrestore(q->queue_lock, flags);
1966 
1967 	return 0;
1968 }
1969 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
1970 
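/*
 * Illustrative sketch (assumed low-level driver usage): both helpers
 * above expect to be entered with the host_lock held, for example:
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_block(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *	...
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_unblock(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */
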
1971 static void
1972 device_block(struct scsi_device *sdev, void *data)
1973 {
1974 	scsi_internal_device_block(sdev);
1975 }
1976 
1977 static int
1978 target_block(struct device *dev, void *data)
1979 {
1980 	if (scsi_is_target_device(dev))
1981 		starget_for_each_device(to_scsi_target(dev), NULL,
1982 					device_block);
1983 	return 0;
1984 }
1985 
1986 void
1987 scsi_target_block(struct device *dev)
1988 {
1989 	if (scsi_is_target_device(dev))
1990 		starget_for_each_device(to_scsi_target(dev), NULL,
1991 					device_block);
1992 	else
1993 		device_for_each_child(dev, NULL, target_block);
1994 }
1995 EXPORT_SYMBOL_GPL(scsi_target_block);
1996 
1997 static void
1998 device_unblock(struct scsi_device *sdev, void *data)
1999 {
2000 	scsi_internal_device_unblock(sdev);
2001 }
2002 
2003 static int
2004 target_unblock(struct device *dev, void *data)
2005 {
2006 	if (scsi_is_target_device(dev))
2007 		starget_for_each_device(to_scsi_target(dev), NULL,
2008 					device_unblock);
2009 	return 0;
2010 }
2011 
2012 void
2013 scsi_target_unblock(struct device *dev)
2014 {
2015 	if (scsi_is_target_device(dev))
2016 		starget_for_each_device(to_scsi_target(dev), NULL,
2017 					device_unblock);
2018 	else
2019 		device_for_each_child(dev, NULL, target_unblock);
2020 }
2021 EXPORT_SYMBOL_GPL(scsi_target_unblock);
2022