1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9 
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/completion.h>
13 #include <linux/kernel.h>
14 #include <linux/mempool.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/pci.h>
18 #include <linux/delay.h>
19 
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_dbg.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_driver.h>
24 #include <scsi/scsi_eh.h>
25 #include <scsi/scsi_host.h>
26 #include <scsi/scsi_request.h>
27 
28 #include "scsi_priv.h"
29 #include "scsi_logging.h"
30 
31 
32 #define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
33 #define SG_MEMPOOL_SIZE		32
34 
35 struct scsi_host_sg_pool {
36 	size_t		size;
37 	char		*name;
38 	kmem_cache_t	*slab;
39 	mempool_t	*pool;
40 };
41 
42 #if (SCSI_MAX_PHYS_SEGMENTS < 32)
43 #error SCSI_MAX_PHYS_SEGMENTS is too small
44 #endif
45 
46 #define SP(x) { x, "sgpool-" #x }
47 static struct scsi_host_sg_pool scsi_sg_pools[] = {
48 	SP(8),
49 	SP(16),
50 	SP(32),
51 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
52 	SP(64),
53 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
54 	SP(128),
55 #if (SCSI_MAX_PHYS_SEGMENTS > 128)
56 	SP(256),
57 #if (SCSI_MAX_PHYS_SEGMENTS > 256)
58 #error SCSI_MAX_PHYS_SEGMENTS is too large
59 #endif
60 #endif
61 #endif
62 #endif
63 };
64 #undef SP
65 
66 
67 /*
68  * Function:    scsi_insert_special_req()
69  *
70  * Purpose:     Insert pre-formed request into request queue.
71  *
72  * Arguments:   sreq	- request that is ready to be queued.
73  *              at_head	- boolean.  True if we should insert at head
74  *                        of queue, false if we should insert at tail.
75  *
76  * Lock status: Assumed that lock is not held upon entry.
77  *
78  * Returns:     Zero (the function currently always succeeds).
79  *
80  * Notes:       This function is called from character device and from
81  *              ioctl types of functions where the caller knows exactly
82  *              what SCSI command needs to be issued.   The idea is that
83  *              we merely inject the command into the queue (at the head
84  *              for now), and then call the queue request function to actually
85  *              process it.
86  */
87 int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
88 {
89 	/*
90 	 * Because users of this function are apt to reuse requests with no
91 	 * modification, we have to sanitise the request flags here
92 	 */
93 	sreq->sr_request->flags &= ~REQ_DONTPREP;
94 	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
95 		       	   at_head, sreq);
96 	return 0;
97 }
98 
99 static void scsi_run_queue(struct request_queue *q);
100 
101 /*
102  * Function:	scsi_unprep_request()
103  *
104  * Purpose:	Remove all preparation done for a request, including its
105  *		associated scsi_cmnd, so that it can be requeued.
106  *
107  * Arguments:	req	- request to unprepare
108  *
109  * Lock status:	Assumed that no locks are held upon entry.
110  *
111  * Returns:	Nothing.
112  */
113 static void scsi_unprep_request(struct request *req)
114 {
115 	struct scsi_cmnd *cmd = req->special;
116 
117 	req->flags &= ~REQ_DONTPREP;
118 	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
119 
120 	scsi_put_command(cmd);
121 }
122 
123 /*
124  * Function:    scsi_queue_insert()
125  *
126  * Purpose:     Insert a command in the midlevel queue.
127  *
128  * Arguments:   cmd    - command that we are adding to queue.
129  *              reason - why we are inserting command to queue.
130  *
131  * Lock status: Assumed that lock is not held upon entry.
132  *
133  * Returns:     Zero (the command is always requeued).
134  *
135  * Notes:       We do this for one of two cases.  Either the host is busy
136  *              and it cannot accept any more commands for the time being,
137  *              or the device returned QUEUE_FULL and can accept no more
138  *              commands.
139  * Notes:       This could be called either from an interrupt context or a
140  *              normal process context.
141  */
142 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
143 {
144 	struct Scsi_Host *host = cmd->device->host;
145 	struct scsi_device *device = cmd->device;
146 	struct request_queue *q = device->request_queue;
147 	unsigned long flags;
148 
149 	SCSI_LOG_MLQUEUE(1,
150 		 printk("Inserting command %p into mlqueue\n", cmd));
151 
152 	/*
153 	 * Set the appropriate busy bit for the device/host.
154 	 *
155 	 * If the host/device isn't busy, assume that something actually
156 	 * completed, and that we should be able to queue a command now.
157 	 *
158 	 * Note that the prior mid-layer assumption that any host could
159 	 * always queue at least one command is now broken.  The mid-layer
160 	 * will implement a user specifiable stall (see
161 	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
162 	 * if a command is requeued with no other commands outstanding
163 	 * either for the device or for the host.
164 	 */
165 	if (reason == SCSI_MLQUEUE_HOST_BUSY)
166 		host->host_blocked = host->max_host_blocked;
167 	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
168 		device->device_blocked = device->max_device_blocked;
169 
170 	/*
171 	 * Decrement the counters, since these commands are no longer
172 	 * active on the host/device.
173 	 */
174 	scsi_device_unbusy(device);
175 
176 	/*
177 	 * Requeue this command.  It will go before all other commands
178 	 * that are already in the queue.
179 	 *
180 	 * NOTE: there is magic here about the way the queue is plugged if
181 	 * we have no outstanding commands.
182 	 *
183 	 * Although we *don't* plug the queue, we call the request
184 	 * function.  The SCSI request function detects the blocked condition
185 	 * and plugs the queue appropriately.
186 	 */
187 	spin_lock_irqsave(q->queue_lock, flags);
188 	blk_requeue_request(q, cmd->request);
189 	spin_unlock_irqrestore(q->queue_lock, flags);
190 
191 	scsi_run_queue(q);
192 
193 	return 0;
194 }
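
/*
 * A minimal sketch of the caller side: a scsi_dispatch_cmd() style error
 * path funnels a low-level driver's refusal back through
 * scsi_queue_insert() using the two reason codes handled above.
 * Illustrative only, not a verbatim copy of the dispatch code.
 *
 *	switch (rtn) {
 *	case SCSI_MLQUEUE_HOST_BUSY:
 *	case SCSI_MLQUEUE_DEVICE_BUSY:
 *		scsi_queue_insert(cmd, rtn);
 *		break;
 *	}
 */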
195 
196 /*
197  * Function:    scsi_do_req
198  *
199  * Purpose:     Queue a SCSI request
200  *
201  * Arguments:   sreq	  - command descriptor.
202  *              cmnd      - actual SCSI command to be performed.
203  *              buffer    - data buffer.
204  *              bufflen   - size of data buffer.
205  *              done      - completion function to be run.
206  *              timeout   - how long to let it run before timeout.
207  *              retries   - number of retries we allow.
208  *
209  * Lock status: No locks held upon entry.
210  *
211  * Returns:     Nothing.
212  *
213  * Notes:	This function is only used for queueing requests for things
214  *		like ioctls and character device requests - this is because
215  *		we essentially just inject a request into the queue for the
216  *		device.
217  *
218  *		In order to support the scsi_device_quiesce function, we
219  *		now inject requests on the *head* of the device queue
220  *		rather than the tail.
221  */
222 void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
223 		 void *buffer, unsigned bufflen,
224 		 void (*done)(struct scsi_cmnd *),
225 		 int timeout, int retries)
226 {
227 	/*
228 	 * If the upper level driver is reusing these things, then
229 	 * we should release the low-level block now.  Another one will
230 	 * be allocated later when this request is getting queued.
231 	 */
232 	__scsi_release_request(sreq);
233 
234 	/*
235 	 * Our own function scsi_done (which marks the host as not busy,
236 	 * disables the timeout counter, etc) will be called by us or by
237 	 * the scsi_hosts[host].queuecommand() path; either way it must
238 	 * then call the completion function for the high-level driver.
239 	 */
240 	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
241 	sreq->sr_bufflen = bufflen;
242 	sreq->sr_buffer = buffer;
243 	sreq->sr_allowed = retries;
244 	sreq->sr_done = done;
245 	sreq->sr_timeout_per_command = timeout;
246 
247 	if (sreq->sr_cmd_len == 0)
248 		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
249 
250 	/*
251 	 * head injection *required* here otherwise quiesce won't work
252 	 */
253 	scsi_insert_special_req(sreq, 1);
254 }
255 EXPORT_SYMBOL(scsi_do_req);
256 
257 /**
258  * scsi_execute - insert request and wait for the result
259  * @sdev:	scsi device
260  * @cmd:	scsi command
261  * @data_direction: data direction
262  * @buffer:	data buffer
263  * @bufflen:	len of buffer
264  * @sense:	optional sense buffer
265  * @timeout:	request timeout in jiffies
266  * @retries:	number of times to retry request
267  * @flags:	flags to OR into the request flags
268  *
269  * returns the req->errors value, which is the scsi_cmnd result
270  * field.
271  **/
272 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
273 		 int data_direction, void *buffer, unsigned bufflen,
274 		 unsigned char *sense, int timeout, int retries, int flags)
275 {
276 	struct request *req;
277 	int write = (data_direction == DMA_TO_DEVICE);
278 	int ret = DRIVER_ERROR << 24;
279 
280 	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
281 
282 	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
283 					buffer, bufflen, __GFP_WAIT))
284 		goto out;
285 
286 	req->cmd_len = COMMAND_SIZE(cmd[0]);
287 	memcpy(req->cmd, cmd, req->cmd_len);
288 	req->sense = sense;
289 	req->sense_len = 0;
290 	req->timeout = timeout;
291 	req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
292 
293 	/*
294 	 * head injection *required* here otherwise quiesce won't work
295 	 */
296 	blk_execute_rq(req->q, NULL, req, 1);
297 
298 	ret = req->errors;
299  out:
300 	blk_put_request(req);
301 
302 	return ret;
303 }
304 EXPORT_SYMBOL(scsi_execute);
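
/*
 * A minimal usage sketch for scsi_execute(), assuming the caller holds a
 * reference on sdev; the TEST UNIT READY CDB moves no data, so buffer and
 * bufflen are NULL/0.  The timeout and retry values are illustrative.
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense,
 *			      30 * HZ, 3, 0);
 *	if (result)
 *		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
 */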
305 
306 
307 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
308 		     int data_direction, void *buffer, unsigned bufflen,
309 		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
310 {
311 	char *sense = NULL;
312 	int result;
313 
314 	if (sshdr) {
315 		sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
316 		if (!sense)
317 			return DRIVER_ERROR << 24;
318 		memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
319 	}
320 	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
321 				  sense, timeout, retries, 0);
322 	if (sshdr)
323 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
324 
325 	kfree(sense);
326 	return result;
327 }
328 EXPORT_SYMBOL(scsi_execute_req);
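
/*
 * scsi_execute_req() is the sense-decoding wrapper around scsi_execute().
 * A sketch of reacting to a decoded sense key after a failure; cmd,
 * buffer and bufflen are the caller's, and the timeout/retry values are
 * illustrative only.
 *
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
 *				  bufflen, &sshdr, 10 * HZ, 3);
 *	if (result && scsi_sense_valid(&sshdr) &&
 *	    sshdr.sense_key == UNIT_ATTENTION)
 *		(e.g. the medium may have changed: retry or rescan)
 */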
329 
330 /*
331  * Function:    scsi_init_cmd_errh()
332  *
333  * Purpose:     Initialize cmd fields related to error handling.
334  *
335  * Arguments:   cmd	- command that is ready to be queued.
336  *
337  * Returns:     1 (the function currently always reports success)
338  *
339  * Notes:       This function has the job of initializing a number of
340  *              fields related to error handling.   Typically this will
341  *              be called once for each command, as required.
342  */
343 static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
344 {
345 	cmd->serial_number = 0;
346 
347 	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
348 
349 	if (cmd->cmd_len == 0)
350 		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
351 
352 	/*
353 	 * We need saved copies of a number of fields - this is because
354 	 * error handling may need to overwrite these with different values
355 	 * to run different commands, and once error handling is complete,
356 	 * we will need to restore these values prior to running the actual
357 	 * command.
358 	 */
359 	cmd->old_use_sg = cmd->use_sg;
360 	cmd->old_cmd_len = cmd->cmd_len;
361 	cmd->sc_old_data_direction = cmd->sc_data_direction;
362 	cmd->old_underflow = cmd->underflow;
363 	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
364 	cmd->buffer = cmd->request_buffer;
365 	cmd->bufflen = cmd->request_bufflen;
366 
367 	return 1;
368 }
369 
370 /*
371  * Function:   scsi_setup_cmd_retry()
372  *
373  * Purpose:    Restore the command state for a retry
374  *
375  * Arguments:  cmd	- command to be restored
376  *
377  * Returns:    Nothing
378  *
379  * Notes:      Immediately prior to retrying a command, we need
380  *             to restore certain fields that we saved above.
381  */
382 void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
383 {
384 	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
385 	cmd->request_buffer = cmd->buffer;
386 	cmd->request_bufflen = cmd->bufflen;
387 	cmd->use_sg = cmd->old_use_sg;
388 	cmd->cmd_len = cmd->old_cmd_len;
389 	cmd->sc_data_direction = cmd->sc_old_data_direction;
390 	cmd->underflow = cmd->old_underflow;
391 }
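
/*
 * scsi_init_cmd_errh() and scsi_setup_cmd_retry() form a save/restore
 * pair: the former snapshots the fields error handling may overwrite,
 * the latter puts them back immediately before a retry.  Sketch of the
 * intended sequence (illustrative only):
 *
 *	scsi_init_cmd_errh(cmd);	(saves cmnd, use_sg, buffers, ...)
 *	... error handling reuses cmd for its own commands ...
 *	scsi_setup_cmd_retry(cmd);	(restores the saved values)
 */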
392 
393 void scsi_device_unbusy(struct scsi_device *sdev)
394 {
395 	struct Scsi_Host *shost = sdev->host;
396 	unsigned long flags;
397 
398 	spin_lock_irqsave(shost->host_lock, flags);
399 	shost->host_busy--;
400 	if (unlikely(scsi_host_in_recovery(shost) &&
401 		     shost->host_failed))
402 		scsi_eh_wakeup(shost);
403 	spin_unlock(shost->host_lock);
404 	spin_lock(sdev->request_queue->queue_lock);
405 	sdev->device_busy--;
406 	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
407 }
408 
409 /*
410  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
411  * and call blk_run_queue for all the scsi_devices on the target -
412  * including current_sdev first.
413  *
414  * Called with *no* scsi locks held.
415  */
416 static void scsi_single_lun_run(struct scsi_device *current_sdev)
417 {
418 	struct Scsi_Host *shost = current_sdev->host;
419 	struct scsi_device *sdev, *tmp;
420 	struct scsi_target *starget = scsi_target(current_sdev);
421 	unsigned long flags;
422 
423 	spin_lock_irqsave(shost->host_lock, flags);
424 	starget->starget_sdev_user = NULL;
425 	spin_unlock_irqrestore(shost->host_lock, flags);
426 
427 	/*
428 	 * Call blk_run_queue for all LUNs on the target, starting with
429 	 * current_sdev. We race with others (to set starget_sdev_user),
430 	 * but in most cases, we will be first. Ideally, each LU on the
431 	 * target would get some limited time or requests on the target.
432 	 */
433 	blk_run_queue(current_sdev->request_queue);
434 
435 	spin_lock_irqsave(shost->host_lock, flags);
436 	if (starget->starget_sdev_user)
437 		goto out;
438 	list_for_each_entry_safe(sdev, tmp, &starget->devices,
439 			same_target_siblings) {
440 		if (sdev == current_sdev)
441 			continue;
442 		if (scsi_device_get(sdev))
443 			continue;
444 
445 		spin_unlock_irqrestore(shost->host_lock, flags);
446 		blk_run_queue(sdev->request_queue);
447 		spin_lock_irqsave(shost->host_lock, flags);
448 
449 		scsi_device_put(sdev);
450 	}
451  out:
452 	spin_unlock_irqrestore(shost->host_lock, flags);
453 }
454 
455 /*
456  * Function:	scsi_run_queue()
457  *
458  * Purpose:	Select a proper request queue to serve next
459  *
460  * Arguments:	q	- last request's queue
461  *
462  * Returns:     Nothing
463  *
464  * Notes:	The previous command was completely finished, start
465  *		a new one if possible.
466  */
467 static void scsi_run_queue(struct request_queue *q)
468 {
469 	struct scsi_device *sdev = q->queuedata;
470 	struct Scsi_Host *shost = sdev->host;
471 	unsigned long flags;
472 
473 	if (sdev->single_lun)
474 		scsi_single_lun_run(sdev);
475 
476 	spin_lock_irqsave(shost->host_lock, flags);
477 	while (!list_empty(&shost->starved_list) &&
478 	       !shost->host_blocked && !shost->host_self_blocked &&
479 		!((shost->can_queue > 0) &&
480 		  (shost->host_busy >= shost->can_queue))) {
481 		/*
482 		 * As long as shost is accepting commands and we have
483 		 * starved queues, call blk_run_queue. scsi_request_fn
484 		 * drops the queue_lock and can add us back to the
485 		 * starved_list.
486 		 *
487 		 * host_lock protects the starved_list and starved_entry.
488 		 * scsi_request_fn must get the host_lock before checking
489 		 * or modifying starved_list or starved_entry.
490 		 */
491 		sdev = list_entry(shost->starved_list.next,
492 					  struct scsi_device, starved_entry);
493 		list_del_init(&sdev->starved_entry);
494 		spin_unlock_irqrestore(shost->host_lock, flags);
495 
496 		blk_run_queue(sdev->request_queue);
497 
498 		spin_lock_irqsave(shost->host_lock, flags);
499 		if (unlikely(!list_empty(&sdev->starved_entry)))
500 			/*
501 			 * sdev lost a race, and was put back on the
502 			 * starved list. This is unlikely but without this
503 			 * in theory we could loop forever.
504 			 */
505 			break;
506 	}
507 	spin_unlock_irqrestore(shost->host_lock, flags);
508 
509 	blk_run_queue(q);
510 }
511 
512 /*
513  * Function:	scsi_requeue_command()
514  *
515  * Purpose:	Handle post-processing of completed commands.
516  *
517  * Arguments:	q	- queue to operate on
518  *		cmd	- command that may need to be requeued.
519  *
520  * Returns:	Nothing
521  *
522  * Notes:	After command completion, there may be blocks left
523  *		over which weren't finished by the previous command
524  *		this can be for a number of reasons - the main one is
525  *		I/O errors in the middle of the request, in which case
526  *		we need to request the blocks that come after the bad
527  *		sector.
528  * Notes:	Upon return, cmd is a stale pointer.
529  */
530 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
531 {
532 	struct request *req = cmd->request;
533 	unsigned long flags;
534 
535 	scsi_unprep_request(req);
536 	spin_lock_irqsave(q->queue_lock, flags);
537 	blk_requeue_request(q, req);
538 	spin_unlock_irqrestore(q->queue_lock, flags);
539 
540 	scsi_run_queue(q);
541 }
542 
543 void scsi_next_command(struct scsi_cmnd *cmd)
544 {
545 	struct scsi_device *sdev = cmd->device;
546 	struct request_queue *q = sdev->request_queue;
547 
548 	/* need to hold a reference on the device before we let go of the cmd */
549 	get_device(&sdev->sdev_gendev);
550 
551 	scsi_put_command(cmd);
552 	scsi_run_queue(q);
553 
554 	/* ok to remove device now */
555 	put_device(&sdev->sdev_gendev);
556 }
557 
558 void scsi_run_host_queues(struct Scsi_Host *shost)
559 {
560 	struct scsi_device *sdev;
561 
562 	shost_for_each_device(sdev, shost)
563 		scsi_run_queue(sdev->request_queue);
564 }
565 
566 /*
567  * Function:    scsi_end_request()
568  *
569  * Purpose:     Post-processing of completed commands (usually invoked at end
570  *		of upper level post-processing and scsi_io_completion).
571  *
572  * Arguments:   cmd	 - command that is complete.
573  *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
574  *              bytes    - number of bytes of completed I/O
575  *		requeue  - indicates whether we should requeue leftovers.
576  *
577  * Lock status: Assumed that lock is not held upon entry.
578  *
579  * Returns:     cmd if requeue required, NULL otherwise.
580  *
581  * Notes:       This is called for block device requests in order to
582  *              mark some number of sectors as complete.
583  *
584  *		We are guaranteeing that the request queue will be goosed
585  *		at some point during this call.
586  * Notes:	If cmd was requeued, upon return it will be a stale pointer.
587  */
588 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
589 					  int bytes, int requeue)
590 {
591 	request_queue_t *q = cmd->device->request_queue;
592 	struct request *req = cmd->request;
593 	unsigned long flags;
594 
595 	/*
596 	 * If there are blocks left over at the end, set up the command
597 	 * to queue the remainder of them.
598 	 */
599 	if (end_that_request_chunk(req, uptodate, bytes)) {
600 		int leftover = (req->hard_nr_sectors << 9);
601 
602 		if (blk_pc_request(req))
603 			leftover = req->data_len;
604 
605 		/* kill remainder if no retries */
606 		if (!uptodate && blk_noretry_request(req))
607 			end_that_request_chunk(req, 0, leftover);
608 		else {
609 			if (requeue) {
610 				/*
611 				 * Bleah.  Leftovers again.  Stick the
612 				 * leftovers in the front of the
613 				 * queue, and goose the queue again.
614 				 */
615 				scsi_requeue_command(q, cmd);
616 				cmd = NULL;
617 			}
618 			return cmd;
619 		}
620 	}
621 
622 	add_disk_randomness(req->rq_disk);
623 
624 	spin_lock_irqsave(q->queue_lock, flags);
625 	if (blk_rq_tagged(req))
626 		blk_queue_end_tag(q, req);
627 	end_that_request_last(req);
628 	spin_unlock_irqrestore(q->queue_lock, flags);
629 
630 	/*
631 	 * This will goose the queue request function at the end, so we don't
632 	 * need to worry about launching another command.
633 	 */
634 	scsi_next_command(cmd);
635 	return NULL;
636 }
637 
638 static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
639 {
640 	struct scsi_host_sg_pool *sgp;
641 	struct scatterlist *sgl;
642 
643 	BUG_ON(!cmd->use_sg);
644 
645 	switch (cmd->use_sg) {
646 	case 1 ... 8:
647 		cmd->sglist_len = 0;
648 		break;
649 	case 9 ... 16:
650 		cmd->sglist_len = 1;
651 		break;
652 	case 17 ... 32:
653 		cmd->sglist_len = 2;
654 		break;
655 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
656 	case 33 ... 64:
657 		cmd->sglist_len = 3;
658 		break;
659 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
660 	case 65 ... 128:
661 		cmd->sglist_len = 4;
662 		break;
663 #if (SCSI_MAX_PHYS_SEGMENTS > 128)
664 	case 129 ... 256:
665 		cmd->sglist_len = 5;
666 		break;
667 #endif
668 #endif
669 #endif
670 	default:
671 		return NULL;
672 	}
673 
674 	sgp = scsi_sg_pools + cmd->sglist_len;
675 	sgl = mempool_alloc(sgp->pool, gfp_mask);
676 	return sgl;
677 }
678 
679 static void scsi_free_sgtable(struct scatterlist *sgl, int index)
680 {
681 	struct scsi_host_sg_pool *sgp;
682 
683 	BUG_ON(index >= SG_MEMPOOL_NR);
684 
685 	sgp = scsi_sg_pools + index;
686 	mempool_free(sgl, sgp->pool);
687 }
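
/*
 * cmd->sglist_len, set by scsi_alloc_sgtable() above, is the index into
 * scsi_sg_pools[], so allocation and free must agree on it.  Pairing
 * sketch (illustrative only):
 *
 *	sgl = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
 *	if (sgl) {
 *		...
 *		scsi_free_sgtable(sgl, cmd->sglist_len);
 *	}
 */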
688 
689 /*
690  * Function:    scsi_release_buffers()
691  *
692  * Purpose:     Completion processing for block device I/O requests.
693  *
694  * Arguments:   cmd	- command that we are bailing.
695  *
696  * Lock status: Assumed that no lock is held upon entry.
697  *
698  * Returns:     Nothing
699  *
700  * Notes:       In the event that an upper level driver rejects a
701  *		command, we must release resources allocated during
702  *		the __init_io() function.  Primarily this would involve
703  *		the scatter-gather table, and potentially any bounce
704  *		buffers.
705  */
706 static void scsi_release_buffers(struct scsi_cmnd *cmd)
707 {
708 	struct request *req = cmd->request;
709 
710 	/*
711 	 * Free up any indirection buffers we allocated for DMA purposes.
712 	 */
713 	if (cmd->use_sg)
714 		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
715 	else if (cmd->request_buffer != req->buffer)
716 		kfree(cmd->request_buffer);
717 
718 	/*
719 	 * Zero these out.  They now point to freed memory, and it is
720 	 * dangerous to hang onto the pointers.
721 	 */
722 	cmd->buffer  = NULL;
723 	cmd->bufflen = 0;
724 	cmd->request_buffer = NULL;
725 	cmd->request_bufflen = 0;
726 }
727 
728 /*
729  * Function:    scsi_io_completion()
730  *
731  * Purpose:     Completion processing for block device I/O requests.
732  *
733  * Arguments:   cmd   - command that is finished.
 *              good_bytes - number of bytes of completed I/O.
 *              block_bytes - residual byte count used when failing the
 *                            remainder of the request on error.
734  *
735  * Lock status: Assumed that no lock is held upon entry.
736  *
737  * Returns:     Nothing
738  *
739  * Notes:       This function is matched in terms of capabilities to
740  *              the function that created the scatter-gather list.
741  *              In other words, if there are no bounce buffers
742  *              (the normal case for most drivers), we don't need
743  *              the logic to deal with cleaning up afterwards.
744  *
745  *		We must do one of several things here:
746  *
747  *		a) Call scsi_end_request.  This will finish off the
748  *		   specified number of sectors.  If we are done, the
749  *		   command block will be released, and the queue
750  *		   function will be goosed.  If we are not done, then
751  *		   scsi_end_request will directly goose the queue.
752  *
753  *		b) We can just use scsi_requeue_command() here.  This would
754  *		   be used if we just wanted to retry, for example.
755  */
756 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
757 			unsigned int block_bytes)
758 {
759 	int result = cmd->result;
760 	int this_count = cmd->bufflen;
761 	request_queue_t *q = cmd->device->request_queue;
762 	struct request *req = cmd->request;
763 	int clear_errors = 1;
764 	struct scsi_sense_hdr sshdr;
765 	int sense_valid = 0;
766 	int sense_deferred = 0;
767 
768 	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
769 		return;
770 
771 	/*
772 	 * Free up any indirection buffers we allocated for DMA purposes.
773 	 * For the case of a READ, we need to copy the data out of the
774 	 * bounce buffer and into the real buffer.
775 	 */
776 	if (cmd->use_sg)
777 		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
778 	else if (cmd->buffer != req->buffer) {
779 		if (rq_data_dir(req) == READ) {
780 			unsigned long flags;
781 			char *to = bio_kmap_irq(req->bio, &flags);
782 			memcpy(to, cmd->buffer, cmd->bufflen);
783 			bio_kunmap_irq(to, &flags);
784 		}
785 		kfree(cmd->buffer);
786 	}
787 
788 	if (result) {
789 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
790 		if (sense_valid)
791 			sense_deferred = scsi_sense_is_deferred(&sshdr);
792 	}
793 	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
794 		req->errors = result;
795 		if (result) {
796 			clear_errors = 0;
797 			if (sense_valid && req->sense) {
798 				/*
799 				 * SG_IO wants current and deferred errors
800 				 */
801 				int len = 8 + cmd->sense_buffer[7];
802 
803 				if (len > SCSI_SENSE_BUFFERSIZE)
804 					len = SCSI_SENSE_BUFFERSIZE;
805 				memcpy(req->sense, cmd->sense_buffer, len);
806 				req->sense_len = len;
807 			}
808 		} else
809 			req->data_len = cmd->resid;
810 	}
811 
812 	/*
813 	 * Zero these out.  They now point to freed memory, and it is
814 	 * dangerous to hang onto the pointers.
815 	 */
816 	cmd->buffer  = NULL;
817 	cmd->bufflen = 0;
818 	cmd->request_buffer = NULL;
819 	cmd->request_bufflen = 0;
820 
821 	/*
822 	 * Next deal with any sectors which we were able to correctly
823 	 * handle.
824 	 */
825 	if (good_bytes >= 0) {
826 		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
827 					      req->nr_sectors, good_bytes));
828 		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
829 
830 		if (clear_errors)
831 			req->errors = 0;
832 		/*
833 		 * If multiple sectors are requested in one buffer, then
834 		 * they will have been finished off by the first command.
835 		 * If not, then we have a multi-buffer command.
836 		 *
837 		 * If block_bytes != 0, it means we had a medium error
838 		 * of some sort, and that we want to mark some number of
839 		 * sectors as not uptodate.  Thus we want to inhibit
840 		 * requeueing right here - we will requeue down below
841 		 * when we handle the bad sectors.
842 		 */
843 
844 		/*
845 		 * If the command completed without error, then either
846 		 * finish off the rest of the command, or start a new one.
847 		 */
848 		if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
849 			return;
850 	}
851 	/*
852 	 * Now, if we were good little boys and girls, Santa left us a request
853 	 * sense buffer.  We can extract information from this, so we
854 	 * can choose a block to remap, etc.
855 	 */
856 	if (sense_valid && !sense_deferred) {
857 		switch (sshdr.sense_key) {
858 		case UNIT_ATTENTION:
859 			if (cmd->device->removable) {
860 				/* detected disc change.  set a bit
861 				 * and quietly refuse further access.
862 				 */
863 				cmd->device->changed = 1;
864 				scsi_end_request(cmd, 0,
865 						this_count, 1);
866 				return;
867 			} else {
868 				/*
869 				 * Must have been a power glitch, or a
870 				 * bus reset.  Could not have been a
871 				 * media change, so we just retry the
872 				 * request and see what happens.
873 				 */
874 				scsi_requeue_command(q, cmd);
875 				return;
876 			}
877 			break;
878 		case ILLEGAL_REQUEST:
879 			/*
880 			 * If we had an ILLEGAL REQUEST returned, then we may
881 			 * have performed an unsupported command.  The only
882 			 * thing this should be would be a ten byte read where
883 			 * only a six byte read was supported.  Also, on a
884 			 * system where READ CAPACITY failed, we may have read
885 			 * past the end of the disk.
886 			 */
887 			if (cmd->device->use_10_for_rw &&
888 			    (cmd->cmnd[0] == READ_10 ||
889 			     cmd->cmnd[0] == WRITE_10)) {
890 				cmd->device->use_10_for_rw = 0;
891 				/*
892 				 * This will cause a retry with a 6-byte
893 				 * command.
894 				 */
895 				scsi_requeue_command(q, cmd);
896 				result = 0;
897 			} else {
898 				scsi_end_request(cmd, 0, this_count, 1);
899 				return;
900 			}
901 			break;
902 		case NOT_READY:
903 			/*
904 			 * If the device is in the process of becoming ready,
905 			 * retry.
906 			 */
907 			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
908 				scsi_requeue_command(q, cmd);
909 				return;
910 			}
911 			if (!(req->flags & REQ_QUIET))
912 				scmd_printk(KERN_INFO, cmd,
913 					   "Device not ready.\n");
914 			scsi_end_request(cmd, 0, this_count, 1);
915 			return;
916 		case VOLUME_OVERFLOW:
917 			if (!(req->flags & REQ_QUIET)) {
918 				scmd_printk(KERN_INFO, cmd,
919 					   "Volume overflow, CDB: ");
920 				__scsi_print_command(cmd->data_cmnd);
921 				scsi_print_sense("", cmd);
922 			}
923 			scsi_end_request(cmd, 0, block_bytes, 1);
924 			return;
925 		default:
926 			break;
927 		}
928 	}			/* end of sense_valid && !sense_deferred */
929 	if (host_byte(result) == DID_RESET) {
930 		/*
931 		 * Third party bus reset or reset for error
932 		 * recovery reasons.  Just retry the request
933 		 * and see what happens.
934 		 */
935 		scsi_requeue_command(q, cmd);
936 		return;
937 	}
938 	if (result) {
939 		if (!(req->flags & REQ_QUIET)) {
940 			scmd_printk(KERN_INFO, cmd,
941 				   "SCSI error: return code = 0x%x\n", result);
942 
943 			if (driver_byte(result) & DRIVER_SENSE)
944 				scsi_print_sense("", cmd);
945 		}
946 		/*
947 		 * Mark a single buffer as not uptodate.  Queue the remainder.
948 		 * We sometimes get this cruft in the event that a medium error
949 		 * isn't properly reported.
950 		 */
951 		block_bytes = req->hard_cur_sectors << 9;
952 		if (!block_bytes)
953 			block_bytes = req->data_len;
954 		scsi_end_request(cmd, 0, block_bytes, 1);
955 	}
956 }
957 EXPORT_SYMBOL(scsi_io_completion);
958 
959 /*
960  * Function:    scsi_init_io()
961  *
962  * Purpose:     SCSI I/O initialize function.
963  *
964  * Arguments:   cmd   - Command descriptor we wish to initialize
965  *
966  * Returns:     0 on success
967  *		BLKPREP_DEFER if the failure is retryable
968  *		BLKPREP_KILL if the failure is fatal
969  */
970 static int scsi_init_io(struct scsi_cmnd *cmd)
971 {
972 	struct request     *req = cmd->request;
973 	struct scatterlist *sgpnt;
974 	int		   count;
975 
976 	/*
977 	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
978 	 */
979 	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
980 		cmd->request_bufflen = req->data_len;
981 		cmd->request_buffer = req->data;
982 		req->buffer = req->data;
983 		cmd->use_sg = 0;
984 		return 0;
985 	}
986 
987 	/*
988 	 * we used to not use scatter-gather for single segment request,
989 	 * but now we do (it makes highmem I/O easier to support without
990 	 * kmapping pages)
991 	 */
992 	cmd->use_sg = req->nr_phys_segments;
993 
994 	/*
995 	 * if sg table allocation fails, requeue request later.
996 	 */
997 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
998 	if (unlikely(!sgpnt)) {
999 		scsi_unprep_request(req);
1000 		return BLKPREP_DEFER;
1001 	}
1002 
1003 	cmd->request_buffer = (char *) sgpnt;
1004 	cmd->request_bufflen = req->nr_sectors << 9;
1005 	if (blk_pc_request(req))
1006 		cmd->request_bufflen = req->data_len;
1007 	req->buffer = NULL;
1008 
1009 	/*
1010 	 * Next, walk the list, and fill in the addresses and sizes of
1011 	 * each segment.
1012 	 */
1013 	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
1014 
1015 	/*
1016 	 * mapped well, send it off
1017 	 */
1018 	if (likely(count <= cmd->use_sg)) {
1019 		cmd->use_sg = count;
1020 		return 0;
1021 	}
1022 
1023 	printk(KERN_ERR "Incorrect number of segments after building list\n");
1024 	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
1025 	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
1026 			req->current_nr_sectors);
1027 
1028 	/* release the command and kill it */
1029 	scsi_release_buffers(cmd);
1030 	scsi_put_command(cmd);
1031 	return BLKPREP_KILL;
1032 }
1033 
1034 static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
1035 {
1036 	struct scsi_device *sdev = q->queuedata;
1037 	struct scsi_driver *drv;
1038 
1039 	if (sdev->sdev_state == SDEV_RUNNING) {
1040 		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
1041 
1042 		if (drv->prepare_flush)
1043 			return drv->prepare_flush(q, rq);
1044 	}
1045 
1046 	return 0;
1047 }
1048 
1049 static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
1050 {
1051 	struct scsi_device *sdev = q->queuedata;
1052 	struct request *flush_rq = rq->end_io_data;
1053 	struct scsi_driver *drv;
1054 
1055 	if (flush_rq->errors) {
1056 		printk(KERN_ERR "scsi: barrier error, disabling flush support\n");
1057 		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
1058 	}
1059 
1060 	if (sdev->sdev_state == SDEV_RUNNING) {
1061 		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
1062 		drv->end_flush(q, rq);
1063 	}
1064 }
1065 
1066 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1067 			       sector_t *error_sector)
1068 {
1069 	struct scsi_device *sdev = q->queuedata;
1070 	struct scsi_driver *drv;
1071 
1072 	if (sdev->sdev_state != SDEV_RUNNING)
1073 		return -ENXIO;
1074 
1075 	drv = *(struct scsi_driver **) disk->private_data;
1076 	if (drv->issue_flush)
1077 		return drv->issue_flush(&sdev->sdev_gendev, error_sector);
1078 
1079 	return -EOPNOTSUPP;
1080 }
1081 
1082 static void scsi_generic_done(struct scsi_cmnd *cmd)
1083 {
1084 	BUG_ON(!blk_pc_request(cmd->request));
1085 	scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
1086 }
1087 
1088 void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries)
1089 {
1090 	struct request *req = cmd->request;
1091 
1092 	BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
1093 	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1094 	cmd->cmd_len = req->cmd_len;
1095 	if (!req->data_len)
1096 		cmd->sc_data_direction = DMA_NONE;
1097 	else if (rq_data_dir(req) == WRITE)
1098 		cmd->sc_data_direction = DMA_TO_DEVICE;
1099 	else
1100 		cmd->sc_data_direction = DMA_FROM_DEVICE;
1101 
1102 	cmd->transfersize = req->data_len;
1103 	cmd->allowed = retries;
1104 	cmd->timeout_per_command = req->timeout;
1105 }
1106 EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
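
/*
 * A sketch of how a request preparation path uses the helper above for a
 * REQ_BLOCK_PC request, mirroring the generic branch in scsi_prep_fn()
 * below; my_done is a hypothetical completion hook, not a real symbol.
 *
 *	if (blk_pc_request(cmd->request)) {
 *		scsi_setup_blk_pc_cmnd(cmd, 3);
 *		cmd->done = my_done;
 *	}
 */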
1107 
1108 static int scsi_prep_fn(struct request_queue *q, struct request *req)
1109 {
1110 	struct scsi_device *sdev = q->queuedata;
1111 	struct scsi_cmnd *cmd;
1112 	int specials_only = 0;
1113 
1114 	/*
1115 	 * Just check to see if the device is online.  If it isn't, we
1116 	 * refuse to process any commands.  The device must be brought
1117 	 * online before trying any recovery commands
1118 	 */
1119 	if (unlikely(!scsi_device_online(sdev))) {
1120 		sdev_printk(KERN_ERR, sdev,
1121 			    "rejecting I/O to offline device\n");
1122 		goto kill;
1123 	}
1124 	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1125 		/* OK, we're not in a running state don't prep
1126 		 * user commands */
1127 		if (sdev->sdev_state == SDEV_DEL) {
1128 			/* Device is fully deleted, no commands
1129 			 * at all allowed down */
1130 			sdev_printk(KERN_ERR, sdev,
1131 				    "rejecting I/O to dead device\n");
1132 			goto kill;
1133 		}
1134 		/* OK, we only allow special commands (i.e. not
1135 		 * user initiated ones */
1136 		specials_only = sdev->sdev_state;
1137 	}
1138 
1139 	/*
1140 	 * Find the actual device driver associated with this command.
1141 	 * The SPECIAL requests are things like character device or
1142 	 * ioctls, which did not originate from ll_rw_blk.  Note that
1143 	 * the special field is also used to indicate the cmd for
1144 	 * the remainder of a partially fulfilled request that can
1145 	 * come up when there is a medium error.  We have to treat
1146 	 * these two cases differently.  We differentiate by looking
1147 	 * at request->cmd, as this tells us the real story.
1148 	 */
1149 	if ((req->flags & REQ_SPECIAL) && req->special) {
1150 		struct scsi_request *sreq = req->special;
1151 
1152 		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
1153 			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
1154 			if (unlikely(!cmd))
1155 				goto defer;
1156 			scsi_init_cmd_from_req(cmd, sreq);
1157 		} else
1158 			cmd = req->special;
1159 	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1160 
1161 		if (unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
1162 			if (specials_only == SDEV_QUIESCE ||
1163 					specials_only == SDEV_BLOCK)
1164 				goto defer;
1165 
1166 			sdev_printk(KERN_ERR, sdev,
1167 				    "rejecting I/O to device being removed\n");
1168 			goto kill;
1169 		}
1170 
1171 
1172 		/*
1173 		 * Now try and find a command block that we can use.
1174 		 */
1175 		if (!req->special) {
1176 			cmd = scsi_get_command(sdev, GFP_ATOMIC);
1177 			if (unlikely(!cmd))
1178 				goto defer;
1179 		} else
1180 			cmd = req->special;
1181 
1182 		/* pull a tag out of the request if we have one */
1183 		cmd->tag = req->tag;
1184 	} else {
1185 		blk_dump_rq_flags(req, "SCSI bad req");
1186 		goto kill;
1187 	}
1188 
1189 	/* note the overloading of req->special.  When the tag
1190 	 * is active it always means cmd.  If the tag goes
1191 	 * back for re-queueing, it may be reset */
1192 	req->special = cmd;
1193 	cmd->request = req;
1194 
1195 	/*
1196 	 * FIXME: drop the lock here because the functions below
1197 	 * expect to be called without the queue lock held.  Also,
1198 	 * previously, we dequeued the request before dropping the
1199 	 * lock.  We hope REQ_STARTED prevents anything untoward from
1200 	 * happening now.
1201 	 */
1202 	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1203 		struct scsi_driver *drv;
1204 		int ret;
1205 
1206 		/*
1207 		 * This will do a couple of things:
1208 		 *  1) Fill in the actual SCSI command.
1209 		 *  2) Fill in any other upper-level specific fields
1210 		 * (timeout).
1211 		 *
1212 		 * If this returns 0, it means that the request failed
1213 		 * (reading past end of disk, reading offline device,
1214 		 * etc).   This won't actually talk to the device, but
1215 		 * some kinds of consistency checking may cause the
1216 		 * request to be rejected immediately.
1217 		 */
1218 
1219 		/*
1220 		 * This sets up the scatter-gather table (allocating if
1221 		 * required).
1222 		 */
1223 		ret = scsi_init_io(cmd);
1224 		switch(ret) {
1225 			/* For BLKPREP_KILL/DEFER the cmd was released */
1226 		case BLKPREP_KILL:
1227 			goto kill;
1228 		case BLKPREP_DEFER:
1229 			goto defer;
1230 		}
1231 
1232 		/*
1233 		 * Initialize the actual SCSI command for this request.
1234 		 */
1235 		if (req->rq_disk) {
1236 			drv = *(struct scsi_driver **)req->rq_disk->private_data;
1237 			if (unlikely(!drv->init_command(cmd))) {
1238 				scsi_release_buffers(cmd);
1239 				scsi_put_command(cmd);
1240 				goto kill;
1241 			}
1242 		} else {
1243 			scsi_setup_blk_pc_cmnd(cmd, 3);
1244 			cmd->done = scsi_generic_done;
1245 		}
1246 	}
1247 
1248 	/*
1249 	 * The request is now prepped, no need to come back here
1250 	 */
1251 	req->flags |= REQ_DONTPREP;
1252 	return BLKPREP_OK;
1253 
1254  defer:
1255 	/* If we defer, elv_next_request() will return NULL, but the
1256 	 * queue must still be restarted, so plug it here unless a
1257 	 * returning command will restart it for us. */
1258 	if (sdev->device_busy == 0)
1259 		blk_plug_device(q);
1260 	return BLKPREP_DEFER;
1261  kill:
1262 	req->errors = DID_NO_CONNECT << 16;
1263 	return BLKPREP_KILL;
1264 }
1265 
1266 /*
1267  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1268  * return 0.
1269  *
1270  * Called with the queue_lock held.
1271  */
1272 static inline int scsi_dev_queue_ready(struct request_queue *q,
1273 				  struct scsi_device *sdev)
1274 {
1275 	if (sdev->device_busy >= sdev->queue_depth)
1276 		return 0;
1277 	if (sdev->device_busy == 0 && sdev->device_blocked) {
1278 		/*
1279 		 * unblock after device_blocked iterates to zero
1280 		 */
1281 		if (--sdev->device_blocked == 0) {
1282 			SCSI_LOG_MLQUEUE(3,
1283 				   sdev_printk(KERN_INFO, sdev,
1284 				   "unblocking device at zero depth\n"));
1285 		} else {
1286 			blk_plug_device(q);
1287 			return 0;
1288 		}
1289 	}
1290 	if (sdev->device_blocked)
1291 		return 0;
1292 
1293 	return 1;
1294 }
1295 
1296 /*
1297  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1298  * return 0. We must end up running the queue again whenever 0 is
1299  * returned, else IO can hang.
1300  *
1301  * Called with host_lock held.
1302  */
1303 static inline int scsi_host_queue_ready(struct request_queue *q,
1304 				   struct Scsi_Host *shost,
1305 				   struct scsi_device *sdev)
1306 {
1307 	if (scsi_host_in_recovery(shost))
1308 		return 0;
1309 	if (shost->host_busy == 0 && shost->host_blocked) {
1310 		/*
1311 		 * unblock after host_blocked iterates to zero
1312 		 */
1313 		if (--shost->host_blocked == 0) {
1314 			SCSI_LOG_MLQUEUE(3,
1315 				printk("scsi%d unblocking host at zero depth\n",
1316 					shost->host_no));
1317 		} else {
1318 			blk_plug_device(q);
1319 			return 0;
1320 		}
1321 	}
1322 	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1323 	    shost->host_blocked || shost->host_self_blocked) {
1324 		if (list_empty(&sdev->starved_entry))
1325 			list_add_tail(&sdev->starved_entry, &shost->starved_list);
1326 		return 0;
1327 	}
1328 
1329 	/* We're OK to process the command, so we can't be starved */
1330 	if (!list_empty(&sdev->starved_entry))
1331 		list_del_init(&sdev->starved_entry);
1332 
1333 	return 1;
1334 }
1335 
1336 /*
1337  * Kill a request for a dead device
1338  */
1339 static void scsi_kill_request(struct request *req, request_queue_t *q)
1340 {
1341 	struct scsi_cmnd *cmd = req->special;
1342 
1343 	blkdev_dequeue_request(req);
1344 
1345 	if (unlikely(cmd == NULL)) {
1346 		printk(KERN_CRIT "impossible request in %s.\n",
1347 				 __FUNCTION__);
1348 		BUG();
1349 	}
1350 
1351 	scsi_init_cmd_errh(cmd);
1352 	cmd->result = DID_NO_CONNECT << 16;
1353 	atomic_inc(&cmd->device->iorequest_cnt);
1354 	__scsi_done(cmd);
1355 }
1356 
1357 /*
1358  * Function:    scsi_request_fn()
1359  *
1360  * Purpose:     Main strategy routine for SCSI.
1361  *
1362  * Arguments:   q       - Pointer to actual queue.
1363  *
1364  * Returns:     Nothing
1365  *
1366  * Lock status: IO request lock assumed to be held when called.
1367  */
1368 static void scsi_request_fn(struct request_queue *q)
1369 {
1370 	struct scsi_device *sdev = q->queuedata;
1371 	struct Scsi_Host *shost;
1372 	struct scsi_cmnd *cmd;
1373 	struct request *req;
1374 
1375 	if (!sdev) {
1376 		printk(KERN_INFO "scsi: killing requests for dead queue\n");
1377 		while ((req = elv_next_request(q)) != NULL)
1378 			scsi_kill_request(req, q);
1379 		return;
1380 	}
1381 
1382 	if (!get_device(&sdev->sdev_gendev))
1383 		/* We must be tearing the block queue down already */
1384 		return;
1385 
1386 	/*
1387 	 * To start with, we keep looping until the queue is empty, or until
1388 	 * the host is no longer able to accept any more requests.
1389 	 */
1390 	shost = sdev->host;
1391 	while (!blk_queue_plugged(q)) {
1392 		int rtn;
1393 		/*
1394 		 * get next queueable request.  We do this early to make sure
1395 		 * that the request is fully prepared even if we cannot
1396 		 * accept it.
1397 		 */
1398 		req = elv_next_request(q);
1399 		if (!req || !scsi_dev_queue_ready(q, sdev))
1400 			break;
1401 
1402 		if (unlikely(!scsi_device_online(sdev))) {
1403 			sdev_printk(KERN_ERR, sdev,
1404 				    "rejecting I/O to offline device\n");
1405 			scsi_kill_request(req, q);
1406 			continue;
1407 		}
1408 
1409 
1410 		/*
1411 		 * Remove the request from the request list.
1412 		 */
1413 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1414 			blkdev_dequeue_request(req);
1415 		sdev->device_busy++;
1416 
1417 		spin_unlock(q->queue_lock);
1418 		cmd = req->special;
1419 		if (unlikely(cmd == NULL)) {
1420 			printk(KERN_CRIT "impossible request in %s.\n"
1421 					 "please mail a stack trace to "
1422 					 "linux-scsi@vger.kernel.org\n",
1423 					 __FUNCTION__);
1424 			BUG();
1425 		}
1426 		spin_lock(shost->host_lock);
1427 
1428 		if (!scsi_host_queue_ready(q, shost, sdev))
1429 			goto not_ready;
1430 		if (sdev->single_lun) {
1431 			if (scsi_target(sdev)->starget_sdev_user &&
1432 			    scsi_target(sdev)->starget_sdev_user != sdev)
1433 				goto not_ready;
1434 			scsi_target(sdev)->starget_sdev_user = sdev;
1435 		}
1436 		shost->host_busy++;
1437 
1438 		/*
1439 		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1440 		 *		take the lock again.
1441 		 */
1442 		spin_unlock_irq(shost->host_lock);
1443 
1444 		/*
1445 		 * Finally, initialize any error handling parameters, and set up
1446 		 * the timers for timeouts.
1447 		 */
1448 		scsi_init_cmd_errh(cmd);
1449 
1450 		/*
1451 		 * Dispatch the command to the low-level driver.
1452 		 */
1453 		rtn = scsi_dispatch_cmd(cmd);
1454 		spin_lock_irq(q->queue_lock);
1455 		if (rtn) {
1456 			/* we're refusing the command; because of
1457 			 * the way locks get dropped, we need to
1458 			 * check here if plugging is required */
1459 			if (sdev->device_busy == 0)
1460 				blk_plug_device(q);
1461 
1462 			break;
1463 		}
1464 	}
1465 
1466 	goto out;
1467 
1468  not_ready:
1469 	spin_unlock_irq(shost->host_lock);
1470 
1471 	/*
1472 	 * lock q, handle tag, requeue req, and decrement device_busy. We
1473 	 * must return with queue_lock held.
1474 	 *
1475 	 * Decrementing device_busy without checking it is OK, as all such
1476 	 * cases (host limits or settings) should run the queue at some
1477 	 * later time.
1478 	 */
1479 	spin_lock_irq(q->queue_lock);
1480 	blk_requeue_request(q, req);
1481 	sdev->device_busy--;
1482 	if (sdev->device_busy == 0)
1483 		blk_plug_device(q);
1484  out:
1485 	/* must be careful here...if we trigger the ->remove() function
1486 	 * we cannot be holding the q lock */
1487 	spin_unlock_irq(q->queue_lock);
1488 	put_device(&sdev->sdev_gendev);
1489 	spin_lock_irq(q->queue_lock);
1490 }
1491 
1492 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1493 {
1494 	struct device *host_dev;
1495 	u64 bounce_limit = 0xffffffff;
1496 
1497 	if (shost->unchecked_isa_dma)
1498 		return BLK_BOUNCE_ISA;
1499 	/*
1500 	 * Platforms with virtual-DMA translation
1501 	 * hardware have no practical limit.
1502 	 */
1503 	if (!PCI_DMA_BUS_IS_PHYS)
1504 		return BLK_BOUNCE_ANY;
1505 
1506 	host_dev = scsi_get_device(shost);
1507 	if (host_dev && host_dev->dma_mask)
1508 		bounce_limit = *host_dev->dma_mask;
1509 
1510 	return bounce_limit;
1511 }
1512 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1513 
1514 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1515 {
1516 	struct Scsi_Host *shost = sdev->host;
1517 	struct request_queue *q;
1518 
1519 	q = blk_init_queue(scsi_request_fn, NULL);
1520 	if (!q)
1521 		return NULL;
1522 
1523 	blk_queue_prep_rq(q, scsi_prep_fn);
1524 
1525 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
1526 	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1527 	blk_queue_max_sectors(q, shost->max_sectors);
1528 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1529 	blk_queue_segment_boundary(q, shost->dma_boundary);
1530 	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1531 
1532 	/*
1533 	 * ordered tags are superior to flush ordering
1534 	 */
1535 	if (shost->ordered_tag)
1536 		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1537 	else if (shost->ordered_flush) {
1538 		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1539 		q->prepare_flush_fn = scsi_prepare_flush_fn;
1540 		q->end_flush_fn = scsi_end_flush_fn;
1541 	}
1542 
1543 	if (!shost->use_clustering)
1544 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1545 	return q;
1546 }
1547 
1548 void scsi_free_queue(struct request_queue *q)
1549 {
1550 	blk_cleanup_queue(q);
1551 }
1552 
1553 /*
1554  * Function:    scsi_block_requests()
1555  *
1556  * Purpose:     Utility function used by low-level drivers to prevent further
1557  *		commands from being queued to the device.
1558  *
1559  * Arguments:   shost       - Host in question
1560  *
1561  * Returns:     Nothing
1562  *
1563  * Lock status: No locks are assumed held.
1564  *
1565  * Notes:       There is no timer nor any other means by which the requests
1566  *		get unblocked other than the low-level driver calling
1567  *		scsi_unblock_requests().
1568  */
1569 void scsi_block_requests(struct Scsi_Host *shost)
1570 {
1571 	shost->host_self_blocked = 1;
1572 }
1573 EXPORT_SYMBOL(scsi_block_requests);
1574 
1575 /*
1576  * Function:    scsi_unblock_requests()
1577  *
1578  * Purpose:     Utility function used by low-level drivers to allow further
1579  *		commands to be queued to the device.
1580  *
1581  * Arguments:   shost       - Host in question
1582  *
1583  * Returns:     Nothing
1584  *
1585  * Lock status: No locks are assumed held.
1586  *
1587  * Notes:       There is no timer nor any other means by which the requests
1588  *		get unblocked other than the low-level driver calling
1589  *		scsi_unblock_requests().
1590  *
1591  *		This is done as an API function so that changes to the
1592  *		internals of the scsi mid-layer won't require wholesale
1593  *		changes to drivers that use this feature.
1594  */
1595 void scsi_unblock_requests(struct Scsi_Host *shost)
1596 {
1597 	shost->host_self_blocked = 0;
1598 	scsi_run_host_queues(shost);
1599 }
1600 EXPORT_SYMBOL(scsi_unblock_requests);
1601 
1602 int __init scsi_init_queue(void)
1603 {
1604 	int i;
1605 
1606 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1607 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1608 		int size = sgp->size * sizeof(struct scatterlist);
1609 
1610 		sgp->slab = kmem_cache_create(sgp->name, size, 0,
1611 				SLAB_HWCACHE_ALIGN, NULL, NULL);
1612 		if (!sgp->slab) {
1613 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1614 					sgp->name);
			return -ENOMEM;
1615 		}
1616 
1617 		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1618 				mempool_alloc_slab, mempool_free_slab,
1619 				sgp->slab);
1620 		if (!sgp->pool) {
1621 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1622 					sgp->name);
			return -ENOMEM;
1623 		}
1624 	}
1625 
1626 	return 0;
1627 }
1628 
1629 void scsi_exit_queue(void)
1630 {
1631 	int i;
1632 
1633 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1634 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1635 		mempool_destroy(sgp->pool);
1636 		kmem_cache_destroy(sgp->slab);
1637 	}
1638 }
1639 /**
1640  *	scsi_mode_sense - issue a mode sense, falling back from 10 to
1641  *		six bytes if necessary.
1642  *	@sdev:	SCSI device to be queried
1643  *	@dbd:	set if mode sense will allow block descriptors to be returned
1644  *	@modepage: mode page being requested
1645  *	@buffer: request buffer (may not be smaller than eight bytes)
1646  *	@len:	length of request buffer.
1647  *	@timeout: command timeout
1648  *	@retries: number of retries before failing
1649  *	@data: returns a structure abstracting the mode header data
1650  *	@sense: place to put sense data (or NULL if no sense to be collected).
1651  *		must be SCSI_SENSE_BUFFERSIZE big.
1652  *
1653  *	Returns the scsi_execute_req() result: zero on success.  The
1654  *	header length (4 or 8, depending on whether a six or ten byte
1655  *	command was issued) is returned in data->header_length.
1656  **/
1657 int
1658 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1659 		  unsigned char *buffer, int len, int timeout, int retries,
1660 		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
1661 	unsigned char cmd[12];
1662 	int use_10_for_ms;
1663 	int header_length;
1664 	int result;
1665 	struct scsi_sense_hdr my_sshdr;
1666 
1667 	memset(data, 0, sizeof(*data));
1668 	memset(&cmd[0], 0, 12);
1669 	cmd[1] = dbd & 0x18;	/* allows DBD and LLBAA bits */
1670 	cmd[2] = modepage;
1671 
1672 	/* caller might not be interested in sense, but we need it */
1673 	if (!sshdr)
1674 		sshdr = &my_sshdr;
1675 
1676  retry:
1677 	use_10_for_ms = sdev->use_10_for_ms;
1678 
1679 	if (use_10_for_ms) {
1680 		if (len < 8)
1681 			len = 8;
1682 
1683 		cmd[0] = MODE_SENSE_10;
1684 		cmd[8] = len;
1685 		header_length = 8;
1686 	} else {
1687 		if (len < 4)
1688 			len = 4;
1689 
1690 		cmd[0] = MODE_SENSE;
1691 		cmd[4] = len;
1692 		header_length = 4;
1693 	}
1694 
1695 	memset(buffer, 0, len);
1696 
1697 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1698 				  sshdr, timeout, retries);
1699 
1700 	/* This code looks awful: what it's doing is making sure an
1701 	 * ILLEGAL REQUEST sense return identifies the actual command
1702 	 * byte as the problem.  MODE_SENSE commands can return
1703 	 * ILLEGAL REQUEST if the code page isn't supported */
1704 
1705 	if (use_10_for_ms && !scsi_status_is_good(result) &&
1706 	    (driver_byte(result) & DRIVER_SENSE)) {
1707 		if (scsi_sense_valid(sshdr)) {
1708 			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1709 			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1710 				/*
1711 				 * Invalid command operation code
1712 				 */
1713 				sdev->use_10_for_ms = 0;
1714 				goto retry;
1715 			}
1716 		}
1717 	}
1718 
1719 	if (scsi_status_is_good(result)) {
1720 		data->header_length = header_length;
1721 		if (use_10_for_ms) {
1722 			data->length = buffer[0]*256 + buffer[1] + 2;
1723 			data->medium_type = buffer[2];
1724 			data->device_specific = buffer[3];
1725 			data->longlba = buffer[4] & 0x01;
1726 			data->block_descriptor_length = buffer[6]*256
1727 				+ buffer[7];
1728 		} else {
1729 			data->length = buffer[0] + 1;
1730 			data->medium_type = buffer[1];
1731 			data->device_specific = buffer[2];
1732 			data->block_descriptor_length = buffer[3];
1733 		}
1734 	}
1735 
1736 	return result;
1737 }
1738 EXPORT_SYMBOL(scsi_mode_sense);
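
/*
 * A usage sketch for scsi_mode_sense(): fetch the caching mode page
 * (0x08) and step past the returned header and block descriptors.  The
 * buffer size, timeout and retry count are illustrative values.
 *
 *	struct scsi_mode_data data;
 *	unsigned char buffer[128];
 *
 *	if (scsi_status_is_good(scsi_mode_sense(sdev, 0, 0x08, buffer,
 *						sizeof(buffer), 10 * HZ, 3,
 *						&data, NULL))) {
 *		unsigned char *page = buffer + data.header_length +
 *				      data.block_descriptor_length;
 *		...
 *	}
 */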
1739 
1740 int
1741 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1742 {
1743 	char cmd[] = {
1744 		TEST_UNIT_READY, 0, 0, 0, 0, 0,
1745 	};
1746 	struct scsi_sense_hdr sshdr;
1747 	int result;
1748 
1749 	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1750 				  timeout, retries);
1751 
1752 	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1753 
1754 		if ((scsi_sense_valid(&sshdr)) &&
1755 		    ((sshdr.sense_key == UNIT_ATTENTION) ||
1756 		     (sshdr.sense_key == NOT_READY))) {
1757 			sdev->changed = 1;
1758 			result = 0;
1759 		}
1760 	}
1761 	return result;
1762 }
1763 EXPORT_SYMBOL(scsi_test_unit_ready);
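
/*
 * A polling sketch for scsi_test_unit_ready(); zero means the unit is
 * ready (or, for removable media, that a media change was absorbed and
 * sdev->changed set).  Timeout/retries are illustrative values.
 *
 *	if (scsi_test_unit_ready(sdev, 10 * HZ, 3) == 0)
 *		(device is ready for media access)
 */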
1764 
1765 /**
1766  *	scsi_device_set_state - Take the given device through the device
1767  *		state model.
1768  *	@sdev:	scsi device to change the state of.
1769  *	@state:	state to change to.
1770  *
1771  *	Returns zero if successful, or -EINVAL if the requested
1772  *	transition is illegal.
1773  **/
1774 int
1775 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1776 {
1777 	enum scsi_device_state oldstate = sdev->sdev_state;
1778 
1779 	if (state == oldstate)
1780 		return 0;
1781 
1782 	switch (state) {
1783 	case SDEV_CREATED:
1784 		/* There are no legal states that come back to
1785 		 * created.  This is the manually initialised start
1786 		 * state */
1787 		goto illegal;
1788 
1789 	case SDEV_RUNNING:
1790 		switch (oldstate) {
1791 		case SDEV_CREATED:
1792 		case SDEV_OFFLINE:
1793 		case SDEV_QUIESCE:
1794 		case SDEV_BLOCK:
1795 			break;
1796 		default:
1797 			goto illegal;
1798 		}
1799 		break;
1800 
1801 	case SDEV_QUIESCE:
1802 		switch (oldstate) {
1803 		case SDEV_RUNNING:
1804 		case SDEV_OFFLINE:
1805 			break;
1806 		default:
1807 			goto illegal;
1808 		}
1809 		break;
1810 
1811 	case SDEV_OFFLINE:
1812 		switch (oldstate) {
1813 		case SDEV_CREATED:
1814 		case SDEV_RUNNING:
1815 		case SDEV_QUIESCE:
1816 		case SDEV_BLOCK:
1817 			break;
1818 		default:
1819 			goto illegal;
1820 		}
1821 		break;
1822 
1823 	case SDEV_BLOCK:
1824 		switch (oldstate) {
1825 		case SDEV_CREATED:
1826 		case SDEV_RUNNING:
1827 			break;
1828 		default:
1829 			goto illegal;
1830 		}
1831 		break;
1832 
1833 	case SDEV_CANCEL:
1834 		switch (oldstate) {
1835 		case SDEV_CREATED:
1836 		case SDEV_RUNNING:
1837 		case SDEV_OFFLINE:
1838 		case SDEV_BLOCK:
1839 			break;
1840 		default:
1841 			goto illegal;
1842 		}
1843 		break;
1844 
1845 	case SDEV_DEL:
1846 		switch (oldstate) {
1847 		case SDEV_CANCEL:
1848 			break;
1849 		default:
1850 			goto illegal;
1851 		}
1852 		break;
1853 
1854 	}
1855 	sdev->sdev_state = state;
1856 	return 0;
1857 
1858  illegal:
1859 	SCSI_LOG_ERROR_RECOVERY(1,
1860 				sdev_printk(KERN_ERR, sdev,
1861 					    "Illegal state transition %s->%s\n",
1862 					    scsi_device_state_name(oldstate),
1863 					    scsi_device_state_name(state))
1864 				);
1865 	return -EINVAL;
1866 }
1867 EXPORT_SYMBOL(scsi_device_set_state);
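
/*
 * The legal transitions implemented above, summarised (target state <-
 * legal previous states):
 *
 *	SDEV_CREATED <- (none: initial state only)
 *	SDEV_RUNNING <- CREATED, OFFLINE, QUIESCE, BLOCK
 *	SDEV_QUIESCE <- RUNNING, OFFLINE
 *	SDEV_OFFLINE <- CREATED, RUNNING, QUIESCE, BLOCK
 *	SDEV_BLOCK   <- CREATED, RUNNING
 *	SDEV_CANCEL  <- CREATED, RUNNING, OFFLINE, BLOCK
 *	SDEV_DEL     <- CANCEL
 */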
1868 
1869 /**
1870  *	scsi_device_quiesce - Block user issued commands.
1871  *	@sdev:	scsi device to quiesce.
1872  *
1873  *	This works by trying to transition to the SDEV_QUIESCE state
1874  *	(which must be a legal transition).  When the device is in this
1875  *	state, only special requests will be accepted, all others will
1876  *	be deferred.  Since special requests may also be requeued requests,
1877  *	a successful return doesn't guarantee the device will be
1878  *	totally quiescent.
1879  *
1880  *	Must be called with user context, may sleep.
1881  *
1882  *	Returns zero if successful or an error if not.
1883  **/
1884 int
1885 scsi_device_quiesce(struct scsi_device *sdev)
1886 {
1887 	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1888 	if (err)
1889 		return err;
1890 
1891 	scsi_run_queue(sdev->request_queue);
1892 	while (sdev->device_busy) {
1893 		msleep_interruptible(200);
1894 		scsi_run_queue(sdev->request_queue);
1895 	}
1896 	return 0;
1897 }
1898 EXPORT_SYMBOL(scsi_device_quiesce);
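
/*
 * A quiesce/resume pairing sketch for a caller that needs the device
 * free of user I/O (e.g. before issuing special requests); illustrative
 * only, and must run in user context since both calls may sleep.
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		(issue special requests, e.g. via scsi_execute())
 *		scsi_device_resume(sdev);
 *	}
 */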
1899 
1900 /**
1901  *	scsi_device_resume - Restart user issued commands to a quiesced device.
1902  *	@sdev:	scsi device to resume.
1903  *
1904  *	Moves the device from quiesced back to running and restarts the
1905  *	queues.
1906  *
1907  *	Must be called with user context, may sleep.
1908  **/
1909 void
1910 scsi_device_resume(struct scsi_device *sdev)
1911 {
1912 	if (scsi_device_set_state(sdev, SDEV_RUNNING))
1913 		return;
1914 	scsi_run_queue(sdev->request_queue);
1915 }
1916 EXPORT_SYMBOL(scsi_device_resume);
1917 
1918 static void
1919 device_quiesce_fn(struct scsi_device *sdev, void *data)
1920 {
1921 	scsi_device_quiesce(sdev);
1922 }
1923 
1924 void
1925 scsi_target_quiesce(struct scsi_target *starget)
1926 {
1927 	starget_for_each_device(starget, NULL, device_quiesce_fn);
1928 }
1929 EXPORT_SYMBOL(scsi_target_quiesce);
1930 
1931 static void
1932 device_resume_fn(struct scsi_device *sdev, void *data)
1933 {
1934 	scsi_device_resume(sdev);
1935 }
1936 
1937 void
1938 scsi_target_resume(struct scsi_target *starget)
1939 {
1940 	starget_for_each_device(starget, NULL, device_resume_fn);
1941 }
1942 EXPORT_SYMBOL(scsi_target_resume);
1943 
1944 /**
1945  * scsi_internal_device_block - internal function to put a device
1946  *				temporarily into the SDEV_BLOCK state
1947  * @sdev:	device to block
1948  *
1949  * Block request made by SCSI LLDs to temporarily stop all
1950  * scsi commands on the specified device.  Called from interrupt
1951  * or normal process context.
1952  *
1953  * Returns zero if successful or error if not
1954  *
1955  * Notes:
1956  *	This routine transitions the device to the SDEV_BLOCK state
1957  *	(which must be a legal transition).  When the device is in this
1958  *	state, all commands are deferred until the scsi lld reenables
1959  *	the device with scsi_internal_device_unblock or device_block_tmo fires.
1960  *	This routine assumes the host_lock is held on entry.
1961  **/
1962 int
1963 scsi_internal_device_block(struct scsi_device *sdev)
1964 {
1965 	request_queue_t *q = sdev->request_queue;
1966 	unsigned long flags;
1967 	int err = 0;
1968 
1969 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
1970 	if (err)
1971 		return err;
1972 
1973 	/*
1974 	 * The device has transitioned to SDEV_BLOCK.  Stop the
1975 	 * block layer from calling the midlayer with this device's
1976 	 * request queue.
1977 	 */
1978 	spin_lock_irqsave(q->queue_lock, flags);
1979 	blk_stop_queue(q);
1980 	spin_unlock_irqrestore(q->queue_lock, flags);
1981 
1982 	return 0;
1983 }
1984 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
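
/*
 * A block/unblock pairing sketch for an LLD recovery path.  Per the
 * notes above and below, both helpers assume host_lock is held on entry;
 * shost and flags are the caller's locals.  Illustrative only.
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_block(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *
 *	... later, once the hardware has recovered ...
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_unblock(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */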
1985 
1986 /**
1987  * scsi_internal_device_unblock - resume a device after a block request
1988  * @sdev:	device to resume
1989  *
1990  * Called by scsi lld's or the midlayer to restart the device queue
1991  * for the previously suspended scsi device.  Called from interrupt or
1992  * normal process context.
1993  *
1994  * Returns zero if successful or error if not.
1995  *
1996  * Notes:
1997  *	This routine transitions the device to the SDEV_RUNNING state
1998  *	(which must be a legal transition) allowing the midlayer to
1999  *	goose the queue for this device.  This routine assumes the
2000  *	host_lock is held upon entry.
2001  **/
2002 int
2003 scsi_internal_device_unblock(struct scsi_device *sdev)
2004 {
2005 	request_queue_t *q = sdev->request_queue;
2006 	int err;
2007 	unsigned long flags;
2008 
2009 	/*
2010 	 * Try to transition the scsi device to SDEV_RUNNING
2011 	 * and goose the device queue if successful.
2012 	 */
2013 	err = scsi_device_set_state(sdev, SDEV_RUNNING);
2014 	if (err)
2015 		return err;
2016 
2017 	spin_lock_irqsave(q->queue_lock, flags);
2018 	blk_start_queue(q);
2019 	spin_unlock_irqrestore(q->queue_lock, flags);
2020 
2021 	return 0;
2022 }
2023 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2024 
2025 static void
2026 device_block(struct scsi_device *sdev, void *data)
2027 {
2028 	scsi_internal_device_block(sdev);
2029 }
2030 
2031 static int
2032 target_block(struct device *dev, void *data)
2033 {
2034 	if (scsi_is_target_device(dev))
2035 		starget_for_each_device(to_scsi_target(dev), NULL,
2036 					device_block);
2037 	return 0;
2038 }
2039 
2040 void
2041 scsi_target_block(struct device *dev)
2042 {
2043 	if (scsi_is_target_device(dev))
2044 		starget_for_each_device(to_scsi_target(dev), NULL,
2045 					device_block);
2046 	else
2047 		device_for_each_child(dev, NULL, target_block);
2048 }
2049 EXPORT_SYMBOL_GPL(scsi_target_block);
2050 
2051 static void
2052 device_unblock(struct scsi_device *sdev, void *data)
2053 {
2054 	scsi_internal_device_unblock(sdev);
2055 }
2056 
2057 static int
2058 target_unblock(struct device *dev, void *data)
2059 {
2060 	if (scsi_is_target_device(dev))
2061 		starget_for_each_device(to_scsi_target(dev), NULL,
2062 					device_unblock);
2063 	return 0;
2064 }
2065 
2066 void
2067 scsi_target_unblock(struct device *dev)
2068 {
2069 	if (scsi_is_target_device(dev))
2070 		starget_for_each_device(to_scsi_target(dev), NULL,
2071 					device_unblock);
2072 	else
2073 		device_for_each_child(dev, NULL, target_unblock);
2074 }
2075 EXPORT_SYMBOL_GPL(scsi_target_unblock);
2076