xref: /linux/drivers/scsi/scsi_error.c (revision 858259cf7d1c443c836a2022b78cb281f0a9b95e)
1 /*
2  *  scsi_error.c Copyright (C) 1997 Eric Youngdale
3  *
4  *  SCSI error/timeout handling
5  *      Initial versions: Eric Youngdale.  Based upon conversations with
6  *                        Leonard Zubkoff and David Miller at Linux Expo,
7  *                        ideas originating from all over the place.
8  *
9  *	Restructured scsi_unjam_host and associated functions.
10  *	September 04, 2002 Mike Anderson (andmike@us.ibm.com)
11  *
12  *	Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
13  *	minor  cleanups.
14  *	September 30, 2002 Mike Anderson (andmike@us.ibm.com)
15  */
16 
17 #include <linux/module.h>
18 #include <linux/sched.h>
19 #include <linux/timer.h>
20 #include <linux/string.h>
21 #include <linux/slab.h>
22 #include <linux/kernel.h>
23 #include <linux/kthread.h>
24 #include <linux/interrupt.h>
25 #include <linux/blkdev.h>
26 #include <linux/delay.h>
27 
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_dbg.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_eh.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_ioctl.h>
34 #include <scsi/scsi_request.h>
35 
36 #include "scsi_priv.h"
37 #include "scsi_logging.h"
38 
39 #define SENSE_TIMEOUT		(10*HZ)
40 #define START_UNIT_TIMEOUT	(30*HZ)
41 
42 /*
43  * These should *probably* be handled by the host itself.
44  * Since it is allowed to sleep, it probably should.
45  */
46 #define BUS_RESET_SETTLE_TIME   (10)
47 #define HOST_RESET_SETTLE_TIME  (10)
48 
49 /* called with shost->host_lock held */
50 void scsi_eh_wakeup(struct Scsi_Host *shost)
51 {
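	/*
	 * The error handler may only run once every command outstanding on
	 * this host has either completed or been marked failed, so the
	 * thread is woken only when host_busy drops to match host_failed.
	 */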
52 	if (shost->host_busy == shost->host_failed) {
53 		wake_up_process(shost->ehandler);
54 		SCSI_LOG_ERROR_RECOVERY(5,
55 				printk("Waking error handler thread\n"));
56 	}
57 }
58 
59 /**
60  * scsi_eh_scmd_add - add scsi cmd to error handling.
61  * @scmd:	scmd to run eh on.
62  * @eh_flag:	optional SCSI_EH flag.
63  *
64  * Return value:
 65  *	0 on failure, 1 on success.
66  **/
67 int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
68 {
69 	struct Scsi_Host *shost = scmd->device->host;
70 	unsigned long flags;
71 	int ret = 0;
72 
73 	if (!shost->ehandler)
74 		return 0;
75 
76 	spin_lock_irqsave(shost->host_lock, flags);
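	/*
	 * scsi_host_set_state() returns 0 on success, so the host is put in
	 * SHOST_RECOVERY if possible, falling back to SHOST_CANCEL_RECOVERY;
	 * only if neither transition is legal do we refuse the command.
	 */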
77 	if (scsi_host_set_state(shost, SHOST_RECOVERY))
78 		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
79 			goto out_unlock;
80 
81 	ret = 1;
82 	scmd->eh_eflags |= eh_flag;
83 	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
84 	shost->host_failed++;
85 	scsi_eh_wakeup(shost);
86  out_unlock:
87 	spin_unlock_irqrestore(shost->host_lock, flags);
88 	return ret;
89 }
90 
91 /**
92  * scsi_add_timer - Start timeout timer for a single scsi command.
93  * @scmd:	scsi command that is about to start running.
94  * @timeout:	amount of time to allow this command to run.
95  * @complete:	timeout function to call if timer isn't canceled.
96  *
97  * Notes:
98  *    This should be turned into an inline function.  Each scsi command
99  *    has its own timer, and as it is added to the queue, we set up the
100  *    timer.  When the command completes, we cancel the timer.
101  **/
102 void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
103 		    void (*complete)(struct scsi_cmnd *))
104 {
105 
106 	/*
107 	 * If the clock was already running for this command, then
108 	 * first delete the timer.  The timer handling code gets rather
109 	 * confused if we don't do this.
110 	 */
111 	if (scmd->eh_timeout.function)
112 		del_timer(&scmd->eh_timeout);
113 
114 	scmd->eh_timeout.data = (unsigned long)scmd;
115 	scmd->eh_timeout.expires = jiffies + timeout;
116 	scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
117 
118 	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
119 					  " %d, (%p)\n", __FUNCTION__,
120 					  scmd, timeout, complete));
121 
122 	add_timer(&scmd->eh_timeout);
123 }
124 
125 /**
126  * scsi_delete_timer - Delete/cancel timer for a given function.
127  * @scmd:	Cmd that we are canceling timer for
128  *
129  * Notes:
130  *     This should be turned into an inline function.
131  *
132  * Return value:
133  *     1 if we were able to detach the timer.  0 if we blew it, and the
134  *     timer function has already started to run.
135  **/
136 int scsi_delete_timer(struct scsi_cmnd *scmd)
137 {
138 	int rtn;
139 
140 	rtn = del_timer(&scmd->eh_timeout);
141 
142 	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
143 					 " rtn: %d\n", __FUNCTION__,
144 					 scmd, rtn));
145 
146 	scmd->eh_timeout.data = (unsigned long)NULL;
147 	scmd->eh_timeout.function = NULL;
148 
149 	return rtn;
150 }
151 
152 /**
153  * scsi_times_out - Timeout function for normal scsi commands.
154  * @scmd:	Cmd that is timing out.
155  *
156  * Notes:
157  *     We do not need to lock this.  There is the potential for a race
158  *     only in that the normal completion handling might run, but if the
159  *     normal completion function determines that the timer has already
160  *     fired, then it mustn't do anything.
161  **/
162 void scsi_times_out(struct scsi_cmnd *scmd)
163 {
164 	scsi_log_completion(scmd, TIMEOUT_ERROR);
165 
166 	if (scmd->device->host->hostt->eh_timed_out)
167 		switch (scmd->device->host->hostt->eh_timed_out(scmd)) {
168 		case EH_HANDLED:
169 			__scsi_done(scmd);
170 			return;
171 		case EH_RESET_TIMER:
172 			/* This allows a single retry even of a command
173 			 * with allowed == 0 */
174 			if (scmd->retries++ > scmd->allowed)
175 				break;
176 			scsi_add_timer(scmd, scmd->timeout_per_command,
177 				       scsi_times_out);
178 			return;
179 		case EH_NOT_HANDLED:
180 			break;
181 		}
182 
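	/*
	 * scsi_eh_scmd_add() returns 0 when the error handler cannot take
	 * the command (no handler thread, or the host state could not be
	 * changed); in that case complete it here with DID_TIME_OUT so the
	 * command is not lost.
	 */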
183 	if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
184 		scmd->result |= DID_TIME_OUT << 16;
185 		__scsi_done(scmd);
186 	}
187 }
188 
189 /**
190  * scsi_block_when_processing_errors - Prevent cmds from being queued.
191  * @sdev:	Device on which we are performing recovery.
192  *
193  * Description:
194  *     We block until the host is out of error recovery, and then check to
195  *     see whether the host or the device is offline.
196  *
197  * Return value:
198  *     0 when dev was taken offline by error recovery. 1 OK to proceed.
199  **/
200 int scsi_block_when_processing_errors(struct scsi_device *sdev)
201 {
202 	int online;
203 
204 	wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
205 
206 	online = scsi_device_online(sdev);
207 
208 	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__,
209 					  online));
210 
211 	return online;
212 }
213 EXPORT_SYMBOL(scsi_block_when_processing_errors);
214 
215 #ifdef CONFIG_SCSI_LOGGING
216 /**
217  * scsi_eh_prt_fail_stats - Log info on failures.
218  * @shost:	scsi host being recovered.
219  * @work_q:	Queue of scsi cmds to process.
220  **/
221 static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
222 					  struct list_head *work_q)
223 {
224 	struct scsi_cmnd *scmd;
225 	struct scsi_device *sdev;
226 	int total_failures = 0;
227 	int cmd_failed = 0;
228 	int cmd_cancel = 0;
229 	int devices_failed = 0;
230 
231 	shost_for_each_device(sdev, shost) {
232 		list_for_each_entry(scmd, work_q, eh_entry) {
233 			if (scmd->device == sdev) {
234 				++total_failures;
235 				if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
236 					++cmd_cancel;
237 				else
238 					++cmd_failed;
239 			}
240 		}
241 
242 		if (cmd_cancel || cmd_failed) {
243 			SCSI_LOG_ERROR_RECOVERY(3,
244 				sdev_printk(KERN_INFO, sdev,
245 					    "%s: cmds failed: %d, cancel: %d\n",
246 					    __FUNCTION__, cmd_failed,
247 					    cmd_cancel));
248 			cmd_cancel = 0;
249 			cmd_failed = 0;
250 			++devices_failed;
251 		}
252 	}
253 
254 	SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
255 					  " devices require eh work\n",
256 				  total_failures, devices_failed));
257 }
258 #endif
259 
260 /**
261  * scsi_check_sense - Examine scsi cmd sense
262  * @scmd:	Cmd to have sense checked.
263  *
264  * Return value:
265  * 	SUCCESS or FAILED or NEEDS_RETRY
266  *
267  * Notes:
268  *	When a deferred error is detected the current command has
269  *	not been executed and needs retrying.
270  **/
271 static int scsi_check_sense(struct scsi_cmnd *scmd)
272 {
273 	struct scsi_sense_hdr sshdr;
274 
275 	if (! scsi_command_normalize_sense(scmd, &sshdr))
276 		return FAILED;	/* no valid sense data */
277 
278 	if (scsi_sense_is_deferred(&sshdr))
279 		return NEEDS_RETRY;
280 
281 	/*
282 	 * Previous logic looked for FILEMARK, EOM or ILI which are
283 	 * mainly associated with tapes and returned SUCCESS.
284 	 */
285 	if (sshdr.response_code == 0x70) {
286 		/* fixed format */
287 		if (scmd->sense_buffer[2] & 0xe0)
288 			return SUCCESS;
289 	} else {
290 		/*
291 		 * descriptor format: look for "stream commands sense data
292 		 * descriptor" (see SSC-3). Assume single sense data
293 		 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
294 		 */
295 		if ((sshdr.additional_length > 3) &&
296 		    (scmd->sense_buffer[8] == 0x4) &&
297 		    (scmd->sense_buffer[11] & 0xe0))
298 			return SUCCESS;
299 	}
300 
301 	switch (sshdr.sense_key) {
302 	case NO_SENSE:
303 		return SUCCESS;
304 	case RECOVERED_ERROR:
305 		return /* soft_error */ SUCCESS;
306 
307 	case ABORTED_COMMAND:
308 		return NEEDS_RETRY;
309 	case NOT_READY:
310 	case UNIT_ATTENTION:
311 		/*
312 		 * if we are expecting a cc/ua because of a bus reset that we
313 		 * performed, treat this just as a retry.  otherwise this is
314 		 * information that we should pass up to the upper-level driver
315 		 * so that we can deal with it there.
316 		 */
317 		if (scmd->device->expecting_cc_ua) {
318 			scmd->device->expecting_cc_ua = 0;
319 			return NEEDS_RETRY;
320 		}
321 		/*
322 		 * if the device is in the process of becoming ready, we
323 		 * should retry.
324 		 */
325 		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
326 			return NEEDS_RETRY;
327 		/*
328 		 * if the device is not started, we need to wake
329 		 * the error handler to start the motor
330 		 */
331 		if (scmd->device->allow_restart &&
332 		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
333 			return FAILED;
334 		return SUCCESS;
335 
336 		/* these three are not supported */
337 	case COPY_ABORTED:
338 	case VOLUME_OVERFLOW:
339 	case MISCOMPARE:
340 		return SUCCESS;
341 
342 	case MEDIUM_ERROR:
343 		return NEEDS_RETRY;
344 
345 	case HARDWARE_ERROR:
346 		if (scmd->device->retry_hwerror)
347 			return NEEDS_RETRY;
348 		else
349 			return SUCCESS;
350 
351 	case ILLEGAL_REQUEST:
352 	case BLANK_CHECK:
353 	case DATA_PROTECT:
354 	default:
355 		return SUCCESS;
356 	}
357 }
358 
359 /**
360  * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
361  * @scmd:	SCSI cmd to examine.
362  *
363  * Notes:
364  *    This is *only* called when we are examining the status of commands
365  *    queued during error recovery.  the main difference is that we
366  *    don't allow for the possibility of retries here, and we are a lot
367  *    more restrictive about what we consider acceptable.
368  **/
369 static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
370 {
371 	/*
372 	 * first check the host byte, to see if there is anything in there
373 	 * that would indicate what we need to do.
374 	 */
375 	if (host_byte(scmd->result) == DID_RESET) {
376 		/*
377 		 * rats.  we are already in the error handler, so we now
378 		 * get to try and figure out what to do next.  if the sense
379 		 * is valid, we have a pretty good idea of what to do.
380 		 * if not, we mark it as FAILED.
381 		 */
382 		return scsi_check_sense(scmd);
383 	}
384 	if (host_byte(scmd->result) != DID_OK)
385 		return FAILED;
386 
387 	/*
388 	 * next, check the message byte.
389 	 */
390 	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
391 		return FAILED;
392 
393 	/*
394 	 * now, check the status byte to see if this indicates
395 	 * anything special.
396 	 */
397 	switch (status_byte(scmd->result)) {
398 	case GOOD:
399 	case COMMAND_TERMINATED:
400 		return SUCCESS;
401 	case CHECK_CONDITION:
402 		return scsi_check_sense(scmd);
403 	case CONDITION_GOOD:
404 	case INTERMEDIATE_GOOD:
405 	case INTERMEDIATE_C_GOOD:
406 		/*
407 		 * who knows?  FIXME(eric)
408 		 */
409 		return SUCCESS;
410 	case BUSY:
411 	case QUEUE_FULL:
412 	case RESERVATION_CONFLICT:
413 	default:
414 		return FAILED;
415 	}
416 	return FAILED;
417 }
418 
419 /**
420  * scsi_eh_times_out - timeout function for error handling.
421  * @scmd:	Cmd that is timing out.
422  *
423  * Notes:
424  *    During error handling, the kernel thread will be sleeping waiting
425  *    for some action to complete on the device.  our only job is to
426  *    record that it timed out, and to wake up the thread.
427  **/
428 static void scsi_eh_times_out(struct scsi_cmnd *scmd)
429 {
430 	scmd->eh_eflags |= SCSI_EH_REC_TIMEOUT;
431 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd:%p\n", __FUNCTION__,
432 					  scmd));
433 
434 	up(scmd->device->host->eh_action);
435 }
436 
437 /**
438  * scsi_eh_done - Completion function for error handling.
439  * @scmd:	Cmd that is done.
440  **/
441 static void scsi_eh_done(struct scsi_cmnd *scmd)
442 {
443 	/*
444 	 * if the timeout handler is already running, then just set the
445 	 * flag which says we finished late, and return.  we have no
446 	 * way of stopping the timeout handler from running, so we must
447 	 * always defer to it.
448 	 */
449 	if (del_timer(&scmd->eh_timeout)) {
450 		scmd->request->rq_status = RQ_SCSI_DONE;
451 
452 		SCSI_LOG_ERROR_RECOVERY(3, printk("%s scmd: %p result: %x\n",
453 					   __FUNCTION__, scmd, scmd->result));
454 
455 		up(scmd->device->host->eh_action);
456 	}
457 }
458 
459 /**
460  * scsi_send_eh_cmnd  - send a cmd to a device as part of error recovery.
461  * @scmd:	SCSI Cmd to send.
462  * @timeout:	Timeout for cmd.
463  *
464  * Notes:
465  *    The initialization of the structures is quite a bit different in
466  *    this case, and furthermore, there is a different completion handler
467  *    vs scsi_dispatch_cmd.
468  * Return value:
469  *    SUCCESS or FAILED or NEEDS_RETRY
470  **/
471 static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
472 {
473 	struct scsi_device *sdev = scmd->device;
474 	struct Scsi_Host *shost = sdev->host;
475 	DECLARE_MUTEX_LOCKED(sem);
476 	unsigned long flags;
477 	int rtn = SUCCESS;
478 
479 	/*
480 	 * we will use a queued command if possible, otherwise we will
481 	 * emulate the queuing and calling of completion function ourselves.
482 	 */
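	/*
	 * SCSI-2 and older devices expect the LUN encoded in the top three
	 * bits of CDB byte 1, so patch it in before issuing the command.
	 */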
483 	if (sdev->scsi_level <= SCSI_2)
484 		scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
485 			(sdev->lun << 5 & 0xe0);
486 
487 	scsi_add_timer(scmd, timeout, scsi_eh_times_out);
488 
489 	/*
490 	 * set up the semaphore so we wait for the command to complete.
491 	 */
492 	shost->eh_action = &sem;
493 	scmd->request->rq_status = RQ_SCSI_BUSY;
494 
495 	spin_lock_irqsave(shost->host_lock, flags);
496 	scsi_log_send(scmd);
497 	shost->hostt->queuecommand(scmd, scsi_eh_done);
498 	spin_unlock_irqrestore(shost->host_lock, flags);
499 
500 	down(&sem);
501 	scsi_log_completion(scmd, SUCCESS);
502 
503 	shost->eh_action = NULL;
504 
505 	/*
506 	 * see if timeout.  if so, tell the host to forget about it.
507 	 * in other words, we don't want a callback any more.
508 	 */
509 	if (scmd->eh_eflags & SCSI_EH_REC_TIMEOUT) {
510 		scmd->eh_eflags &= ~SCSI_EH_REC_TIMEOUT;
511 
512 		/*
513 		 * as far as the low level driver is
514 		 * concerned, this command is still active, so
515 		 * we must give the low level driver a chance
516 		 * to abort it. (db)
517 		 *
518 		 * FIXME(eric) - we are not tracking whether we could
519 		 * abort a timed out command or not.  not sure how
520 		 * we should treat them differently anyways.
521 		 */
522 		if (shost->hostt->eh_abort_handler)
523 			shost->hostt->eh_abort_handler(scmd);
524 
525 		scmd->request->rq_status = RQ_SCSI_DONE;
526 		rtn = FAILED;
527 	}
528 
529 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd: %p, rtn:%x\n",
530 					  __FUNCTION__, scmd, rtn));
531 
532 	/*
533 	 * now examine the actual status codes to see whether the command
534 	 * actually did complete normally.
535 	 */
536 	if (rtn == SUCCESS) {
537 		rtn = scsi_eh_completed_normally(scmd);
538 		SCSI_LOG_ERROR_RECOVERY(3,
539 			printk("%s: scsi_eh_completed_normally %x\n",
540 			       __FUNCTION__, rtn));
541 		switch (rtn) {
542 		case SUCCESS:
543 		case NEEDS_RETRY:
544 		case FAILED:
545 			break;
546 		default:
547 			rtn = FAILED;
548 			break;
549 		}
550 	}
551 
552 	return rtn;
553 }
554 
555 /**
556  * scsi_request_sense - Request sense data from a particular target.
557  * @scmd:	SCSI cmd for request sense.
558  *
559  * Notes:
560  *    Some hosts automatically obtain this information, others require
561  *    that we obtain it on our own. This function will *not* return until
562  *    the command either times out, or it completes.
563  **/
564 static int scsi_request_sense(struct scsi_cmnd *scmd)
565 {
566 	static unsigned char generic_sense[6] =
567 	{REQUEST_SENSE, 0, 0, 0, 252, 0};
568 	unsigned char *scsi_result;
569 	int saved_result;
570 	int rtn;
571 
572 	memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));
573 
574 	scsi_result = kmalloc(252, GFP_ATOMIC |
			      ((scmd->device->host->hostt->unchecked_isa_dma) ?
			       __GFP_DMA : 0));

577 	if (unlikely(!scsi_result)) {
578 		printk(KERN_ERR "%s: cannot allocate scsi_result.\n",
579 		       __FUNCTION__);
580 		return FAILED;
581 	}
582 
583 	/*
584 	 * zero the sense buffer.  some host adapters automatically always
585 	 * request sense, so it is not a good idea that
586 	 * scmd->request_buffer and scmd->sense_buffer point to the same
587 	 * address (db).  0 is not a valid sense code.
588 	 */
589 	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
590 	memset(scsi_result, 0, 252);
591 
592 	saved_result = scmd->result;
593 	scmd->request_buffer = scsi_result;
594 	scmd->request_bufflen = 252;
595 	scmd->use_sg = 0;
596 	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
597 	scmd->sc_data_direction = DMA_FROM_DEVICE;
598 	scmd->underflow = 0;
599 
600 	rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
601 
602 	/* last chance to have valid sense data */
603 	if(!SCSI_SENSE_VALID(scmd)) {
604 		memcpy(scmd->sense_buffer, scmd->request_buffer,
605 		       sizeof(scmd->sense_buffer));
606 	}
607 
608 	kfree(scsi_result);
609 
610 	/*
611 	 * when we eventually call scsi_finish, we really wish to complete
612 	 * the original request, so let's restore the original data. (db)
613 	 */
614 	scsi_setup_cmd_retry(scmd);
615 	scmd->result = saved_result;
616 	return rtn;
617 }
618 
619 /**
620  * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
621  * @scmd:	Original SCSI cmd that eh has finished.
622  * @done_q:	Queue for processed commands.
623  *
624  * Notes:
625  *    We don't want to use the normal command completion while we are
626  *    still handling errors - it may cause other commands to be queued,
627  *    and that would disturb what we are doing.  thus we really want to
628  *    keep a list of pending commands for final completion, and once we
629  *    are ready to leave error handling we handle completion for real.
630  **/
631 static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
632 			       struct list_head *done_q)
633 {
634 	scmd->device->host->host_failed--;
635 	scmd->eh_eflags = 0;
636 
637 	/*
638 	 * set this back so that the upper level can correctly free up
639 	 * things.
640 	 */
641 	scsi_setup_cmd_retry(scmd);
642 	list_move_tail(&scmd->eh_entry, done_q);
643 }
644 
645 /**
646  * scsi_eh_get_sense - Get device sense data.
647  * @work_q:	Queue of commands to process.
648  * @done_q:	Queue of processed commands.
649  *
650  * Description:
651  *    See if we need to request sense information.  if so, then get it
652  *    now, so we have a better idea of what to do.
653  *
654  * Notes:
655  *    This has the unfortunate side effect that if a shost adapter does
656  *    not automatically request sense information, we end up shutting it
657  *    down before we request it.
658  *
659  *    All drivers should request sense information internally these days,
660  *    so for now all I have to say is tough noogies if you end up in here.
661  *
662  *    XXX: Long term this code should go away, but that needs an audit of
663  *         all LLDDs first.
664  **/
665 static int scsi_eh_get_sense(struct list_head *work_q,
666 			     struct list_head *done_q)
667 {
668 	struct scsi_cmnd *scmd, *next;
669 	int rtn;
670 
671 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
672 		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
673 		    SCSI_SENSE_VALID(scmd))
674 			continue;
675 
676 		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
677 						  "%s: requesting sense\n",
678 						  current->comm));
679 		rtn = scsi_request_sense(scmd);
680 		if (rtn != SUCCESS)
681 			continue;
682 
683 		SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
684 						  " result %x\n", scmd,
685 						  scmd->result));
686 		SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd));
687 
688 		rtn = scsi_decide_disposition(scmd);
689 
690 		/*
691 		 * if the result was normal, then just pass it along to the
692 		 * upper level.
693 		 */
694 		if (rtn == SUCCESS)
695 			/* we don't want this command reissued, just
696 			 * finished with the sense data, so set
697 			 * retries to the max allowed to ensure it
698 			 * won't get reissued */
699 			scmd->retries = scmd->allowed;
700 		else if (rtn != NEEDS_RETRY)
701 			continue;
702 
703 		scsi_eh_finish_cmd(scmd, done_q);
704 	}
705 
706 	return list_empty(work_q);
707 }
708 
709 /**
710  * scsi_try_to_abort_cmd - Ask host to abort a running command.
711  * @scmd:	SCSI cmd to abort from Lower Level.
712  *
713  * Notes:
714  *    This function will not return until the user's completion function
715  *    has been called.  there is no timeout on this operation.  if the
716  *    author of the low-level driver wishes this operation to be timed,
717  *    they can provide this facility themselves.  helper functions in
718  *    scsi_error.c can be supplied to make this easier to do.
719  **/
720 static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
721 {
722 	if (!scmd->device->host->hostt->eh_abort_handler)
723 		return FAILED;
724 
725 	/*
726 	 * scsi_done was called just after the command timed out and before
727 	 * we had a chance to process it. (db)
728 	 */
729 	if (scmd->serial_number == 0)
730 		return SUCCESS;
731 	return scmd->device->host->hostt->eh_abort_handler(scmd);
732 }
733 
734 /**
735  * scsi_eh_tur - Send TUR to device.
736  * @scmd:	Scsi cmd to send TUR
737  *
738  * Return value:
739  *    0 - Device is ready. 1 - Device NOT ready.
740  **/
741 static int scsi_eh_tur(struct scsi_cmnd *scmd)
742 {
743 	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
744 	int retry_cnt = 1, rtn;
745 	int saved_result;
746 
747 retry_tur:
748 	memcpy(scmd->cmnd, tur_command, sizeof(tur_command));
749 
750 	/*
751 	 * zero the sense buffer.  the scsi spec mandates that any
752 	 * untransferred sense data should be interpreted as being zero.
753 	 */
754 	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
755 
756 	saved_result = scmd->result;
757 	scmd->request_buffer = NULL;
758 	scmd->request_bufflen = 0;
759 	scmd->use_sg = 0;
760 	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
761 	scmd->underflow = 0;
762 	scmd->sc_data_direction = DMA_NONE;
763 
764 	rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
765 
766 	/*
767 	 * when we eventually call scsi_finish, we really wish to complete
768 	 * the original request, so let's restore the original data. (db)
769 	 */
770 	scsi_setup_cmd_retry(scmd);
771 	scmd->result = saved_result;
772 
773 	/*
774 	 * hey, we are done.  let's look to see what happened.
775 	 */
776 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
777 		__FUNCTION__, scmd, rtn));
778 	if (rtn == SUCCESS)
779 		return 0;
780 	else if (rtn == NEEDS_RETRY) {
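		/*
		 * retry_cnt starts at 1, so the TUR is retried exactly once;
		 * if it still wants a retry after that, report the device as
		 * ready rather than escalating recovery further.
		 */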
781 		if (retry_cnt--)
782 			goto retry_tur;
783 		return 0;
784 	}
785 	return 1;
786 }
787 
788 /**
789  * scsi_eh_abort_cmds - abort canceled commands.
790  * @work_q:	list_head of commands to examine.
791  * @done_q:	list_head for processed commands.
792  *
793  * Description:
794  *    Try and see whether or not it makes sense to try and abort the
795  *    running command.  this only works out to be the case if we have one
796  *    command that has timed out.  if the command simply failed, it makes
797  *    no sense to try and abort the command, since as far as the shost
798  *    adapter is concerned, it isn't running.
799  **/
800 static int scsi_eh_abort_cmds(struct list_head *work_q,
801 			      struct list_head *done_q)
802 {
803 	struct scsi_cmnd *scmd, *next;
804 	int rtn;
805 
806 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
807 		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
808 			continue;
809 		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
810 						  "0x%p\n", current->comm,
811 						  scmd));
812 		rtn = scsi_try_to_abort_cmd(scmd);
813 		if (rtn == SUCCESS) {
814 			scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
815 			if (!scsi_device_online(scmd->device) ||
816 			    !scsi_eh_tur(scmd)) {
817 				scsi_eh_finish_cmd(scmd, done_q);
818 			}
819 
820 		} else
821 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
822 							  " cmd failed:"
823 							  "0x%p\n",
824 							  current->comm,
825 							  scmd));
826 	}
827 
828 	return list_empty(work_q);
829 }
830 
831 /**
832  * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
833  * @scmd:	SCSI cmd used to send BDR
834  *
835  * Notes:
836  *    There is no timeout for this operation.  if this operation is
837  *    unreliable for a given host, then the host itself needs to put a
838  *    timer on it, and set the host back to a consistent state prior to
839  *    returning.
840  **/
841 static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
842 {
843 	int rtn;
844 
845 	if (!scmd->device->host->hostt->eh_device_reset_handler)
846 		return FAILED;
847 
848 	rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
849 	if (rtn == SUCCESS) {
850 		scmd->device->was_reset = 1;
851 		scmd->device->expecting_cc_ua = 1;
852 	}
853 
854 	return rtn;
855 }
856 
857 /**
858  * scsi_eh_try_stu - Send START_UNIT to device.
859  * @scmd:	Scsi cmd to send START_UNIT
860  *
861  * Return value:
862  *    0 - Device is ready. 1 - Device NOT ready.
863  **/
864 static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
865 {
866 	static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
867 	int rtn;
868 	int saved_result;
869 
870 	if (!scmd->device->allow_restart)
871 		return 1;
872 
873 	memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
874 
875 	/*
876 	 * zero the sense buffer.  the scsi spec mandates that any
877 	 * untransferred sense data should be interpreted as being zero.
878 	 */
879 	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
880 
881 	saved_result = scmd->result;
882 	scmd->request_buffer = NULL;
883 	scmd->request_bufflen = 0;
884 	scmd->use_sg = 0;
885 	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
886 	scmd->underflow = 0;
887 	scmd->sc_data_direction = DMA_NONE;
888 
889 	rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT);
890 
891 	/*
892 	 * when we eventually call scsi_finish, we really wish to complete
893 	 * the original request, so let's restore the original data. (db)
894 	 */
895 	scsi_setup_cmd_retry(scmd);
896 	scmd->result = saved_result;
897 
898 	/*
899 	 * hey, we are done.  let's look to see what happened.
900 	 */
901 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
902 		__FUNCTION__, scmd, rtn));
903 	if (rtn == SUCCESS)
904 		return 0;
905 	return 1;
906 }
907 
908 /**
909  * scsi_eh_stu - send START_UNIT if needed
910  * @shost:	scsi host being recovered.
911  * @work_q:	list_head of commands to examine.
 * @done_q:	list_head for processed commands.
912  *
913  * Notes:
914  *    If commands are failing because the device reports "not ready,
915  *	initializing command required", send a START UNIT to spin it up.
916  **/
917 static int scsi_eh_stu(struct Scsi_Host *shost,
918 			      struct list_head *work_q,
919 			      struct list_head *done_q)
920 {
921 	struct scsi_cmnd *scmd, *stu_scmd, *next;
922 	struct scsi_device *sdev;
923 
924 	shost_for_each_device(sdev, shost) {
925 		stu_scmd = NULL;
926 		list_for_each_entry(scmd, work_q, eh_entry)
927 			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
928 			    scsi_check_sense(scmd) == FAILED ) {
929 				stu_scmd = scmd;
930 				break;
931 			}
932 
933 		if (!stu_scmd)
934 			continue;
935 
936 		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
937 						  " 0x%p\n", current->comm, sdev));
938 
939 		if (!scsi_eh_try_stu(stu_scmd)) {
940 			if (!scsi_device_online(sdev) ||
941 			    !scsi_eh_tur(stu_scmd)) {
942 				list_for_each_entry_safe(scmd, next,
943 							  work_q, eh_entry) {
944 					if (scmd->device == sdev)
945 						scsi_eh_finish_cmd(scmd, done_q);
946 				}
947 			}
948 		} else {
949 			SCSI_LOG_ERROR_RECOVERY(3,
950 						printk("%s: START_UNIT failed to sdev:"
951 						       " 0x%p\n", current->comm, sdev));
952 		}
953 	}
954 
955 	return list_empty(work_q);
956 }
957 
958 
959 /**
960  * scsi_eh_bus_device_reset - send bdr if needed
961  * @shost:	scsi host being recovered.
962  * @work_q:	list_head of commands to examine.
 * @done_q:	list_head for processed commands.
963  *
964  * Notes:
965  *    Try a bus device reset.  still, look to see whether we have multiple
966  *    devices that are jammed or not - if we have multiple devices, it
967  *    makes no sense to try bus_device_reset - we really would need to try
968  *    a bus_reset instead.
969  **/
970 static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
971 				    struct list_head *work_q,
972 				    struct list_head *done_q)
973 {
974 	struct scsi_cmnd *scmd, *bdr_scmd, *next;
975 	struct scsi_device *sdev;
976 	int rtn;
977 
978 	shost_for_each_device(sdev, shost) {
979 		bdr_scmd = NULL;
980 		list_for_each_entry(scmd, work_q, eh_entry)
981 			if (scmd->device == sdev) {
982 				bdr_scmd = scmd;
983 				break;
984 			}
985 
986 		if (!bdr_scmd)
987 			continue;
988 
989 		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
990 						  " 0x%p\n", current->comm,
991 						  sdev));
992 		rtn = scsi_try_bus_device_reset(bdr_scmd);
993 		if (rtn == SUCCESS) {
994 			if (!scsi_device_online(sdev) ||
995 			    !scsi_eh_tur(bdr_scmd)) {
996 				list_for_each_entry_safe(scmd, next,
997 							 work_q, eh_entry) {
998 					if (scmd->device == sdev)
999 						scsi_eh_finish_cmd(scmd,
1000 								   done_q);
1001 				}
1002 			}
1003 		} else {
1004 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
1005 							  " failed sdev:"
1006 							  "0x%p\n",
1007 							  current->comm,
1008 							   sdev));
1009 		}
1010 	}
1011 
1012 	return list_empty(work_q);
1013 }
1014 
1015 /**
1016  * scsi_try_bus_reset - ask host to perform a bus reset
1017  * @scmd:	SCSI cmd to send bus reset.
1018  **/
1019 static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
1020 {
1021 	unsigned long flags;
1022 	int rtn;
1023 
1024 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
1025 					  __FUNCTION__));
1026 
1027 	if (!scmd->device->host->hostt->eh_bus_reset_handler)
1028 		return FAILED;
1029 
1030 	rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);
1031 
1032 	if (rtn == SUCCESS) {
1033 		if (!scmd->device->host->hostt->skip_settle_delay)
1034 			ssleep(BUS_RESET_SETTLE_TIME);
1035 		spin_lock_irqsave(scmd->device->host->host_lock, flags);
1036 		scsi_report_bus_reset(scmd->device->host,
1037 				      scmd_channel(scmd));
1038 		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
1039 	}
1040 
1041 	return rtn;
1042 }
1043 
1044 /**
1045  * scsi_try_host_reset - ask host adapter to reset itself
1046  * @scmd:	SCSI cmd to send host reset.
1047  **/
1048 static int scsi_try_host_reset(struct scsi_cmnd *scmd)
1049 {
1050 	unsigned long flags;
1051 	int rtn;
1052 
1053 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
1054 					  __FUNCTION__));
1055 
1056 	if (!scmd->device->host->hostt->eh_host_reset_handler)
1057 		return FAILED;
1058 
1059 	rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);
1060 
1061 	if (rtn == SUCCESS) {
1062 		if (!scmd->device->host->hostt->skip_settle_delay)
1063 			ssleep(HOST_RESET_SETTLE_TIME);
1064 		spin_lock_irqsave(scmd->device->host->host_lock, flags);
1065 		scsi_report_bus_reset(scmd->device->host,
1066 				      scmd_channel(scmd));
1067 		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
1068 	}
1069 
1070 	return rtn;
1071 }
1072 
1073 /**
1074  * scsi_eh_bus_reset - send a bus reset
1075  * @shost:	scsi host being recovered.
1076  * @work_q:	list_head of commands to examine.
 * @done_q:	list_head for processed commands.
1077  **/
1078 static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1079 			     struct list_head *work_q,
1080 			     struct list_head *done_q)
1081 {
1082 	struct scsi_cmnd *scmd, *chan_scmd, *next;
1083 	unsigned int channel;
1084 	int rtn;
1085 
1086 	/*
1087 	 * we really want to loop over the various channels, and do this on
1088 	 * a channel by channel basis.  we should also check to see if any
1089 	 * of the failed commands are on soft_reset devices, and if so, skip
1090 	 * the reset.
1091 	 */
1092 
1093 	for (channel = 0; channel <= shost->max_channel; channel++) {
1094 		chan_scmd = NULL;
1095 		list_for_each_entry(scmd, work_q, eh_entry) {
1096 			if (channel == scmd_channel(scmd)) {
1097 				chan_scmd = scmd;
1098 				break;
1099 				/*
1100 				 * FIXME add back in some support for
1101 				 * soft_reset devices.
1102 				 */
1103 			}
1104 		}
1105 
1106 		if (!chan_scmd)
1107 			continue;
1108 		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
1109 						  " %d\n", current->comm,
1110 						  channel));
1111 		rtn = scsi_try_bus_reset(chan_scmd);
1112 		if (rtn == SUCCESS) {
1113 			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1114 				if (channel == scmd_channel(scmd))
1115 					if (!scsi_device_online(scmd->device) ||
1116 					    !scsi_eh_tur(scmd))
1117 						scsi_eh_finish_cmd(scmd,
1118 								   done_q);
1119 			}
1120 		} else {
1121 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
1122 							  " failed chan: %d\n",
1123 							  current->comm,
1124 							  channel));
1125 		}
1126 	}
1127 	return list_empty(work_q);
1128 }
1129 
1130 /**
1131  * scsi_eh_host_reset - send a host reset
1132  * @work_q:	list_head of commands to examine.
1133  * @done_q:	list_head for processed commands.
1134  **/
1135 static int scsi_eh_host_reset(struct list_head *work_q,
1136 			      struct list_head *done_q)
1137 {
1138 	struct scsi_cmnd *scmd, *next;
1139 	int rtn;
1140 
1141 	if (!list_empty(work_q)) {
1142 		scmd = list_entry(work_q->next,
1143 				  struct scsi_cmnd, eh_entry);
1144 
1145 		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
1146 						  , current->comm));
1147 
1148 		rtn = scsi_try_host_reset(scmd);
1149 		if (rtn == SUCCESS) {
1150 			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1151 				if (!scsi_device_online(scmd->device) ||
1152 				    (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
1153 				    !scsi_eh_tur(scmd))
1154 					scsi_eh_finish_cmd(scmd, done_q);
1155 			}
1156 		} else {
1157 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
1158 							  " failed\n",
1159 							  current->comm));
1160 		}
1161 	}
1162 	return list_empty(work_q);
1163 }
1164 
1165 /**
1166  * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
1167  * @work_q:	list_head of commands to examine.
1168  * @done_q:	list_head for processed commands.
1169  *
1170  **/
1171 static void scsi_eh_offline_sdevs(struct list_head *work_q,
1172 				  struct list_head *done_q)
1173 {
1174 	struct scsi_cmnd *scmd, *next;
1175 
1176 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1177 		sdev_printk(KERN_INFO, scmd->device,
1178 			    "scsi: Device offlined - not"
1179 			    " ready after error recovery\n");
1180 		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
1181 		if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
1182 			/*
1183 			 * FIXME: Handle lost cmds.
1184 			 */
1185 		}
1186 		scsi_eh_finish_cmd(scmd, done_q);
1187 	}
1188 	return;
1189 }
1190 
1191 /**
1192  * scsi_decide_disposition - Disposition a cmd on return from LLD.
1193  * @scmd:	SCSI cmd to examine.
1194  *
1195  * Notes:
1196  *    This is *only* called when we are examining the status after sending
1197  *    out the actual data command.  any commands that are queued for error
1198  *    recovery (e.g. test_unit_ready) do *not* come through here.
1199  *
1200  *    When this routine returns failed, it means the error handler thread
1201  *    is woken.  In cases where the error code indicates an error that
1202  *    doesn't require the error handler (i.e. we don't need to
1203  *    abort/reset), this function should return SUCCESS.
1204  **/
1205 int scsi_decide_disposition(struct scsi_cmnd *scmd)
1206 {
1207 	int rtn;
1208 
1209 	/*
1210 	 * if the device is offline, then we clearly just pass the result back
1211 	 * up to the top level.
1212 	 */
1213 	if (!scsi_device_online(scmd->device)) {
1214 		SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
1215 						  " as SUCCESS\n",
1216 						  __FUNCTION__));
1217 		return SUCCESS;
1218 	}
1219 
1220 	/*
1221 	 * first check the host byte, to see if there is anything in there
1222 	 * that would indicate what we need to do.
1223 	 */
1224 	switch (host_byte(scmd->result)) {
1225 	case DID_PASSTHROUGH:
1226 		/*
1227 		 * no matter what, pass this through to the upper layer.
1228 		 * nuke this special code so that it looks like we are saying
1229 		 * did_ok.
1230 		 */
1231 		scmd->result &= 0xff00ffff;
1232 		return SUCCESS;
1233 	case DID_OK:
1234 		/*
1235 		 * looks good.  drop through, and check the next byte.
1236 		 */
1237 		break;
1238 	case DID_NO_CONNECT:
1239 	case DID_BAD_TARGET:
1240 	case DID_ABORT:
1241 		/*
1242 		 * note - this means that we just report the status back
1243 		 * to the top level driver, not that we actually think
1244 		 * that it indicates SUCCESS.
1245 		 */
1246 		return SUCCESS;
1247 		/*
1248 		 * when the low level driver returns did_soft_error,
1249 		 * it is responsible for keeping an internal retry counter
1250 		 * in order to avoid endless loops (db)
1251 		 *
1252 		 * actually this is a bug in this function here.  we should
1253 		 * be mindful of the maximum number of retries specified
1254 		 * and not get stuck in a loop.
1255 		 */
1256 	case DID_SOFT_ERROR:
1257 		goto maybe_retry;
1258 	case DID_IMM_RETRY:
1259 		return NEEDS_RETRY;
1260 
1261 	case DID_REQUEUE:
1262 		return ADD_TO_MLQUEUE;
1263 
1264 	case DID_ERROR:
1265 		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1266 		    status_byte(scmd->result) == RESERVATION_CONFLICT)
1267 			/*
1268 			 * execute reservation conflict processing code
1269 			 * lower down
1270 			 */
1271 			break;
1272 		/* fallthrough */
1273 
1274 	case DID_BUS_BUSY:
1275 	case DID_PARITY:
1276 		goto maybe_retry;
1277 	case DID_TIME_OUT:
1278 		/*
1279 		 * when we scan the bus, we get timeout messages for
1280 		 * these commands if there is no device available.
1281 		 * other hosts report did_no_connect for the same thing.
1282 		 */
1283 		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
1284 		     scmd->cmnd[0] == INQUIRY)) {
1285 			return SUCCESS;
1286 		} else {
1287 			return FAILED;
1288 		}
1289 	case DID_RESET:
1290 		return SUCCESS;
1291 	default:
1292 		return FAILED;
1293 	}
1294 
1295 	/*
1296 	 * next, check the message byte.
1297 	 */
1298 	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
1299 		return FAILED;
1300 
1301 	/*
1302 	 * check the status byte to see if this indicates anything special.
1303 	 */
1304 	switch (status_byte(scmd->result)) {
1305 	case QUEUE_FULL:
1306 		/*
1307 		 * the case of trying to send too many commands to a
1308 		 * tagged queueing device.
1309 		 */
1310 	case BUSY:
1311 		/*
1312 		 * device can't talk to us at the moment.  Should only
1313 		 * occur (SAM-3) when the task queue is empty, so will cause
1314 		 * the empty queue handling to trigger a stall in the
1315 		 * device.
1316 		 */
1317 		return ADD_TO_MLQUEUE;
1318 	case GOOD:
1319 	case COMMAND_TERMINATED:
1320 	case TASK_ABORTED:
1321 		return SUCCESS;
1322 	case CHECK_CONDITION:
1323 		rtn = scsi_check_sense(scmd);
1324 		if (rtn == NEEDS_RETRY)
1325 			goto maybe_retry;
1326 		/* if rtn == FAILED, we have no sense information;
1327 		 * returning FAILED will wake the error handler thread
1328 		 * to collect the sense and redo the decide
1329 		 * disposition */
1330 		return rtn;
1331 	case CONDITION_GOOD:
1332 	case INTERMEDIATE_GOOD:
1333 	case INTERMEDIATE_C_GOOD:
1334 	case ACA_ACTIVE:
1335 		/*
1336 		 * who knows?  FIXME(eric)
1337 		 */
1338 		return SUCCESS;
1339 
1340 	case RESERVATION_CONFLICT:
1341 		sdev_printk(KERN_INFO, scmd->device,
1342 			    "reservation conflict\n");
1343 		return SUCCESS; /* causes immediate i/o error */
1344 	default:
1345 		return FAILED;
1346 	}
1347 	return FAILED;
1348 
1349       maybe_retry:
1350 
1351 	/* we requeue for retry because the error was retryable, and
1352 	 * the request was not marked fast fail.  Note that above,
1353 	 * even if the request is marked fast fail, we still requeue
1354 	 * for queue congestion conditions (QUEUE_FULL or BUSY) */
1355 	if ((++scmd->retries) < scmd->allowed
1356 	    && !blk_noretry_request(scmd->request)) {
1357 		return NEEDS_RETRY;
1358 	} else {
1359 		/*
1360 		 * no more retries - report this one back to upper level.
1361 		 */
1362 		return SUCCESS;
1363 	}
1364 }
1365 
1366 /**
1367  * scsi_eh_lock_done - done function for eh door lock request
1368  * @scmd:	SCSI command block for the door lock request
1369  *
1370  * Notes:
1371  * 	We completed the asynchronous door lock request, and it has either
1372  * 	locked the door or failed.  We must free the command structures
1373  * 	associated with this request.
1374  **/
1375 static void scsi_eh_lock_done(struct scsi_cmnd *scmd)
1376 {
1377 	struct scsi_request *sreq = scmd->sc_request;
1378 
1379 	scsi_release_request(sreq);
1380 }
1381 
1382 
1383 /**
1384  * scsi_eh_lock_door - Prevent medium removal for the specified device
1385  * @sdev:	SCSI device to prevent medium removal
1386  *
1387  * Locking:
1388  * 	We must be called from process context; scsi_allocate_request()
1389  * 	may sleep.
1390  *
1391  * Notes:
1392  * 	We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
1393  * 	head of the devices request queue, and continue.
1394  *
1395  * Bugs:
1396  * 	scsi_allocate_request() may sleep waiting for existing requests to
1397  * 	be processed.  However, since we haven't kicked off any request
1398  * 	processing for this host, this may deadlock.
1399  *
1400  *	If scsi_allocate_request() fails for whatever reason, we
1401  *	completely forget to lock the door.
1402  **/
1403 static void scsi_eh_lock_door(struct scsi_device *sdev)
1404 {
1405 	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1406 
1407 	if (unlikely(!sreq)) {
1408 		printk(KERN_ERR "%s: request allocate failed, "
1409 		       "prevent media removal cmd not sent\n", __FUNCTION__);
1410 		return;
1411 	}
1412 
1413 	sreq->sr_cmnd[0] = ALLOW_MEDIUM_REMOVAL;
1414 	sreq->sr_cmnd[1] = 0;
1415 	sreq->sr_cmnd[2] = 0;
1416 	sreq->sr_cmnd[3] = 0;
1417 	sreq->sr_cmnd[4] = SCSI_REMOVAL_PREVENT;
1418 	sreq->sr_cmnd[5] = 0;
1419 	sreq->sr_data_direction = DMA_NONE;
1420 	sreq->sr_bufflen = 0;
1421 	sreq->sr_buffer = NULL;
1422 	sreq->sr_allowed = 5;
1423 	sreq->sr_done = scsi_eh_lock_done;
1424 	sreq->sr_timeout_per_command = 10 * HZ;
1425 	sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
1426 
1427 	scsi_insert_special_req(sreq, 1);
1428 }
1429 
1430 
1431 /**
1432  * scsi_restart_operations - restart io operations to the specified host.
1433  * @shost:	Host we are restarting.
1434  *
1435  * Notes:
1436  *    When we entered the error handler, we blocked all further i/o to
1437  *    this host.  we need to 'reverse' this process.
1438  **/
1439 static void scsi_restart_operations(struct Scsi_Host *shost)
1440 {
1441 	struct scsi_device *sdev;
1442 	unsigned long flags;
1443 
1444 	/*
1445 	 * If the door was locked, we need to insert a door lock request
1446 	 * onto the head of the SCSI request queue for the device.  There
1447 	 * is no point trying to lock the door of an off-line device.
1448 	 */
1449 	shost_for_each_device(sdev, shost) {
1450 		if (scsi_device_online(sdev) && sdev->locked)
1451 			scsi_eh_lock_door(sdev);
1452 	}
1453 
1454 	/*
1455 	 * next free up anything directly waiting upon the host.  this
1456 	 * will be requests for character device operations, and also for
1457 	 * ioctls to queued block devices.
1458 	 */
1459 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1460 					  __FUNCTION__));
1461 
1462 	spin_lock_irqsave(shost->host_lock, flags);
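	/*
	 * scsi_host_set_state() returns 0 on success: move the host back to
	 * SHOST_RUNNING if possible, otherwise to SHOST_CANCEL, and as a
	 * last resort to SHOST_DEL, which must succeed.
	 */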
1463 	if (scsi_host_set_state(shost, SHOST_RUNNING))
1464 		if (scsi_host_set_state(shost, SHOST_CANCEL))
1465 			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
1466 	spin_unlock_irqrestore(shost->host_lock, flags);
1467 
1468 	wake_up(&shost->host_wait);
1469 
1470 	/*
1471 	 * finally we need to re-initiate requests that may be pending.  we will
1472 	 * have had everything blocked while error handling is taking place, and
1473 	 * now that error recovery is done, we will need to ensure that these
1474 	 * requests are started.
1475 	 */
1476 	scsi_run_host_queues(shost);
1477 }
1478 
1479 /**
1480  * scsi_eh_ready_devs - check device ready state and recover if not.
1481  * @shost: 	host to be recovered.
1482  * @work_q:	list_head of commands to examine.
 * @done_q:	list_head for processed commands.
1483  *
1484  **/
1485 static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1486 			       struct list_head *work_q,
1487 			       struct list_head *done_q)
1488 {
1489 	if (!scsi_eh_stu(shost, work_q, done_q))
1490 		if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
1491 			if (!scsi_eh_bus_reset(shost, work_q, done_q))
1492 				if (!scsi_eh_host_reset(work_q, done_q))
1493 					scsi_eh_offline_sdevs(work_q, done_q);
1494 }
1495 
1496 /**
1497  * scsi_eh_flush_done_q - finish processed commands or retry them.
1498  * @done_q:	list_head of processed commands.
1499  *
1500  **/
1501 static void scsi_eh_flush_done_q(struct list_head *done_q)
1502 {
1503 	struct scsi_cmnd *scmd, *next;
1504 
1505 	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
1506 		list_del_init(&scmd->eh_entry);
1507 		if (scsi_device_online(scmd->device) &&
1508 		    !blk_noretry_request(scmd->request) &&
1509 		    (++scmd->retries < scmd->allowed)) {
1510 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
1511 							  " retry cmd: %p\n",
1512 							  current->comm,
1513 							  scmd));
1514 				scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
1515 		} else {
1516 			/*
1517 			 * If we just got sense for the device (called
1518 			 * scsi_eh_get_sense), scmd->result is already
1519 			 * set, do not set DRIVER_TIMEOUT.
1520 			 */
1521 			if (!scmd->result)
1522 				scmd->result |= (DRIVER_TIMEOUT << 24);
1523 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
1524 							" cmd: %p\n",
1525 							current->comm, scmd));
1526 			scsi_finish_command(scmd);
1527 		}
1528 	}
1529 }
1530 
1531 /**
1532  * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
1533  * @shost:	Host to unjam.
1534  *
1535  * Notes:
1536  *    When we come in here, we *know* that all commands on the bus have
1537  *    either completed, failed or timed out.  we also know that no further
1538  *    commands are being sent to the host, so things are relatively quiet
1539  *    and we have freedom to fiddle with things as we wish.
1540  *
1541  *    This is only the *default* implementation.  it is possible for
1542  *    individual drivers to supply their own version of this function, and
1543  *    if the maintainer wishes to do this, it is strongly suggested that
1544  *    this function be taken as a template and modified.  this function
1545  *    was designed to correctly handle problems for about 95% of the
1546  *    different cases out there, and it should always provide at least a
1547  *    reasonable amount of error recovery.
1548  *
1549  *    Any command marked 'failed' or 'timeout' must eventually have
1550  *    scsi_finish_command() called for it.  we do all of the retry stuff
1551  *    here, so when we restart the host after we return it should have an
1552  *    empty queue.
1553  **/
1554 static void scsi_unjam_host(struct Scsi_Host *shost)
1555 {
1556 	unsigned long flags;
1557 	LIST_HEAD(eh_work_q);
1558 	LIST_HEAD(eh_done_q);
1559 
1560 	spin_lock_irqsave(shost->host_lock, flags);
1561 	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
1562 	spin_unlock_irqrestore(shost->host_lock, flags);
1563 
1564 	SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
1565 
1566 	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
1567 		if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
1568 			scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
1569 
1570 	scsi_eh_flush_done_q(&eh_done_q);
1571 }
1572 
1573 /**
1574  * scsi_error_handler - Handle errors/timeouts of SCSI cmds.
1575  * @data:	Host for which we are running.
1576  *
1577  * Notes:
1578  *    This is always run in the context of a kernel thread.  The idea is
1579  *    that we start this thing up when the kernel starts up (one per host
1580  *    that we detect), and it immediately goes to sleep and waits for some
1581  *    event (i.e. failure).  When this takes place, we have the job of
1582  *    trying to unjam the bus and restarting things.
1583  **/
1584 int scsi_error_handler(void *data)
1585 {
1586 	struct Scsi_Host *shost = (struct Scsi_Host *) data;
1587 	int rtn;
1588 
1589 	current->flags |= PF_NOFREEZE;
1590 
1591 
1592 	/*
1593 	 * Note - we always use TASK_INTERRUPTIBLE even if the module
1594 	 * was loaded as part of the kernel.  The reason is that
1595 	 * UNINTERRUPTIBLE would cause this thread to be counted in
1596 	 * the load average as a running process, and an interruptible
1597 	 * wait doesn't.
1598 	 */
1599 	set_current_state(TASK_INTERRUPTIBLE);
1600 	while (!kthread_should_stop()) {
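		/*
		 * Sleep unless every outstanding command on the host has
		 * failed; scsi_eh_wakeup() applies the same
		 * host_busy == host_failed test before waking this thread.
		 */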
1601 		if (shost->host_failed == 0 ||
1602 		    shost->host_failed != shost->host_busy) {
1603 			SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
1604 							  " scsi_eh_%d"
1605 							  " sleeping\n",
1606 							  shost->host_no));
1607 			schedule();
1608 			set_current_state(TASK_INTERRUPTIBLE);
1609 			continue;
1610 		}
1611 
1612 		__set_current_state(TASK_RUNNING);
1613 		SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
1614 						  " scsi_eh_%d waking"
1615 						  " up\n",shost->host_no));
1616 
1617 		shost->eh_active = 1;
1618 
1619 		/*
1620 		 * We have a host that is failing for some reason.  Figure out
1621 		 * what we need to do to get it up and online again (if we can).
1622 		 * If we fail, we end up taking the thing offline.
1623 		 */
1624 		if (shost->hostt->eh_strategy_handler)
1625 			rtn = shost->hostt->eh_strategy_handler(shost);
1626 		else
1627 			scsi_unjam_host(shost);
1628 
1629 		shost->eh_active = 0;
1630 
1631 		/*
1632 		 * Note - if the above fails completely, the action is to take
1633 		 * individual devices offline and flush the queue of any
1634 		 * outstanding requests that may have been pending.  When we
1635 		 * restart, we restart any I/O to any other devices on the bus
1636 		 * which are still online.
1637 		 */
1638 		scsi_restart_operations(shost);
1639 		set_current_state(TASK_INTERRUPTIBLE);
1640 	}
1641 
1642 	__set_current_state(TASK_RUNNING);
1643 
1644 	SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d"
1645 					  " exiting\n",shost->host_no));
1646 
1647 	/*
1648 	 * Make sure that nobody tries to wake us up again.
1649 	 */
1650 	shost->ehandler = NULL;
1651 	return 0;
1652 }
1653 
1654 /*
1655  * Function:    scsi_report_bus_reset()
1656  *
1657  * Purpose:     Utility function used by low-level drivers to report that
1658  *		they have observed a bus reset on the bus being handled.
1659  *
1660  * Arguments:   shost       - Host in question
1661  *		channel     - channel on which reset was observed.
1662  *
1663  * Returns:     Nothing
1664  *
1665  * Lock status: Host lock must be held.
1666  *
1667  * Notes:       This only needs to be called if the reset is one which
1668  *		originates from an unknown location.  Resets originated
1669  *		by the mid-level itself don't need to call this, but there
1670  *		should be no harm.
1671  *
1672  *		The main purpose of this is to make sure that a CHECK_CONDITION
1673  *		is properly treated.
1674  */
1675 void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
1676 {
1677 	struct scsi_device *sdev;
1678 
1679 	__shost_for_each_device(sdev, shost) {
1680 		if (channel == sdev_channel(sdev)) {
1681 			sdev->was_reset = 1;
1682 			sdev->expecting_cc_ua = 1;
1683 		}
1684 	}
1685 }
1686 EXPORT_SYMBOL(scsi_report_bus_reset);
1687 
1688 /*
1689  * Function:    scsi_report_device_reset()
1690  *
1691  * Purpose:     Utility function used by low-level drivers to report that
1692  *		they have observed a device reset on the device being handled.
1693  *
1694  * Arguments:   shost       - Host in question
1695  *		channel     - channel on which reset was observed
1696  *		target	    - target on which reset was observed
1697  *
1698  * Returns:     Nothing
1699  *
1700  * Lock status: Host lock must be held
1701  *
1702  * Notes:       This only needs to be called if the reset is one which
1703  *		originates from an unknown location.  Resets originated
1704  *		by the mid-level itself don't need to call this, but there
1705  *		should be no harm.
1706  *
1707  *		The main purpose of this is to make sure that a CHECK_CONDITION
1708  *		is properly treated.
1709  */
1710 void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
1711 {
1712 	struct scsi_device *sdev;
1713 
1714 	__shost_for_each_device(sdev, shost) {
1715 		if (channel == sdev_channel(sdev) &&
1716 		    target == sdev_id(sdev)) {
1717 			sdev->was_reset = 1;
1718 			sdev->expecting_cc_ua = 1;
1719 		}
1720 	}
1721 }
1722 EXPORT_SYMBOL(scsi_report_device_reset);
1723 
1724 static void
1725 scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
1726 {
1727 }
1728 
1729 /*
1730  * Function:	scsi_reset_provider
1731  *
1732  * Purpose:	Send requested reset to a bus or device at any phase.
1733  *
1734  * Arguments:	device	- device to send reset to
1735  *		flag - reset type (see scsi.h)
1736  *
1737  * Returns:	SUCCESS/FAILURE.
1738  *
1739  * Notes:	This is used by the SCSI Generic driver to provide
1740  *		Bus/Device reset capability.
1741  */
1742 int
1743 scsi_reset_provider(struct scsi_device *dev, int flag)
1744 {
1745 	struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
1746 	struct request req;
1747 	int rtn;
1748 
1749 	scmd->request = &req;
1750 	memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1751 	scmd->request->rq_status      	= RQ_SCSI_BUSY;
1752 
1753 	memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));
1754 
1755 	scmd->scsi_done		= scsi_reset_provider_done_command;
1756 	scmd->done			= NULL;
1757 	scmd->buffer			= NULL;
1758 	scmd->bufflen			= 0;
1759 	scmd->request_buffer		= NULL;
1760 	scmd->request_bufflen		= 0;
1761 
1762 	scmd->cmd_len			= 0;
1763 
1764 	scmd->sc_data_direction		= DMA_BIDIRECTIONAL;
1765 	scmd->sc_request		= NULL;
1766 	scmd->sc_magic			= SCSI_CMND_MAGIC;
1767 
1768 	init_timer(&scmd->eh_timeout);
1769 
1770 	/*
1771 	 * Sometimes the command can get back into the timer chain,
1772 	 * so use the pid as an identifier.
1773 	 */
1774 	scmd->pid			= 0;
1775 
1776 	switch (flag) {
1777 	case SCSI_TRY_RESET_DEVICE:
1778 		rtn = scsi_try_bus_device_reset(scmd);
1779 		if (rtn == SUCCESS)
1780 			break;
1781 		/* FALLTHROUGH */
1782 	case SCSI_TRY_RESET_BUS:
1783 		rtn = scsi_try_bus_reset(scmd);
1784 		if (rtn == SUCCESS)
1785 			break;
1786 		/* FALLTHROUGH */
1787 	case SCSI_TRY_RESET_HOST:
1788 		rtn = scsi_try_host_reset(scmd);
1789 		break;
1790 	default:
1791 		rtn = FAILED;
1792 	}
1793 
1794 	scsi_next_command(scmd);
1795 	return rtn;
1796 }
1797 EXPORT_SYMBOL(scsi_reset_provider);
1798 
1799 /**
1800  * scsi_normalize_sense - normalize main elements from either fixed or
1801  *			descriptor sense data format into a common format.
1802  *
1803  * @sense_buffer:	byte array containing sense data returned by device
1804  * @sb_len:		number of valid bytes in sense_buffer
1805  * @sshdr:		pointer to instance of structure that common
1806  *			elements are written to.
1807  *
1808  * Notes:
1809  *	The "main elements" from sense data are: response_code, sense_key,
1810  *	asc, ascq and additional_length (only for descriptor format).
1811  *
1812  *	Typically this function can be called after a device has
1813  *	responded to a SCSI command with the CHECK_CONDITION status.
1814  *
1815  * Return value:
1816  *	1 if valid sense data information found, else 0;
1817  **/
1818 int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
1819                          struct scsi_sense_hdr *sshdr)
1820 {
1821 	if (!sense_buffer || !sb_len)
1822 		return 0;
1823 
1824 	memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
1825 
1826 	sshdr->response_code = (sense_buffer[0] & 0x7f);
1827 
1828 	if (!scsi_sense_valid(sshdr))
1829 		return 0;
1830 
1831 	if (sshdr->response_code >= 0x72) {
1832 		/*
1833 		 * descriptor format
1834 		 */
1835 		if (sb_len > 1)
1836 			sshdr->sense_key = (sense_buffer[1] & 0xf);
1837 		if (sb_len > 2)
1838 			sshdr->asc = sense_buffer[2];
1839 		if (sb_len > 3)
1840 			sshdr->ascq = sense_buffer[3];
1841 		if (sb_len > 7)
1842 			sshdr->additional_length = sense_buffer[7];
1843 	} else {
1844 		/*
1845 		 * fixed format
1846 		 */
1847 		if (sb_len > 2)
1848 			sshdr->sense_key = (sense_buffer[2] & 0xf);
1849 		if (sb_len > 7) {
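			/*
			 * Byte 7 holds the additional sense length; clamp
			 * sb_len so we never read past the sense data the
			 * device actually returned.
			 */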
1850 			sb_len = (sb_len < (sense_buffer[7] + 8)) ?
1851 					 sb_len : (sense_buffer[7] + 8);
1852 			if (sb_len > 12)
1853 				sshdr->asc = sense_buffer[12];
1854 			if (sb_len > 13)
1855 				sshdr->ascq = sense_buffer[13];
1856 		}
1857 	}
1858 
1859 	return 1;
1860 }
1861 EXPORT_SYMBOL(scsi_normalize_sense);
1862 
1863 int scsi_request_normalize_sense(struct scsi_request *sreq,
1864 				 struct scsi_sense_hdr *sshdr)
1865 {
1866 	return scsi_normalize_sense(sreq->sr_sense_buffer,
1867 			sizeof(sreq->sr_sense_buffer), sshdr);
1868 }
1869 EXPORT_SYMBOL(scsi_request_normalize_sense);
1870 
1871 int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
1872 				 struct scsi_sense_hdr *sshdr)
1873 {
1874 	return scsi_normalize_sense(cmd->sense_buffer,
1875 			sizeof(cmd->sense_buffer), sshdr);
1876 }
1877 EXPORT_SYMBOL(scsi_command_normalize_sense);
1878 
1879 /**
1880  * scsi_sense_desc_find - search for a given descriptor type in
1881  *			descriptor sense data format.
1882  *
1883  * @sense_buffer:	byte array of descriptor format sense data
1884  * @sb_len:		number of valid bytes in sense_buffer
1885  * @desc_type:		value of descriptor type to find
1886  *			(e.g. 0 -> information)
1887  *
1888  * Notes:
1889  *	only valid when sense data is in descriptor format
1890  *
1891  * Return value:
1892  *	pointer to start of (first) descriptor if found else NULL
1893  **/
1894 const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
1895 				int desc_type)
1896 {
1897 	int add_sen_len, add_len, desc_len, k;
1898 	const u8 * descp;
1899 
1900 	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
1901 		return NULL;
1902 	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
1903 		return NULL;
1904 	add_sen_len = (add_sen_len < (sb_len - 8)) ?
1905 			add_sen_len : (sb_len - 8);
1906 	descp = &sense_buffer[8];
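	/*
	 * Each descriptor begins with a type byte followed by an additional
	 * length byte, so a whole descriptor occupies descp[1] + 2 bytes.
	 */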
1907 	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
1908 		descp += desc_len;
1909 		add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
1910 		desc_len = add_len + 2;
1911 		if (descp[0] == desc_type)
1912 			return descp;
1913 		if (add_len < 0) // short descriptor ??
1914 			break;
1915 	}
1916 	return NULL;
1917 }
1918 EXPORT_SYMBOL(scsi_sense_desc_find);
1919 
1920 /**
1921  * scsi_get_sense_info_fld - attempts to get information field from
1922  *			sense data (either fixed or descriptor format)
1923  *
1924  * @sense_buffer:	byte array of sense data
1925  * @sb_len:		number of valid bytes in sense_buffer
1926  * @info_out:		pointer to a 64-bit integer where the 8 or 4 byte
1927  *			information field will be placed if found.
1928  *
1929  * Return value:
1930  *	1 if information field found, 0 if not found.
1931  **/
1932 int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
1933 			    u64 * info_out)
1934 {
1935 	int j;
1936 	const u8 * ucp;
1937 	u64 ull;
1938 
1939 	if (sb_len < 7)
1940 		return 0;
1941 	switch (sense_buffer[0] & 0x7f) {
1942 	case 0x70:
1943 	case 0x71:
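		/*
		 * Fixed format: bit 7 of byte 0 is the VALID bit; when set,
		 * bytes 3..6 carry the four-byte INFORMATION field.
		 */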
1944 		if (sense_buffer[0] & 0x80) {
1945 			*info_out = (sense_buffer[3] << 24) +
1946 				    (sense_buffer[4] << 16) +
1947 				    (sense_buffer[5] << 8) + sense_buffer[6];
1948 			return 1;
1949 		} else
1950 			return 0;
1951 	case 0x72:
1952 	case 0x73:
1953 		ucp = scsi_sense_desc_find(sense_buffer, sb_len,
1954 					   0 /* info desc */);
1955 		if (ucp && (0xa == ucp[1])) {
1956 			ull = 0;
1957 			for (j = 0; j < 8; ++j) {
1958 				if (j > 0)
1959 					ull <<= 8;
1960 				ull |= ucp[4 + j];
1961 			}
1962 			*info_out = ull;
1963 			return 1;
1964 		} else
1965 			return 0;
1966 	default:
1967 		return 0;
1968 	}
1969 }
1970 EXPORT_SYMBOL(scsi_get_sense_info_fld);
1971