1 /* 2 * scsi_error.c Copyright (C) 1997 Eric Youngdale 3 * 4 * SCSI error/timeout handling 5 * Initial versions: Eric Youngdale. Based upon conversations with 6 * Leonard Zubkoff and David Miller at Linux Expo, 7 * ideas originating from all over the place. 8 * 9 * Restructured scsi_unjam_host and associated functions. 10 * September 04, 2002 Mike Anderson (andmike@us.ibm.com) 11 * 12 * Forward port of Russell King's (rmk@arm.linux.org.uk) changes and 13 * minor cleanups. 14 * September 30, 2002 Mike Anderson (andmike@us.ibm.com) 15 */ 16 17 #include <linux/module.h> 18 #include <linux/sched.h> 19 #include <linux/gfp.h> 20 #include <linux/timer.h> 21 #include <linux/string.h> 22 #include <linux/kernel.h> 23 #include <linux/freezer.h> 24 #include <linux/kthread.h> 25 #include <linux/interrupt.h> 26 #include <linux/blkdev.h> 27 #include <linux/delay.h> 28 29 #include <scsi/scsi.h> 30 #include <scsi/scsi_cmnd.h> 31 #include <scsi/scsi_dbg.h> 32 #include <scsi/scsi_device.h> 33 #include <scsi/scsi_eh.h> 34 #include <scsi/scsi_transport.h> 35 #include <scsi/scsi_host.h> 36 #include <scsi/scsi_ioctl.h> 37 38 #include "scsi_priv.h" 39 #include "scsi_logging.h" 40 #include "scsi_transport_api.h" 41 42 #include <trace/events/scsi.h> 43 44 #define SENSE_TIMEOUT (10*HZ) 45 46 /* 47 * These should *probably* be handled by the host itself. 48 * Since it is allowed to sleep, it probably should. 49 */ 50 #define BUS_RESET_SETTLE_TIME (10) 51 #define HOST_RESET_SETTLE_TIME (10) 52 53 /* called with shost->host_lock held */ 54 void scsi_eh_wakeup(struct Scsi_Host *shost) 55 { 56 if (shost->host_busy == shost->host_failed) { 57 trace_scsi_eh_wakeup(shost); 58 wake_up_process(shost->ehandler); 59 SCSI_LOG_ERROR_RECOVERY(5, 60 printk("Waking error handler thread\n")); 61 } 62 } 63 64 /** 65 * scsi_schedule_eh - schedule EH for SCSI host 66 * @shost: SCSI host to invoke error handling on. 67 * 68 * Schedule SCSI EH without scmd. 
69 */ 70 void scsi_schedule_eh(struct Scsi_Host *shost) 71 { 72 unsigned long flags; 73 74 spin_lock_irqsave(shost->host_lock, flags); 75 76 if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 || 77 scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) { 78 shost->host_eh_scheduled++; 79 scsi_eh_wakeup(shost); 80 } 81 82 spin_unlock_irqrestore(shost->host_lock, flags); 83 } 84 EXPORT_SYMBOL_GPL(scsi_schedule_eh); 85 86 /** 87 * scsi_eh_scmd_add - add scsi cmd to error handling. 88 * @scmd: scmd to run eh on. 89 * @eh_flag: optional SCSI_EH flag. 90 * 91 * Return value: 92 * 0 on failure. 93 */ 94 int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag) 95 { 96 struct Scsi_Host *shost = scmd->device->host; 97 unsigned long flags; 98 int ret = 0; 99 100 if (!shost->ehandler) 101 return 0; 102 103 spin_lock_irqsave(shost->host_lock, flags); 104 if (scsi_host_set_state(shost, SHOST_RECOVERY)) 105 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) 106 goto out_unlock; 107 108 ret = 1; 109 scmd->eh_eflags |= eh_flag; 110 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); 111 shost->host_failed++; 112 scsi_eh_wakeup(shost); 113 out_unlock: 114 spin_unlock_irqrestore(shost->host_lock, flags); 115 return ret; 116 } 117 118 /** 119 * scsi_times_out - Timeout function for normal scsi commands. 120 * @req: request that is timing out. 121 * 122 * Notes: 123 * We do not need to lock this. There is the potential for a race 124 * only in that the normal completion handling might run, but if the 125 * normal completion function determines that the timer has already 126 * fired, then it mustn't do anything. 
127 */ 128 enum blk_eh_timer_return scsi_times_out(struct request *req) 129 { 130 struct scsi_cmnd *scmd = req->special; 131 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED; 132 struct Scsi_Host *host = scmd->device->host; 133 134 trace_scsi_dispatch_cmd_timeout(scmd); 135 scsi_log_completion(scmd, TIMEOUT_ERROR); 136 137 if (host->transportt->eh_timed_out) 138 rtn = host->transportt->eh_timed_out(scmd); 139 else if (host->hostt->eh_timed_out) 140 rtn = host->hostt->eh_timed_out(scmd); 141 142 if (unlikely(rtn == BLK_EH_NOT_HANDLED && 143 !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 144 scmd->result |= DID_TIME_OUT << 16; 145 rtn = BLK_EH_HANDLED; 146 } 147 148 return rtn; 149 } 150 151 /** 152 * scsi_block_when_processing_errors - Prevent cmds from being queued. 153 * @sdev: Device on which we are performing recovery. 154 * 155 * Description: 156 * We block until the host is out of error recovery, and then check to 157 * see whether the host or the device is offline. 158 * 159 * Return value: 160 * 0 when dev was taken offline by error recovery. 1 OK to proceed. 161 */ 162 int scsi_block_when_processing_errors(struct scsi_device *sdev) 163 { 164 int online; 165 166 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host)); 167 168 online = scsi_device_online(sdev); 169 170 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__, 171 online)); 172 173 return online; 174 } 175 EXPORT_SYMBOL(scsi_block_when_processing_errors); 176 177 #ifdef CONFIG_SCSI_LOGGING 178 /** 179 * scsi_eh_prt_fail_stats - Log info on failures. 180 * @shost: scsi host being recovered. 181 * @work_q: Queue of scsi cmds to process. 
182 */ 183 static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, 184 struct list_head *work_q) 185 { 186 struct scsi_cmnd *scmd; 187 struct scsi_device *sdev; 188 int total_failures = 0; 189 int cmd_failed = 0; 190 int cmd_cancel = 0; 191 int devices_failed = 0; 192 193 shost_for_each_device(sdev, shost) { 194 list_for_each_entry(scmd, work_q, eh_entry) { 195 if (scmd->device == sdev) { 196 ++total_failures; 197 if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) 198 ++cmd_cancel; 199 else 200 ++cmd_failed; 201 } 202 } 203 204 if (cmd_cancel || cmd_failed) { 205 SCSI_LOG_ERROR_RECOVERY(3, 206 sdev_printk(KERN_INFO, sdev, 207 "%s: cmds failed: %d, cancel: %d\n", 208 __func__, cmd_failed, 209 cmd_cancel)); 210 cmd_cancel = 0; 211 cmd_failed = 0; 212 ++devices_failed; 213 } 214 } 215 216 SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d" 217 " devices require eh work\n", 218 total_failures, devices_failed)); 219 } 220 #endif 221 222 /** 223 * scsi_check_sense - Examine scsi cmd sense 224 * @scmd: Cmd to have sense checked. 225 * 226 * Return value: 227 * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR 228 * 229 * Notes: 230 * When a deferred error is detected the current command has 231 * not been executed and needs retrying. 232 */ 233 static int scsi_check_sense(struct scsi_cmnd *scmd) 234 { 235 struct scsi_device *sdev = scmd->device; 236 struct scsi_sense_hdr sshdr; 237 238 if (! scsi_command_normalize_sense(scmd, &sshdr)) 239 return FAILED; /* no valid sense data */ 240 241 if (scsi_sense_is_deferred(&sshdr)) 242 return NEEDS_RETRY; 243 244 if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh && 245 sdev->scsi_dh_data->scsi_dh->check_sense) { 246 int rc; 247 248 rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr); 249 if (rc != SCSI_RETURN_NOT_HANDLED) 250 return rc; 251 /* handler does not care. 
Drop down to default handling */ 252 } 253 254 /* 255 * Previous logic looked for FILEMARK, EOM or ILI which are 256 * mainly associated with tapes and returned SUCCESS. 257 */ 258 if (sshdr.response_code == 0x70) { 259 /* fixed format */ 260 if (scmd->sense_buffer[2] & 0xe0) 261 return SUCCESS; 262 } else { 263 /* 264 * descriptor format: look for "stream commands sense data 265 * descriptor" (see SSC-3). Assume single sense data 266 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG. 267 */ 268 if ((sshdr.additional_length > 3) && 269 (scmd->sense_buffer[8] == 0x4) && 270 (scmd->sense_buffer[11] & 0xe0)) 271 return SUCCESS; 272 } 273 274 switch (sshdr.sense_key) { 275 case NO_SENSE: 276 return SUCCESS; 277 case RECOVERED_ERROR: 278 return /* soft_error */ SUCCESS; 279 280 case ABORTED_COMMAND: 281 if (sshdr.asc == 0x10) /* DIF */ 282 return SUCCESS; 283 284 return NEEDS_RETRY; 285 case NOT_READY: 286 case UNIT_ATTENTION: 287 /* 288 * if we are expecting a cc/ua because of a bus reset that we 289 * performed, treat this just as a retry. otherwise this is 290 * information that we should pass up to the upper-level driver 291 * so that we can deal with it there. 292 */ 293 if (scmd->device->expecting_cc_ua) { 294 scmd->device->expecting_cc_ua = 0; 295 return NEEDS_RETRY; 296 } 297 /* 298 * if the device is in the process of becoming ready, we 299 * should retry. 300 */ 301 if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01)) 302 return NEEDS_RETRY; 303 /* 304 * if the device is not started, we need to wake 305 * the error handler to start the motor 306 */ 307 if (scmd->device->allow_restart && 308 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) 309 return FAILED; 310 311 if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e) 312 scmd_printk(KERN_WARNING, scmd, 313 "Warning! Received an indication that the " 314 "LUN assignments on this target have " 315 "changed. 
The Linux SCSI layer does not " 316 "automatically remap LUN assignments.\n"); 317 else if (sshdr.asc == 0x3f) 318 scmd_printk(KERN_WARNING, scmd, 319 "Warning! Received an indication that the " 320 "operating parameters on this target have " 321 "changed. The Linux SCSI layer does not " 322 "automatically adjust these parameters.\n"); 323 324 if (sshdr.asc == 0x38 && sshdr.ascq == 0x07) 325 scmd_printk(KERN_WARNING, scmd, 326 "Warning! Received an indication that the " 327 "LUN reached a thin provisioning soft " 328 "threshold.\n"); 329 330 /* 331 * Pass the UA upwards for a determination in the completion 332 * functions. 333 */ 334 return SUCCESS; 335 336 /* these are not supported */ 337 case COPY_ABORTED: 338 case VOLUME_OVERFLOW: 339 case MISCOMPARE: 340 case BLANK_CHECK: 341 case DATA_PROTECT: 342 return TARGET_ERROR; 343 344 case MEDIUM_ERROR: 345 if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */ 346 sshdr.asc == 0x13 || /* AMNF DATA FIELD */ 347 sshdr.asc == 0x14) { /* RECORD NOT FOUND */ 348 return TARGET_ERROR; 349 } 350 return NEEDS_RETRY; 351 352 case HARDWARE_ERROR: 353 if (scmd->device->retry_hwerror) 354 return ADD_TO_MLQUEUE; 355 else 356 return TARGET_ERROR; 357 358 case ILLEGAL_REQUEST: 359 default: 360 return SUCCESS; 361 } 362 } 363 364 static void scsi_handle_queue_ramp_up(struct scsi_device *sdev) 365 { 366 struct scsi_host_template *sht = sdev->host->hostt; 367 struct scsi_device *tmp_sdev; 368 369 if (!sht->change_queue_depth || 370 sdev->queue_depth >= sdev->max_queue_depth) 371 return; 372 373 if (time_before(jiffies, 374 sdev->last_queue_ramp_up + sdev->queue_ramp_up_period)) 375 return; 376 377 if (time_before(jiffies, 378 sdev->last_queue_full_time + sdev->queue_ramp_up_period)) 379 return; 380 381 /* 382 * Walk all devices of a target and do 383 * ramp up on them. 
384 */ 385 shost_for_each_device(tmp_sdev, sdev->host) { 386 if (tmp_sdev->channel != sdev->channel || 387 tmp_sdev->id != sdev->id || 388 tmp_sdev->queue_depth == sdev->max_queue_depth) 389 continue; 390 /* 391 * call back into LLD to increase queue_depth by one 392 * with ramp up reason code. 393 */ 394 sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1, 395 SCSI_QDEPTH_RAMP_UP); 396 sdev->last_queue_ramp_up = jiffies; 397 } 398 } 399 400 static void scsi_handle_queue_full(struct scsi_device *sdev) 401 { 402 struct scsi_host_template *sht = sdev->host->hostt; 403 struct scsi_device *tmp_sdev; 404 405 if (!sht->change_queue_depth) 406 return; 407 408 shost_for_each_device(tmp_sdev, sdev->host) { 409 if (tmp_sdev->channel != sdev->channel || 410 tmp_sdev->id != sdev->id) 411 continue; 412 /* 413 * We do not know the number of commands that were at 414 * the device when we got the queue full so we start 415 * from the highest possible value and work our way down. 416 */ 417 sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1, 418 SCSI_QDEPTH_QFULL); 419 } 420 } 421 422 /** 423 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD. 424 * @scmd: SCSI cmd to examine. 425 * 426 * Notes: 427 * This is *only* called when we are examining the status of commands 428 * queued during error recovery. the main difference here is that we 429 * don't allow for the possibility of retries here, and we are a lot 430 * more restrictive about what we consider acceptable. 431 */ 432 static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) 433 { 434 /* 435 * first check the host byte, to see if there is anything in there 436 * that would indicate what we need to do. 437 */ 438 if (host_byte(scmd->result) == DID_RESET) { 439 /* 440 * rats. we are already in the error handler, so we now 441 * get to try and figure out what to do next. if the sense 442 * is valid, we have a pretty good idea of what to do. 443 * if not, we mark it as FAILED. 
444 */ 445 return scsi_check_sense(scmd); 446 } 447 if (host_byte(scmd->result) != DID_OK) 448 return FAILED; 449 450 /* 451 * next, check the message byte. 452 */ 453 if (msg_byte(scmd->result) != COMMAND_COMPLETE) 454 return FAILED; 455 456 /* 457 * now, check the status byte to see if this indicates 458 * anything special. 459 */ 460 switch (status_byte(scmd->result)) { 461 case GOOD: 462 scsi_handle_queue_ramp_up(scmd->device); 463 case COMMAND_TERMINATED: 464 return SUCCESS; 465 case CHECK_CONDITION: 466 return scsi_check_sense(scmd); 467 case CONDITION_GOOD: 468 case INTERMEDIATE_GOOD: 469 case INTERMEDIATE_C_GOOD: 470 /* 471 * who knows? FIXME(eric) 472 */ 473 return SUCCESS; 474 case RESERVATION_CONFLICT: 475 if (scmd->cmnd[0] == TEST_UNIT_READY) 476 /* it is a success, we probed the device and 477 * found it */ 478 return SUCCESS; 479 /* otherwise, we failed to send the command */ 480 return FAILED; 481 case QUEUE_FULL: 482 scsi_handle_queue_full(scmd->device); 483 /* fall through */ 484 case BUSY: 485 return NEEDS_RETRY; 486 default: 487 return FAILED; 488 } 489 return FAILED; 490 } 491 492 /** 493 * scsi_eh_done - Completion function for error handling. 494 * @scmd: Cmd that is done. 495 */ 496 static void scsi_eh_done(struct scsi_cmnd *scmd) 497 { 498 struct completion *eh_action; 499 500 SCSI_LOG_ERROR_RECOVERY(3, 501 printk("%s scmd: %p result: %x\n", 502 __func__, scmd, scmd->result)); 503 504 eh_action = scmd->device->host->eh_action; 505 if (eh_action) 506 complete(eh_action); 507 } 508 509 /** 510 * scsi_try_host_reset - ask host adapter to reset itself 511 * @scmd: SCSI cmd to send hsot reset. 
512 */ 513 static int scsi_try_host_reset(struct scsi_cmnd *scmd) 514 { 515 unsigned long flags; 516 int rtn; 517 struct Scsi_Host *host = scmd->device->host; 518 struct scsi_host_template *hostt = host->hostt; 519 520 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n", 521 __func__)); 522 523 if (!hostt->eh_host_reset_handler) 524 return FAILED; 525 526 rtn = hostt->eh_host_reset_handler(scmd); 527 528 if (rtn == SUCCESS) { 529 if (!hostt->skip_settle_delay) 530 ssleep(HOST_RESET_SETTLE_TIME); 531 spin_lock_irqsave(host->host_lock, flags); 532 scsi_report_bus_reset(host, scmd_channel(scmd)); 533 spin_unlock_irqrestore(host->host_lock, flags); 534 } 535 536 return rtn; 537 } 538 539 /** 540 * scsi_try_bus_reset - ask host to perform a bus reset 541 * @scmd: SCSI cmd to send bus reset. 542 */ 543 static int scsi_try_bus_reset(struct scsi_cmnd *scmd) 544 { 545 unsigned long flags; 546 int rtn; 547 struct Scsi_Host *host = scmd->device->host; 548 struct scsi_host_template *hostt = host->hostt; 549 550 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n", 551 __func__)); 552 553 if (!hostt->eh_bus_reset_handler) 554 return FAILED; 555 556 rtn = hostt->eh_bus_reset_handler(scmd); 557 558 if (rtn == SUCCESS) { 559 if (!hostt->skip_settle_delay) 560 ssleep(BUS_RESET_SETTLE_TIME); 561 spin_lock_irqsave(host->host_lock, flags); 562 scsi_report_bus_reset(host, scmd_channel(scmd)); 563 spin_unlock_irqrestore(host->host_lock, flags); 564 } 565 566 return rtn; 567 } 568 569 static void __scsi_report_device_reset(struct scsi_device *sdev, void *data) 570 { 571 sdev->was_reset = 1; 572 sdev->expecting_cc_ua = 1; 573 } 574 575 /** 576 * scsi_try_target_reset - Ask host to perform a target reset 577 * @scmd: SCSI cmd used to send a target reset 578 * 579 * Notes: 580 * There is no timeout for this operation. 
if this operation is 581 * unreliable for a given host, then the host itself needs to put a 582 * timer on it, and set the host back to a consistent state prior to 583 * returning. 584 */ 585 static int scsi_try_target_reset(struct scsi_cmnd *scmd) 586 { 587 unsigned long flags; 588 int rtn; 589 struct Scsi_Host *host = scmd->device->host; 590 struct scsi_host_template *hostt = host->hostt; 591 592 if (!hostt->eh_target_reset_handler) 593 return FAILED; 594 595 rtn = hostt->eh_target_reset_handler(scmd); 596 if (rtn == SUCCESS) { 597 spin_lock_irqsave(host->host_lock, flags); 598 __starget_for_each_device(scsi_target(scmd->device), NULL, 599 __scsi_report_device_reset); 600 spin_unlock_irqrestore(host->host_lock, flags); 601 } 602 603 return rtn; 604 } 605 606 /** 607 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev 608 * @scmd: SCSI cmd used to send BDR 609 * 610 * Notes: 611 * There is no timeout for this operation. if this operation is 612 * unreliable for a given host, then the host itself needs to put a 613 * timer on it, and set the host back to a consistent state prior to 614 * returning. 
615 */ 616 static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) 617 { 618 int rtn; 619 struct scsi_host_template *hostt = scmd->device->host->hostt; 620 621 if (!hostt->eh_device_reset_handler) 622 return FAILED; 623 624 rtn = hostt->eh_device_reset_handler(scmd); 625 if (rtn == SUCCESS) 626 __scsi_report_device_reset(scmd->device, NULL); 627 return rtn; 628 } 629 630 static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd) 631 { 632 if (!hostt->eh_abort_handler) 633 return FAILED; 634 635 return hostt->eh_abort_handler(scmd); 636 } 637 638 static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd) 639 { 640 if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS) 641 if (scsi_try_bus_device_reset(scmd) != SUCCESS) 642 if (scsi_try_target_reset(scmd) != SUCCESS) 643 if (scsi_try_bus_reset(scmd) != SUCCESS) 644 scsi_try_host_reset(scmd); 645 } 646 647 /** 648 * scsi_eh_prep_cmnd - Save a scsi command info as part of error recory 649 * @scmd: SCSI command structure to hijack 650 * @ses: structure to save restore information 651 * @cmnd: CDB to send. Can be NULL if no new cmnd is needed 652 * @cmnd_size: size in bytes of @cmnd (must be <= BLK_MAX_CDB) 653 * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored) 654 * 655 * This function is used to save a scsi command information before re-execution 656 * as part of the error recovery process. If @sense_bytes is 0 the command 657 * sent must be one that does not transfer any data. If @sense_bytes != 0 658 * @cmnd is ignored and this functions sets up a REQUEST_SENSE command 659 * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer. 
660 */ 661 void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, 662 unsigned char *cmnd, int cmnd_size, unsigned sense_bytes) 663 { 664 struct scsi_device *sdev = scmd->device; 665 666 /* 667 * We need saved copies of a number of fields - this is because 668 * error handling may need to overwrite these with different values 669 * to run different commands, and once error handling is complete, 670 * we will need to restore these values prior to running the actual 671 * command. 672 */ 673 ses->cmd_len = scmd->cmd_len; 674 ses->cmnd = scmd->cmnd; 675 ses->data_direction = scmd->sc_data_direction; 676 ses->sdb = scmd->sdb; 677 ses->next_rq = scmd->request->next_rq; 678 ses->result = scmd->result; 679 ses->underflow = scmd->underflow; 680 ses->prot_op = scmd->prot_op; 681 682 scmd->prot_op = SCSI_PROT_NORMAL; 683 scmd->cmnd = ses->eh_cmnd; 684 memset(scmd->cmnd, 0, BLK_MAX_CDB); 685 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); 686 scmd->request->next_rq = NULL; 687 688 if (sense_bytes) { 689 scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE, 690 sense_bytes); 691 sg_init_one(&ses->sense_sgl, scmd->sense_buffer, 692 scmd->sdb.length); 693 scmd->sdb.table.sgl = &ses->sense_sgl; 694 scmd->sc_data_direction = DMA_FROM_DEVICE; 695 scmd->sdb.table.nents = 1; 696 scmd->cmnd[0] = REQUEST_SENSE; 697 scmd->cmnd[4] = scmd->sdb.length; 698 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 699 } else { 700 scmd->sc_data_direction = DMA_NONE; 701 if (cmnd) { 702 BUG_ON(cmnd_size > BLK_MAX_CDB); 703 memcpy(scmd->cmnd, cmnd, cmnd_size); 704 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 705 } 706 } 707 708 scmd->underflow = 0; 709 710 if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN) 711 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | 712 (sdev->lun << 5 & 0xe0); 713 714 /* 715 * Zero the sense buffer. The scsi spec mandates that any 716 * untransferred sense data should be interpreted as being zero. 
717 */ 718 memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 719 } 720 EXPORT_SYMBOL(scsi_eh_prep_cmnd); 721 722 /** 723 * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory 724 * @scmd: SCSI command structure to restore 725 * @ses: saved information from a coresponding call to scsi_eh_prep_cmnd 726 * 727 * Undo any damage done by above scsi_eh_prep_cmnd(). 728 */ 729 void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) 730 { 731 /* 732 * Restore original data 733 */ 734 scmd->cmd_len = ses->cmd_len; 735 scmd->cmnd = ses->cmnd; 736 scmd->sc_data_direction = ses->data_direction; 737 scmd->sdb = ses->sdb; 738 scmd->request->next_rq = ses->next_rq; 739 scmd->result = ses->result; 740 scmd->underflow = ses->underflow; 741 scmd->prot_op = ses->prot_op; 742 } 743 EXPORT_SYMBOL(scsi_eh_restore_cmnd); 744 745 /** 746 * scsi_send_eh_cmnd - submit a scsi command as part of error recory 747 * @scmd: SCSI command structure to hijack 748 * @cmnd: CDB to send 749 * @cmnd_size: size in bytes of @cmnd 750 * @timeout: timeout for this request 751 * @sense_bytes: size of sense data to copy or 0 752 * 753 * This function is used to send a scsi command down to a target device 754 * as part of the error recovery process. See also scsi_eh_prep_cmnd() above. 
755 * 756 * Return value: 757 * SUCCESS or FAILED or NEEDS_RETRY 758 */ 759 static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, 760 int cmnd_size, int timeout, unsigned sense_bytes) 761 { 762 struct scsi_device *sdev = scmd->device; 763 struct Scsi_Host *shost = sdev->host; 764 DECLARE_COMPLETION_ONSTACK(done); 765 unsigned long timeleft; 766 struct scsi_eh_save ses; 767 int rtn; 768 769 scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); 770 shost->eh_action = &done; 771 772 scsi_log_send(scmd); 773 scmd->scsi_done = scsi_eh_done; 774 shost->hostt->queuecommand(shost, scmd); 775 776 timeleft = wait_for_completion_timeout(&done, timeout); 777 778 shost->eh_action = NULL; 779 780 scsi_log_completion(scmd, SUCCESS); 781 782 SCSI_LOG_ERROR_RECOVERY(3, 783 printk("%s: scmd: %p, timeleft: %ld\n", 784 __func__, scmd, timeleft)); 785 786 /* 787 * If there is time left scsi_eh_done got called, and we will 788 * examine the actual status codes to see whether the command 789 * actually did complete normally, else tell the host to forget 790 * about this command. 791 */ 792 if (timeleft) { 793 rtn = scsi_eh_completed_normally(scmd); 794 SCSI_LOG_ERROR_RECOVERY(3, 795 printk("%s: scsi_eh_completed_normally %x\n", 796 __func__, rtn)); 797 798 switch (rtn) { 799 case SUCCESS: 800 case NEEDS_RETRY: 801 case FAILED: 802 case TARGET_ERROR: 803 break; 804 case ADD_TO_MLQUEUE: 805 rtn = NEEDS_RETRY; 806 break; 807 default: 808 rtn = FAILED; 809 break; 810 } 811 } else { 812 scsi_abort_eh_cmnd(scmd); 813 rtn = FAILED; 814 } 815 816 scsi_eh_restore_cmnd(scmd, &ses); 817 return rtn; 818 } 819 820 /** 821 * scsi_request_sense - Request sense data from a particular target. 822 * @scmd: SCSI cmd for request sense. 823 * 824 * Notes: 825 * Some hosts automatically obtain this information, others require 826 * that we obtain it on our own. This function will *not* return until 827 * the command either times out, or it completes. 
828 */ 829 static int scsi_request_sense(struct scsi_cmnd *scmd) 830 { 831 return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0); 832 } 833 834 /** 835 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with. 836 * @scmd: Original SCSI cmd that eh has finished. 837 * @done_q: Queue for processed commands. 838 * 839 * Notes: 840 * We don't want to use the normal command completion while we are are 841 * still handling errors - it may cause other commands to be queued, 842 * and that would disturb what we are doing. Thus we really want to 843 * keep a list of pending commands for final completion, and once we 844 * are ready to leave error handling we handle completion for real. 845 */ 846 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) 847 { 848 scmd->device->host->host_failed--; 849 scmd->eh_eflags = 0; 850 list_move_tail(&scmd->eh_entry, done_q); 851 } 852 EXPORT_SYMBOL(scsi_eh_finish_cmd); 853 854 /** 855 * scsi_eh_get_sense - Get device sense data. 856 * @work_q: Queue of commands to process. 857 * @done_q: Queue of processed commands. 858 * 859 * Description: 860 * See if we need to request sense information. if so, then get it 861 * now, so we have a better idea of what to do. 862 * 863 * Notes: 864 * This has the unfortunate side effect that if a shost adapter does 865 * not automatically request sense information, we end up shutting 866 * it down before we request it. 867 * 868 * All drivers should request sense information internally these days, 869 * so for now all I have to say is tough noogies if you end up in here. 870 * 871 * XXX: Long term this code should go away, but that needs an audit of 872 * all LLDDs first. 
873 */ 874 int scsi_eh_get_sense(struct list_head *work_q, 875 struct list_head *done_q) 876 { 877 struct scsi_cmnd *scmd, *next; 878 int rtn; 879 880 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 881 if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) || 882 SCSI_SENSE_VALID(scmd)) 883 continue; 884 885 SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd, 886 "%s: requesting sense\n", 887 current->comm)); 888 rtn = scsi_request_sense(scmd); 889 if (rtn != SUCCESS) 890 continue; 891 892 SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p" 893 " result %x\n", scmd, 894 scmd->result)); 895 SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd)); 896 897 rtn = scsi_decide_disposition(scmd); 898 899 /* 900 * if the result was normal, then just pass it along to the 901 * upper level. 902 */ 903 if (rtn == SUCCESS) 904 /* we don't want this command reissued, just 905 * finished with the sense data, so set 906 * retries to the max allowed to ensure it 907 * won't get reissued */ 908 scmd->retries = scmd->allowed; 909 else if (rtn != NEEDS_RETRY) 910 continue; 911 912 scsi_eh_finish_cmd(scmd, done_q); 913 } 914 915 return list_empty(work_q); 916 } 917 EXPORT_SYMBOL_GPL(scsi_eh_get_sense); 918 919 /** 920 * scsi_eh_tur - Send TUR to device. 921 * @scmd: &scsi_cmnd to send TUR 922 * 923 * Return value: 924 * 0 - Device is ready. 1 - Device NOT ready. 925 */ 926 static int scsi_eh_tur(struct scsi_cmnd *scmd) 927 { 928 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; 929 int retry_cnt = 1, rtn; 930 931 retry_tur: 932 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); 933 934 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 935 __func__, scmd, rtn)); 936 937 switch (rtn) { 938 case NEEDS_RETRY: 939 if (retry_cnt--) 940 goto retry_tur; 941 /*FALLTHRU*/ 942 case SUCCESS: 943 return 0; 944 default: 945 return 1; 946 } 947 } 948 949 /** 950 * scsi_eh_abort_cmds - abort pending commands. 
951 * @work_q: &list_head for pending commands. 952 * @done_q: &list_head for processed commands. 953 * 954 * Decription: 955 * Try and see whether or not it makes sense to try and abort the 956 * running command. This only works out to be the case if we have one 957 * command that has timed out. If the command simply failed, it makes 958 * no sense to try and abort the command, since as far as the shost 959 * adapter is concerned, it isn't running. 960 */ 961 static int scsi_eh_abort_cmds(struct list_head *work_q, 962 struct list_head *done_q) 963 { 964 struct scsi_cmnd *scmd, *next; 965 int rtn; 966 967 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 968 if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD)) 969 continue; 970 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:" 971 "0x%p\n", current->comm, 972 scmd)); 973 rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd); 974 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 975 scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; 976 if (!scsi_device_online(scmd->device) || 977 rtn == FAST_IO_FAIL || 978 !scsi_eh_tur(scmd)) { 979 scsi_eh_finish_cmd(scmd, done_q); 980 } 981 } else 982 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting" 983 " cmd failed:" 984 "0x%p\n", 985 current->comm, 986 scmd)); 987 } 988 989 return list_empty(work_q); 990 } 991 992 /** 993 * scsi_eh_try_stu - Send START_UNIT to device. 994 * @scmd: &scsi_cmnd to send START_UNIT 995 * 996 * Return value: 997 * 0 - Device is ready. 1 - Device NOT ready. 
998 */ 999 static int scsi_eh_try_stu(struct scsi_cmnd *scmd) 1000 { 1001 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; 1002 1003 if (scmd->device->allow_restart) { 1004 int i, rtn = NEEDS_RETRY; 1005 1006 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++) 1007 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0); 1008 1009 if (rtn == SUCCESS) 1010 return 0; 1011 } 1012 1013 return 1; 1014 } 1015 1016 /** 1017 * scsi_eh_stu - send START_UNIT if needed 1018 * @shost: &scsi host being recovered. 1019 * @work_q: &list_head for pending commands. 1020 * @done_q: &list_head for processed commands. 1021 * 1022 * Notes: 1023 * If commands are failing due to not ready, initializing command required, 1024 * try revalidating the device, which will end up sending a start unit. 1025 */ 1026 static int scsi_eh_stu(struct Scsi_Host *shost, 1027 struct list_head *work_q, 1028 struct list_head *done_q) 1029 { 1030 struct scsi_cmnd *scmd, *stu_scmd, *next; 1031 struct scsi_device *sdev; 1032 1033 shost_for_each_device(sdev, shost) { 1034 stu_scmd = NULL; 1035 list_for_each_entry(scmd, work_q, eh_entry) 1036 if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) && 1037 scsi_check_sense(scmd) == FAILED ) { 1038 stu_scmd = scmd; 1039 break; 1040 } 1041 1042 if (!stu_scmd) 1043 continue; 1044 1045 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:" 1046 " 0x%p\n", current->comm, sdev)); 1047 1048 if (!scsi_eh_try_stu(stu_scmd)) { 1049 if (!scsi_device_online(sdev) || 1050 !scsi_eh_tur(stu_scmd)) { 1051 list_for_each_entry_safe(scmd, next, 1052 work_q, eh_entry) { 1053 if (scmd->device == sdev) 1054 scsi_eh_finish_cmd(scmd, done_q); 1055 } 1056 } 1057 } else { 1058 SCSI_LOG_ERROR_RECOVERY(3, 1059 printk("%s: START_UNIT failed to sdev:" 1060 " 0x%p\n", current->comm, sdev)); 1061 } 1062 } 1063 1064 return list_empty(work_q); 1065 } 1066 1067 1068 /** 1069 * scsi_eh_bus_device_reset - send bdr if needed 1070 * @shost: scsi 
host being recovered. 1071 * @work_q: &list_head for pending commands. 1072 * @done_q: &list_head for processed commands. 1073 * 1074 * Notes: 1075 * Try a bus device reset. Still, look to see whether we have multiple 1076 * devices that are jammed or not - if we have multiple devices, it 1077 * makes no sense to try bus_device_reset - we really would need to try 1078 * a bus_reset instead. 1079 */ 1080 static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, 1081 struct list_head *work_q, 1082 struct list_head *done_q) 1083 { 1084 struct scsi_cmnd *scmd, *bdr_scmd, *next; 1085 struct scsi_device *sdev; 1086 int rtn; 1087 1088 shost_for_each_device(sdev, shost) { 1089 bdr_scmd = NULL; 1090 list_for_each_entry(scmd, work_q, eh_entry) 1091 if (scmd->device == sdev) { 1092 bdr_scmd = scmd; 1093 break; 1094 } 1095 1096 if (!bdr_scmd) 1097 continue; 1098 1099 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:" 1100 " 0x%p\n", current->comm, 1101 sdev)); 1102 rtn = scsi_try_bus_device_reset(bdr_scmd); 1103 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { 1104 if (!scsi_device_online(sdev) || 1105 rtn == FAST_IO_FAIL || 1106 !scsi_eh_tur(bdr_scmd)) { 1107 list_for_each_entry_safe(scmd, next, 1108 work_q, eh_entry) { 1109 if (scmd->device == sdev) 1110 scsi_eh_finish_cmd(scmd, 1111 done_q); 1112 } 1113 } 1114 } else { 1115 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR" 1116 " failed sdev:" 1117 "0x%p\n", 1118 current->comm, 1119 sdev)); 1120 } 1121 } 1122 1123 return list_empty(work_q); 1124 } 1125 1126 /** 1127 * scsi_eh_target_reset - send target reset if needed 1128 * @shost: scsi host being recovered. 1129 * @work_q: &list_head for pending commands. 1130 * @done_q: &list_head for processed commands. 1131 * 1132 * Notes: 1133 * Try a target reset. 
 *
 *    Returns nonzero when @work_q has been emptied, zero otherwise.
 */
static int scsi_eh_target_reset(struct Scsi_Host *shost,
				struct list_head *work_q,
				struct list_head *done_q)
{
	LIST_HEAD(tmp_list);

	/* drain work_q; commands migrate back as each target is handled */
	list_splice_init(work_q, &tmp_list);

	while (!list_empty(&tmp_list)) {
		struct scsi_cmnd *next, *scmd;
		int rtn;
		unsigned int id;

		/* reset the target of the first remaining command ... */
		scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
		id = scmd_id(scmd);

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
						  "to target %d\n",
						  current->comm, id));
		rtn = scsi_try_target_reset(scmd);
		if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset"
							  " failed target: "
							  "%d\n",
							  current->comm, id));
		/* ... then disposition every command aimed at that target */
		list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
			if (scmd_id(scmd) != id)
				continue;

			if ((rtn == SUCCESS || rtn == FAST_IO_FAIL)
			    && (!scsi_device_online(scmd->device) ||
				rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd)))
				scsi_eh_finish_cmd(scmd, done_q);
			else
				/* push back on work queue for further processing */
				list_move(&scmd->eh_entry, work_q);
		}
	}

	return list_empty(work_q);
}

/**
 * scsi_eh_bus_reset - send a bus reset
 * @shost: &scsi host being recovered.
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Returns nonzero when @work_q has been emptied, zero otherwise.
 */
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *chan_scmd, *next;
	unsigned int channel;
	int rtn;

	/*
	 * we really want to loop over the various channels, and do this on
	 * a channel by channel basis.  we should also check to see if any
	 * of the failed commands are on soft_reset devices, and if so, skip
	 * the reset.
	 */

	for (channel = 0; channel <= shost->max_channel; channel++) {
		/* find one failed command on this channel to carry the reset */
		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd_channel(scmd)) {
				chan_scmd = scmd;
				break;
				/*
				 * FIXME add back in some support for
				 * soft_reset devices.
				 */
			}
		}

		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
						  " %d\n", current->comm,
						  channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			/* retire recovered commands on this channel */
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (channel == scmd_channel(scmd))
					if (!scsi_device_online(scmd->device) ||
					    rtn == FAST_IO_FAIL ||
					    !scsi_eh_tur(scmd))
						scsi_eh_finish_cmd(scmd,
								   done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
							  " failed chan: %d\n",
							  current->comm,
							  channel));
		}
	}
	return list_empty(work_q);
}

/**
 * scsi_eh_host_reset - send a host reset
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Returns nonzero when @work_q has been emptied, zero otherwise.
 */
static int scsi_eh_host_reset(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	int rtn;

	if (!list_empty(work_q)) {
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
						  , current->comm));

		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				/*
				 * Retire the command if the device is gone,
				 * fast failure was requested, or the device
				 * answers TUR (after an optional START UNIT).
				 * NOTE(review): the trailing !scsi_eh_tur()
				 * term re-sends a TUR when the first one
				 * failed but START UNIT succeeded -- looks
				 * intentional (one retry after spin-up) but
				 * worth confirming.
				 */
				if (!scsi_device_online(scmd->device) ||
				    rtn == FAST_IO_FAIL ||
				    (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
				    !scsi_eh_tur(scmd))
					scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
							  " failed\n",
							  current->comm));
		}
	}
	return list_empty(work_q);
}

/**
 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
 * @work_q: &list_head for pending commands.
 * @done_q: &list_head for processed commands.
 *
 * Last resort: every command still on @work_q has survived all resets,
 * so its device is taken offline and the command retired.
 */
static void scsi_eh_offline_sdevs(struct list_head *work_q,
				  struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
			    "not ready after error recovery\n");
		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
		if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
			/*
			 * FIXME: Handle lost cmds.
			 */
		}
		scsi_eh_finish_cmd(scmd, done_q);
	}
	return;
}

/**
 * scsi_noretry_cmd - determine if command should be failed fast
 * @scmd: SCSI cmd to examine.
 *
 * Returns nonzero when the command must not be retried (fail fast),
 * zero when normal retry handling applies.
 */
int scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
	switch (host_byte(scmd->result)) {
	case DID_OK:
		break;
	case DID_BUS_BUSY:
		return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
	case DID_PARITY:
		return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			return 0;
		/* fall through */
	case DID_SOFT_ERROR:
		return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
	}

	switch (status_byte(scmd->result)) {
	case CHECK_CONDITION:
		/*
		 * assume caller has checked sense and determined
		 * the check condition was retryable.
		 */
		if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
		    scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
			return 1;
	}

	return 0;
}

/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd: SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status after sending
 *    out the actual data command.  any commands that are queued for error
 *    recovery (e.g. test_unit_ready) do *not* come through here.
 *
 *    When this routine returns failed, it means the error handler thread
 *    is woken.  In cases where the error code indicates an error that
 *    doesn't require the error handler read (i.e. we don't need to
 *    abort/reset), this function should return SUCCESS.
 */
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	int rtn;

	/*
	 * if the device is offline, then we clearly just pass the result back
	 * up to the top level.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
						  " as SUCCESS\n",
						  __func__));
		return SUCCESS;
	}

	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * no matter what, pass this through to the upper layer.
		 * nuke this special code so that it looks like we are saying
		 * did_ok.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/*
		 * looks good.  drop through, and check the next byte.
		 */
		break;
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
	case DID_ABORT:
		/*
		 * note - this means that we just report the status back
		 * to the top level driver, not that we actually think
		 * that it indicates SUCCESS.
		 */
		return SUCCESS;
		/*
		 * when the low level driver returns did_soft_error,
		 * it is responsible for keeping an internal retry counter
		 * in order to avoid endless loops (db)
		 *
		 * actually this is a bug in this function here.  we should
		 * be mindful of the maximum number of retries specified
		 * and not get stuck in a loop.
		 */
	case DID_SOFT_ERROR:
		goto maybe_retry;
	case DID_IMM_RETRY:
		return NEEDS_RETRY;

	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;
	case DID_TRANSPORT_DISRUPTED:
		/*
		 * LLD/transport was disrupted during processing of the IO.
		 * The transport class is now blocked/blocking,
		 * and the transport will decide what to do with the IO
		 * based on its timers and recovery capabilities if
		 * there are enough retries.
		 */
		goto maybe_retry;
	case DID_TRANSPORT_FAILFAST:
		/*
		 * The transport decided to failfast the IO (most likely
		 * the fast io fail tmo fired), so send IO directly upwards.
		 */
		return SUCCESS;
	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			/*
			 * execute reservation conflict processing code
			 * lower down
			 */
			break;
		/* fallthrough */
	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * when we scan the bus, we get timeout messages for
		 * these commands if there is no device available.
		 * other hosts report did_no_connect for the same thing.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * check the status byte to see if this indicates anything special.
	 */
	switch (status_byte(scmd->result)) {
	case QUEUE_FULL:
		scsi_handle_queue_full(scmd->device);
		/*
		 * the case of trying to send too many commands to a
		 * tagged queueing device.
		 */
		/* fall through */
	case BUSY:
		/*
		 * device can't talk to us at the moment.  Should only
		 * occur (SAM-3) when the task queue is empty, so will cause
		 * the empty queue handling to trigger a stall in the
		 * device.
		 */
		return ADD_TO_MLQUEUE;
	case GOOD:
		scsi_handle_queue_ramp_up(scmd->device);
		/* fall through */
	case COMMAND_TERMINATED:
		return SUCCESS;
	case TASK_ABORTED:
		goto maybe_retry;
	case CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		else if (rtn == TARGET_ERROR) {
			/*
			 * Need to modify host byte to signal a
			 * permanent target failure
			 */
			scmd->result |= (DID_TARGET_FAILURE << 16);
			rtn = SUCCESS;
		}
		/* if rtn == FAILED, we have no sense information;
		 * returning FAILED will wake the error handler thread
		 * to collect the sense and redo the decide
		 * disposition */
		return rtn;
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
	case ACA_ACTIVE:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;

	case RESERVATION_CONFLICT:
		sdev_printk(KERN_INFO, scmd->device,
			    "reservation conflict\n");
		scmd->result |= (DID_NEXUS_FAILURE << 16);
		return SUCCESS; /* causes immediate i/o error */
	default:
		return FAILED;
	}
	return FAILED;

maybe_retry:

	/* we requeue for retry because the error was retryable, and
	 * the request was not marked fast fail.  Note that above,
	 * even if the request is marked fast fail, we still requeue
	 * for queue congestion conditions (QUEUE_FULL or BUSY) */
	if ((++scmd->retries) <= scmd->allowed
	    && !scsi_noretry_cmd(scmd)) {
		return NEEDS_RETRY;
	} else {
		/*
		 * no more retries - report this one back to upper level.
1527 */ 1528 return SUCCESS; 1529 } 1530 } 1531 1532 static void eh_lock_door_done(struct request *req, int uptodate) 1533 { 1534 __blk_put_request(req->q, req); 1535 } 1536 1537 /** 1538 * scsi_eh_lock_door - Prevent medium removal for the specified device 1539 * @sdev: SCSI device to prevent medium removal 1540 * 1541 * Locking: 1542 * We must be called from process context. 1543 * 1544 * Notes: 1545 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the 1546 * head of the devices request queue, and continue. 1547 */ 1548 static void scsi_eh_lock_door(struct scsi_device *sdev) 1549 { 1550 struct request *req; 1551 1552 /* 1553 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a 1554 * request becomes available 1555 */ 1556 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); 1557 1558 req->cmd[0] = ALLOW_MEDIUM_REMOVAL; 1559 req->cmd[1] = 0; 1560 req->cmd[2] = 0; 1561 req->cmd[3] = 0; 1562 req->cmd[4] = SCSI_REMOVAL_PREVENT; 1563 req->cmd[5] = 0; 1564 1565 req->cmd_len = COMMAND_SIZE(req->cmd[0]); 1566 1567 req->cmd_type = REQ_TYPE_BLOCK_PC; 1568 req->cmd_flags |= REQ_QUIET; 1569 req->timeout = 10 * HZ; 1570 req->retries = 5; 1571 1572 blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done); 1573 } 1574 1575 /** 1576 * scsi_restart_operations - restart io operations to the specified host. 1577 * @shost: Host we are restarting. 1578 * 1579 * Notes: 1580 * When we entered the error handler, we blocked all further i/o to 1581 * this device. we need to 'reverse' this process. 1582 */ 1583 static void scsi_restart_operations(struct Scsi_Host *shost) 1584 { 1585 struct scsi_device *sdev; 1586 unsigned long flags; 1587 1588 /* 1589 * If the door was locked, we need to insert a door lock request 1590 * onto the head of the SCSI request queue for the device. There 1591 * is no point trying to lock the door of an off-line device. 
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->locked)
			scsi_eh_lock_door(sdev);
	}

	/*
	 * next free up anything directly waiting upon the host.  this
	 * will be requests for character device operations, and also for
	 * ioctls to queued block devices.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
					  __func__));

	/* leave recovery state; fall back through CANCEL to DEL if needed */
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		if (scsi_host_set_state(shost, SHOST_CANCEL))
			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
	spin_unlock_irqrestore(shost->host_lock, flags);

	wake_up(&shost->host_wait);

	/*
	 * finally we need to re-initiate requests that may be pending.  we will
	 * have had everything blocked while error handling is taking place, and
	 * now that error recovery is done, we will need to ensure that these
	 * requests are started.
	 */
	scsi_run_host_queues(shost);
}

/**
 * scsi_eh_ready_devs - check device ready state and recover if not.
 * @shost: 	host to be recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 *
 * Escalation ladder: each step runs only if the previous one failed to
 * empty @work_q (each returns nonzero once the queue is empty).
 */
void scsi_eh_ready_devs(struct Scsi_Host *shost,
			struct list_head *work_q,
			struct list_head *done_q)
{
	if (!scsi_eh_stu(shost, work_q, done_q))
		if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
			if (!scsi_eh_target_reset(shost, work_q, done_q))
				if (!scsi_eh_bus_reset(shost, work_q, done_q))
					if (!scsi_eh_host_reset(work_q, done_q))
						scsi_eh_offline_sdevs(work_q,
								      done_q);
}
EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);

/**
 * scsi_eh_flush_done_q - finish processed commands or retry them.
 * @done_q: list_head of processed commands.
 *
 * Each command on @done_q is either requeued for a normal retry (device
 * online, retries left, not marked fail-fast) or completed back to the
 * upper layers.
 */
void scsi_eh_flush_done_q(struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
		list_del_init(&scmd->eh_entry);
		if (scsi_device_online(scmd->device) &&
		    !scsi_noretry_cmd(scmd) &&
		    (++scmd->retries <= scmd->allowed)) {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
							  " retry cmd: %p\n",
							  current->comm,
							  scmd));
				scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		} else {
			/*
			 * If just we got sense for the device (called
			 * scsi_eh_get_sense), scmd->result is already
			 * set, do not set DRIVER_TIMEOUT.
			 */
			if (!scmd->result)
				scmd->result |= (DRIVER_TIMEOUT << 24);
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
							" cmd: %p\n",
							current->comm, scmd));
			scsi_finish_command(scmd);
		}
	}
}
EXPORT_SYMBOL(scsi_eh_flush_done_q);

/**
 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
 * @shost:	Host to unjam.
 *
 * Notes:
 *    When we come in here, we *know* that all commands on the bus have
 *    either completed, failed or timed out.  we also know that no further
 *    commands are being sent to the host, so things are relatively quiet
 *    and we have freedom to fiddle with things as we wish.
 *
 *    This is only the *default* implementation.  it is possible for
 *    individual drivers to supply their own version of this function, and
 *    if the maintainer wishes to do this, it is strongly suggested that
 *    this function be taken as a template and modified.  this function
 *    was designed to correctly handle problems for about 95% of the
 *    different cases out there, and it should always provide at least a
 *    reasonable amount of error recovery.
 *
 *    Any command marked 'failed' or 'timeout' must eventually have
 *    scsi_finish_cmd() called for it.
we do all of the retry stuff
 *    here, so when we restart the host after we return it should have an
 *    empty queue.
 */
static void scsi_unjam_host(struct Scsi_Host *shost)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);
	LIST_HEAD(eh_done_q);

	/* take the whole failed-command list in one shot under the lock */
	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));

	/* escalate: collect sense -> abort -> device/target/bus/host reset */
	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
		if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
			scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);

	scsi_eh_flush_done_q(&eh_done_q);
}

/**
 * scsi_error_handler - SCSI error handler thread
 * @data:	Host for which we are running.
 *
 * Notes:
 *    This is the main error handling loop.  This is run as a kernel thread
 *    for every SCSI host and handles all error handling activity.
 */
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = data;

	/*
	 * We use TASK_INTERRUPTIBLE so that the thread is not
	 * counted against the load average as a running process.
	 * We never actually get interrupted because kthread_run
	 * disables signal delivery for the created thread.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		/*
		 * Sleep until every outstanding command has failed
		 * (host_failed == host_busy) or an explicit EH run was
		 * scheduled.  NOTE(review): host_failed/host_busy/
		 * host_eh_scheduled are read here without host_lock;
		 * this relies on scsi_eh_wakeup() being called after
		 * the counters are updated -- confirm the ordering.
		 */
		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
		    shost->host_failed != shost->host_busy) {
			SCSI_LOG_ERROR_RECOVERY(1,
				printk("Error handler scsi_eh_%d sleeping\n",
					shost->host_no));
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
			continue;
		}

		__set_current_state(TASK_RUNNING);
		SCSI_LOG_ERROR_RECOVERY(1,
			printk("Error handler scsi_eh_%d waking up\n",
				shost->host_no));

		/*
		 * We have a host that is failing for some reason.  Figure out
		 * what we need to do to get it up and online again (if we can).
		 * If we fail, we end up taking the thing offline.
		 */
		if (scsi_autopm_get_host(shost) != 0) {
			SCSI_LOG_ERROR_RECOVERY(1,
				printk(KERN_ERR "Error handler scsi_eh_%d "
						"unable to autoresume\n",
						shost->host_no));
			continue;
		}

		/* transport-specific strategy overrides the default unjam */
		if (shost->transportt->eh_strategy_handler)
			shost->transportt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);

		/*
		 * Note - if the above fails completely, the action is to take
		 * individual devices offline and flush the queue of any
		 * outstanding requests that may have been pending.  When we
		 * restart, we restart any I/O to any other devices on the bus
		 * which are still online.
		 */
		scsi_restart_operations(shost);
		scsi_autopm_put_host(shost);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	SCSI_LOG_ERROR_RECOVERY(1,
		printk("Error handler scsi_eh_%d exiting\n", shost->host_no));
	shost->ehandler = NULL;
	return 0;
}

/*
 * Function:    scsi_report_bus_reset()
 *
 * Purpose:     Utility function used by low-level drivers to report that
 *		they have observed a bus reset on the bus being handled.
 *
 * Arguments:   shost       - Host in question
 *		channel     - channel on which reset was observed.
 *
 * Returns:     Nothing
 *
 * Lock status: Host lock must be held.
 *
 * Notes:       This only needs to be called if the reset is one which
 *		originates from an unknown location.  Resets originated
 *		by the mid-level itself don't need to call this, but there
 *		should be no harm.
 *
 *		The main purpose of this is to make sure that a CHECK_CONDITION
 *		is properly treated.
1812 */ 1813 void scsi_report_bus_reset(struct Scsi_Host *shost, int channel) 1814 { 1815 struct scsi_device *sdev; 1816 1817 __shost_for_each_device(sdev, shost) { 1818 if (channel == sdev_channel(sdev)) 1819 __scsi_report_device_reset(sdev, NULL); 1820 } 1821 } 1822 EXPORT_SYMBOL(scsi_report_bus_reset); 1823 1824 /* 1825 * Function: scsi_report_device_reset() 1826 * 1827 * Purpose: Utility function used by low-level drivers to report that 1828 * they have observed a device reset on the device being handled. 1829 * 1830 * Arguments: shost - Host in question 1831 * channel - channel on which reset was observed 1832 * target - target on which reset was observed 1833 * 1834 * Returns: Nothing 1835 * 1836 * Lock status: Host lock must be held 1837 * 1838 * Notes: This only needs to be called if the reset is one which 1839 * originates from an unknown location. Resets originated 1840 * by the mid-level itself don't need to call this, but there 1841 * should be no harm. 1842 * 1843 * The main purpose of this is to make sure that a CHECK_CONDITION 1844 * is properly treated. 1845 */ 1846 void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target) 1847 { 1848 struct scsi_device *sdev; 1849 1850 __shost_for_each_device(sdev, shost) { 1851 if (channel == sdev_channel(sdev) && 1852 target == sdev_id(sdev)) 1853 __scsi_report_device_reset(sdev, NULL); 1854 } 1855 } 1856 EXPORT_SYMBOL(scsi_report_device_reset); 1857 1858 static void 1859 scsi_reset_provider_done_command(struct scsi_cmnd *scmd) 1860 { 1861 } 1862 1863 /* 1864 * Function: scsi_reset_provider 1865 * 1866 * Purpose: Send requested reset to a bus or device at any phase. 1867 * 1868 * Arguments: device - device to send reset to 1869 * flag - reset type (see scsi.h) 1870 * 1871 * Returns: SUCCESS/FAILURE. 1872 * 1873 * Notes: This is used by the SCSI Generic driver to provide 1874 * Bus/Device reset capability. 
 */
int
scsi_reset_provider(struct scsi_device *dev, int flag)
{
	struct scsi_cmnd *scmd;
	struct Scsi_Host *shost = dev->host;
	struct request req;
	unsigned long flags;
	int rtn;

	if (scsi_autopm_get_host(shost) < 0)
		return FAILED;

	/* build a dummy command/request pair to carry the TMF */
	scmd = scsi_get_command(dev, GFP_KERNEL);
	blk_rq_init(NULL, &req);
	scmd->request = &req;

	scmd->cmnd = req.cmd;

	scmd->scsi_done		= scsi_reset_provider_done_command;
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));

	scmd->cmd_len			= 0;

	scmd->sc_data_direction		= DMA_BIDIRECTIONAL;

	/* flag the host so normal completion paths know a TMF is running */
	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* escalate through the reset hierarchy until one succeeds */
	switch (flag) {
	case SCSI_TRY_RESET_DEVICE:
		rtn = scsi_try_bus_device_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_TARGET:
		rtn = scsi_try_target_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_BUS:
		rtn = scsi_try_bus_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_HOST:
		rtn = scsi_try_host_reset(scmd);
		break;
	default:
		rtn = FAILED;
	}

	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * be sure to wake up anyone who was sleeping or had their queue
	 * suspended while we performed the TMF.
	 */
	SCSI_LOG_ERROR_RECOVERY(3,
		printk("%s: waking up host to restart after TMF\n",
		__func__));

	wake_up(&shost->host_wait);

	scsi_run_host_queues(shost);

	scsi_next_command(scmd);
	scsi_autopm_put_host(shost);
	return rtn;
}
EXPORT_SYMBOL(scsi_reset_provider);

/**
 * scsi_normalize_sense - normalize main elements from either fixed or
 *			descriptor sense data format into a common format.
 *
 * @sense_buffer:	byte array containing sense data returned by device
 * @sb_len:		number of valid bytes in sense_buffer
 * @sshdr:		pointer to instance of structure that common
 *			elements are written to.
 *
 * Notes:
 *	The "main elements" from sense data are: response_code, sense_key,
 *	asc, ascq and additional_length (only for descriptor format).
 *
 *	Typically this function can be called after a device has
 *	responded to a SCSI command with the CHECK_CONDITION status.
 *
 * Return value:
 *	1 if valid sense data information found, else 0;
 */
int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
			 struct scsi_sense_hdr *sshdr)
{
	if (!sense_buffer || !sb_len)
		return 0;

	memset(sshdr, 0, sizeof(struct scsi_sense_hdr));

	sshdr->response_code = (sense_buffer[0] & 0x7f);

	if (!scsi_sense_valid(sshdr))
		return 0;

	if (sshdr->response_code >= 0x72) {
		/*
		 * descriptor format
		 */
		if (sb_len > 1)
			sshdr->sense_key = (sense_buffer[1] & 0xf);
		if (sb_len > 2)
			sshdr->asc = sense_buffer[2];
		if (sb_len > 3)
			sshdr->ascq = sense_buffer[3];
		if (sb_len > 7)
			sshdr->additional_length = sense_buffer[7];
	} else {
		/*
		 * fixed format
		 */
		if (sb_len > 2)
			sshdr->sense_key = (sense_buffer[2] & 0xf);
		if (sb_len > 7) {
			/* clamp to the additional length the device reported */
			sb_len = (sb_len < (sense_buffer[7] + 8)) ?
					 sb_len : (sense_buffer[7] + 8);
			if (sb_len > 12)
				sshdr->asc = sense_buffer[12];
			if (sb_len > 13)
				sshdr->ascq = sense_buffer[13];
		}
	}

	return 1;
}
EXPORT_SYMBOL(scsi_normalize_sense);

/* convenience wrapper: normalize the sense attached to a command */
int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
				 struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(cmd->sense_buffer,
			SCSI_SENSE_BUFFERSIZE, sshdr);
}
EXPORT_SYMBOL(scsi_command_normalize_sense);

/**
 * scsi_sense_desc_find - search for a given descriptor type in	descriptor sense data format.
 * @sense_buffer:	byte array of descriptor format sense data
 * @sb_len:		number of valid bytes in sense_buffer
 * @desc_type:		value of descriptor type to find
 *			(e.g. 0 -> information)
 *
 * Notes:
 *	only valid when sense data is in descriptor format
 *
 * Return value:
 *	pointer to start of (first) descriptor if found else NULL
 */
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
				int desc_type)
{
	int add_sen_len, add_len, desc_len, k;
	const u8 * descp;

	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
		return NULL;
	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
		return NULL;
	/* never walk past the valid part of the buffer */
	add_sen_len = (add_sen_len < (sb_len - 8)) ?
			 add_sen_len : (sb_len - 8);
	descp = &sense_buffer[8];
	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
		descp += desc_len;
		add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
		desc_len = add_len + 2;
		if (descp[0] == desc_type)
			return descp;
		if (add_len < 0) // short descriptor ??
2055 break; 2056 } 2057 return NULL; 2058 } 2059 EXPORT_SYMBOL(scsi_sense_desc_find); 2060 2061 /** 2062 * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format) 2063 * @sense_buffer: byte array of sense data 2064 * @sb_len: number of valid bytes in sense_buffer 2065 * @info_out: pointer to 64 integer where 8 or 4 byte information 2066 * field will be placed if found. 2067 * 2068 * Return value: 2069 * 1 if information field found, 0 if not found. 2070 */ 2071 int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, 2072 u64 * info_out) 2073 { 2074 int j; 2075 const u8 * ucp; 2076 u64 ull; 2077 2078 if (sb_len < 7) 2079 return 0; 2080 switch (sense_buffer[0] & 0x7f) { 2081 case 0x70: 2082 case 0x71: 2083 if (sense_buffer[0] & 0x80) { 2084 *info_out = (sense_buffer[3] << 24) + 2085 (sense_buffer[4] << 16) + 2086 (sense_buffer[5] << 8) + sense_buffer[6]; 2087 return 1; 2088 } else 2089 return 0; 2090 case 0x72: 2091 case 0x73: 2092 ucp = scsi_sense_desc_find(sense_buffer, sb_len, 2093 0 /* info desc */); 2094 if (ucp && (0xa == ucp[1])) { 2095 ull = 0; 2096 for (j = 0; j < 8; ++j) { 2097 if (j > 0) 2098 ull <<= 8; 2099 ull |= ucp[4 + j]; 2100 } 2101 *info_out = ull; 2102 return 1; 2103 } else 2104 return 0; 2105 default: 2106 return 0; 2107 } 2108 } 2109 EXPORT_SYMBOL(scsi_get_sense_info_fld); 2110 2111 /** 2112 * scsi_build_sense_buffer - build sense data in a buffer 2113 * @desc: Sense format (non zero == descriptor format, 2114 * 0 == fixed format) 2115 * @buf: Where to build sense data 2116 * @key: Sense key 2117 * @asc: Additional sense code 2118 * @ascq: Additional sense code qualifier 2119 * 2120 **/ 2121 void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq) 2122 { 2123 if (desc) { 2124 buf[0] = 0x72; /* descriptor, current */ 2125 buf[1] = key; 2126 buf[2] = asc; 2127 buf[3] = ascq; 2128 buf[7] = 0; 2129 } else { 2130 buf[0] = 0x70; /* fixed, current */ 2131 buf[2] = key; 2132 buf[7] = 
0xa; 2133 buf[12] = asc; 2134 buf[13] = ascq; 2135 } 2136 } 2137 EXPORT_SYMBOL(scsi_build_sense_buffer); 2138