/*
 *  scsi_error.c Copyright (C) 1997 Eric Youngdale
 *
 *  SCSI error/timeout handling
 *      Initial versions: Eric Youngdale.  Based upon conversations with
 *                        Leonard Zubkoff and David Miller at Linux Expo,
 *                        ideas originating from all over the place.
 *
 *	Restructured scsi_unjam_host and associated functions.
 *	September 04, 2002 Mike Anderson (andmike@us.ibm.com)
 *
 *	Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
 *	minor cleanups.
 *	September 30, 2002 Mike Anderson (andmike@us.ibm.com)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define SENSE_TIMEOUT		(10*HZ)
#define START_UNIT_TIMEOUT	(30*HZ)

/*
 * These should *probably* be handled by the host itself.
 * Since it is allowed to sleep, it probably should.
 */
#define BUS_RESET_SETTLE_TIME	(10)
#define HOST_RESET_SETTLE_TIME	(10)

/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
	if (shost->host_busy == shost->host_failed) {
		up(shost->eh_wait);
		SCSI_LOG_ERROR_RECOVERY(5,
				printk("Waking error handler thread\n"));
	}
}

/**
 * scsi_eh_scmd_add - add scsi cmd to error handling.
 * @scmd:	scmd to run eh on.
 * @eh_flag:	optional SCSI_EH flag.
 *
 * Return value:
 *	0 on failure.
 **/
int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;

	if (shost->eh_wait == NULL)
		return 0;

	spin_lock_irqsave(shost->host_lock, flags);

	scsi_eh_eflags_set(scmd, eh_flag);
	/*
	 * FIXME: Can we stop setting owner and state.
	 */
	scmd->owner = SCSI_OWNER_ERROR_HANDLER;
	scmd->state = SCSI_STATE_FAILED;
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	set_bit(SHOST_RECOVERY, &shost->shost_state);
	shost->host_failed++;
	scsi_eh_wakeup(shost);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return 1;
}

/**
 * scsi_add_timer - Start timeout timer for a single scsi command.
 * @scmd:	scsi command that is about to start running.
 * @timeout:	amount of time to allow this command to run.
 * @complete:	timeout function to call if timer isn't canceled.
 *
 * Notes:
 *    This should be turned into an inline function.  Each scsi command
 *    has its own timer, and as it is added to the queue, we set up the
 *    timer.  When the command completes, we cancel the timer.
 **/
void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
		    void (*complete)(struct scsi_cmnd *))
{

	/*
	 * If the clock was already running for this command, then
	 * first delete the timer.  The timer handling code gets rather
	 * confused if we don't do this.
	 */
	if (scmd->eh_timeout.function)
		del_timer(&scmd->eh_timeout);

	scmd->eh_timeout.data = (unsigned long)scmd;
	scmd->eh_timeout.expires = jiffies + timeout;
	scmd->eh_timeout.function = (void (*)(unsigned long)) complete;

	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
					  " %d, (%p)\n", __FUNCTION__,
					  scmd, timeout, complete));

	add_timer(&scmd->eh_timeout);
}
EXPORT_SYMBOL(scsi_add_timer);

/**
 * scsi_delete_timer - Delete/cancel timer for a given function.
 * @scmd:	Cmd that we are canceling timer for
 *
 * Notes:
 *     This should be turned into an inline function.
 *
 * Return value:
 *     1 if we were able to detach the timer.  0 if we blew it, and the
 *     timer function has already started to run.
 **/
int scsi_delete_timer(struct scsi_cmnd *scmd)
{
	int rtn;

	rtn = del_timer(&scmd->eh_timeout);

	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
					  " rtn: %d\n", __FUNCTION__,
					  scmd, rtn));

	scmd->eh_timeout.data = (unsigned long)NULL;
	scmd->eh_timeout.function = NULL;

	return rtn;
}
EXPORT_SYMBOL(scsi_delete_timer);

/**
 * scsi_times_out - Timeout function for normal scsi commands.
 * @scmd:	Cmd that is timing out.
 *
 * Notes:
 *     We do not need to lock this.  There is the potential for a race
 *     only in that the normal completion handling might run, but if the
 *     normal completion function determines that the timer has already
 *     fired, then it mustn't do anything.
 **/
void scsi_times_out(struct scsi_cmnd *scmd)
{
	scsi_log_completion(scmd, TIMEOUT_ERROR);

	if (scmd->device->host->hostt->eh_timed_out)
		switch (scmd->device->host->hostt->eh_timed_out(scmd)) {
		case EH_HANDLED:
			__scsi_done(scmd);
			return;
		case EH_RESET_TIMER:
			/* This allows a single retry even of a command
			 * with allowed == 0 */
			if (scmd->retries++ > scmd->allowed)
				break;
			scsi_add_timer(scmd, scmd->timeout_per_command,
				       scsi_times_out);
			return;
		case EH_NOT_HANDLED:
			break;
		}

	if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
		panic("Error handler thread not present at %p %p %s %d",
		      scmd, scmd->device->host, __FILE__, __LINE__);
	}
}

/**
 * scsi_block_when_processing_errors - Prevent cmds from being queued.
 * @sdev:	Device on which we are performing recovery.
 *
 * Description:
 *     We block until the host is out of error recovery, and then check to
 *     see whether the host or the device is offline.
 *
 * Return value:
 *     0 when dev was taken offline by error recovery. 1 OK to proceed.
 **/
int scsi_block_when_processing_errors(struct scsi_device *sdev)
{
	int online;

	wait_event(sdev->host->host_wait,
		   !test_bit(SHOST_RECOVERY, &sdev->host->shost_state));

	online = scsi_device_online(sdev);

	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__,
					  online));

	return online;
}
EXPORT_SYMBOL(scsi_block_when_processing_errors);

#ifdef CONFIG_SCSI_LOGGING
/**
 * scsi_eh_prt_fail_stats - Log info on failures.
 * @shost:	scsi host being recovered.
 * @work_q:	Queue of scsi cmds to process.
 **/
static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
					  struct list_head *work_q)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	int total_failures = 0;
	int cmd_failed = 0;
	int cmd_cancel = 0;
	int devices_failed = 0;

	shost_for_each_device(sdev, shost) {
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (scmd->device == sdev) {
				++total_failures;
				if (scsi_eh_eflags_chk(scmd,
						       SCSI_EH_CANCEL_CMD))
					++cmd_cancel;
				else
					++cmd_failed;
			}
		}

		if (cmd_cancel || cmd_failed) {
			SCSI_LOG_ERROR_RECOVERY(3,
				printk("%s: %d:%d:%d:%d cmds failed: %d,"
				       " cancel: %d\n",
				       __FUNCTION__, shost->host_no,
				       sdev->channel, sdev->id, sdev->lun,
				       cmd_failed, cmd_cancel));
			cmd_cancel = 0;
			cmd_failed = 0;
			++devices_failed;
		}
	}

	SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
					  " devices require eh work\n",
					  total_failures, devices_failed));
}
#endif

/**
 * scsi_check_sense - Examine scsi cmd sense
 * @scmd:	Cmd to have sense checked.
 *
 * Return value:
 *	SUCCESS or FAILED or NEEDS_RETRY
 *
 * Notes:
 *	When a deferred error is detected the current command has
 *	not been executed and needs retrying.
 **/
static int scsi_check_sense(struct scsi_cmnd *scmd)
{
	struct scsi_sense_hdr sshdr;

	if (!scsi_command_normalize_sense(scmd, &sshdr))
		return FAILED;	/* no valid sense data */

	if (scsi_sense_is_deferred(&sshdr))
		return NEEDS_RETRY;

	/*
	 * Previous logic looked for FILEMARK, EOM or ILI which are
	 * mainly associated with tapes and returned SUCCESS.
	 */
	if (sshdr.response_code == 0x70) {
		/* fixed format */
		if (scmd->sense_buffer[2] & 0xe0)
			return SUCCESS;
	} else {
		/*
		 * descriptor format: look for "stream commands sense data
		 * descriptor" (see SSC-3). Assume single sense data
		 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
		 */
		if ((sshdr.additional_length > 3) &&
		    (scmd->sense_buffer[8] == 0x4) &&
		    (scmd->sense_buffer[11] & 0xe0))
			return SUCCESS;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
		return SUCCESS;
	case RECOVERED_ERROR:
		return /* soft_error */ SUCCESS;

	case ABORTED_COMMAND:
		return NEEDS_RETRY;
	case NOT_READY:
	case UNIT_ATTENTION:
		/*
		 * if we are expecting a cc/ua because of a bus reset that we
		 * performed, treat this just as a retry.  otherwise this is
		 * information that we should pass up to the upper-level driver
		 * so that we can deal with it there.
		 */
		if (scmd->device->expecting_cc_ua) {
			scmd->device->expecting_cc_ua = 0;
			return NEEDS_RETRY;
		}
		/*
		 * if the device is in the process of becoming ready, we
		 * should retry.
		 */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
			return NEEDS_RETRY;
		/*
		 * if the device is not started, we need to wake
		 * the error handler to start the motor
		 */
		if (scmd->device->allow_restart &&
		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
			return FAILED;
		return SUCCESS;

		/* these three are not supported */
	case COPY_ABORTED:
	case VOLUME_OVERFLOW:
	case MISCOMPARE:
		return SUCCESS;

	case MEDIUM_ERROR:
		return NEEDS_RETRY;

	case HARDWARE_ERROR:
		if (scmd->device->retry_hwerror)
			return NEEDS_RETRY;
		else
			return SUCCESS;

	case ILLEGAL_REQUEST:
	case BLANK_CHECK:
	case DATA_PROTECT:
	default:
		return SUCCESS;
	}
}

/**
 * scsi_eh_completed_normally - Disposition an eh cmd on return from LLD.
 * @scmd:	SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status of commands
 *    queued during error recovery.  the main difference here is that we
 *    don't allow for the possibility of retries here, and we are a lot
 *    more restrictive about what we consider acceptable.
 **/
static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	if (host_byte(scmd->result) == DID_RESET) {
		/*
		 * rats.  we are already in the error handler, so we now
		 * get to try and figure out what to do next.  if the sense
		 * is valid, we have a pretty good idea of what to do.
		 * if not, we mark it as FAILED.
		 */
		return scsi_check_sense(scmd);
	}
	if (host_byte(scmd->result) != DID_OK)
		return FAILED;

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * now, check the status byte to see if this indicates
	 * anything special.
	 */
	switch (status_byte(scmd->result)) {
	case GOOD:
	case COMMAND_TERMINATED:
		return SUCCESS;
	case CHECK_CONDITION:
		return scsi_check_sense(scmd);
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;
	case BUSY:
	case QUEUE_FULL:
	case RESERVATION_CONFLICT:
	default:
		return FAILED;
	}
	return FAILED;
}

/**
 * scsi_eh_times_out - timeout function for error handling.
 * @scmd:	Cmd that is timing out.
 *
 * Notes:
 *    During error handling, the kernel thread will be sleeping waiting
 *    for some action to complete on the device.  our only job is to
 *    record that it timed out, and to wake up the thread.
 **/
static void scsi_eh_times_out(struct scsi_cmnd *scmd)
{
	scsi_eh_eflags_set(scmd, SCSI_EH_REC_TIMEOUT);
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd:%p\n", __FUNCTION__,
					  scmd));

	if (scmd->device->host->eh_action)
		up(scmd->device->host->eh_action);
}

/**
 * scsi_eh_done - Completion function for error handling.
 * @scmd:	Cmd that is done.
 **/
static void scsi_eh_done(struct scsi_cmnd *scmd)
{
	/*
	 * if the timeout handler is already running, then just set the
	 * flag which says we finished late, and return.  we have no
	 * way of stopping the timeout handler from running, so we must
	 * always defer to it.
	 */
	if (del_timer(&scmd->eh_timeout)) {
		scmd->request->rq_status = RQ_SCSI_DONE;
		scmd->owner = SCSI_OWNER_ERROR_HANDLER;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s scmd: %p result: %x\n",
						  __FUNCTION__, scmd,
						  scmd->result));

		if (scmd->device->host->eh_action)
			up(scmd->device->host->eh_action);
	}
}

/**
 * scsi_send_eh_cmnd - send a cmd to a device as part of error recovery.
 * @scmd:	SCSI Cmd to send.
 * @timeout:	Timeout for cmd.
 *
 * Notes:
 *    The initialization of the structures is quite a bit different in
 *    this case, and furthermore, there is a different completion handler
 *    vs scsi_dispatch_cmd.
 * Return value:
 *    SUCCESS or FAILED or NEEDS_RETRY
 **/
static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	DECLARE_MUTEX_LOCKED(sem);
	unsigned long flags;
	int rtn = SUCCESS;

	/*
	 * we will use a queued command if possible, otherwise we will
	 * emulate the queuing and calling of completion function ourselves.
	 */
	scmd->owner = SCSI_OWNER_LOWLEVEL;

	if (sdev->scsi_level <= SCSI_2)
		scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
			(sdev->lun << 5 & 0xe0);

	scsi_add_timer(scmd, timeout, scsi_eh_times_out);

	/*
	 * set up the semaphore so we wait for the command to complete.
	 */
	shost->eh_action = &sem;
	scmd->request->rq_status = RQ_SCSI_BUSY;

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_log_send(scmd);
	shost->hostt->queuecommand(scmd, scsi_eh_done);
	spin_unlock_irqrestore(shost->host_lock, flags);

	down(&sem);
	scsi_log_completion(scmd, SUCCESS);

	shost->eh_action = NULL;

	/*
	 * see if timeout.  if so, tell the host to forget about it.
	 * in other words, we don't want a callback any more.
	 */
	if (scsi_eh_eflags_chk(scmd, SCSI_EH_REC_TIMEOUT)) {
		scsi_eh_eflags_clr(scmd, SCSI_EH_REC_TIMEOUT);
		scmd->owner = SCSI_OWNER_LOWLEVEL;

		/*
		 * as far as the low level driver is
		 * concerned, this command is still active, so
		 * we must give the low level driver a chance
		 * to abort it. (db)
		 *
		 * FIXME(eric) - we are not tracking whether we could
		 * abort a timed out command or not.  not sure how
		 * we should treat them differently anyways.
		 */
		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->hostt->eh_abort_handler)
			shost->hostt->eh_abort_handler(scmd);
		spin_unlock_irqrestore(shost->host_lock, flags);

		scmd->request->rq_status = RQ_SCSI_DONE;
		scmd->owner = SCSI_OWNER_ERROR_HANDLER;

		rtn = FAILED;
	}

	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd: %p, rtn:%x\n",
					  __FUNCTION__, scmd, rtn));

	/*
	 * now examine the actual status codes to see whether the command
	 * actually did complete normally.
	 */
	if (rtn == SUCCESS) {
		rtn = scsi_eh_completed_normally(scmd);
		SCSI_LOG_ERROR_RECOVERY(3,
			printk("%s: scsi_eh_completed_normally %x\n",
			       __FUNCTION__, rtn));
		switch (rtn) {
		case SUCCESS:
		case NEEDS_RETRY:
		case FAILED:
			break;
		default:
			rtn = FAILED;
			break;
		}
	}

	return rtn;
}

/**
 * scsi_request_sense - Request sense data from a particular target.
 * @scmd:	SCSI cmd for request sense.
 *
 * Notes:
 *    Some hosts automatically obtain this information, others require
 *    that we obtain it on our own.  This function will *not* return until
 *    the command either times out, or it completes.
 **/
static int scsi_request_sense(struct scsi_cmnd *scmd)
{
	static unsigned char generic_sense[6] =
		{REQUEST_SENSE, 0, 0, 0, 252, 0};
	unsigned char *scsi_result;
	int saved_result;
	int rtn;

	memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));

	scsi_result = kmalloc(252, GFP_ATOMIC |
			      (scmd->device->host->hostt->unchecked_isa_dma ?
			       __GFP_DMA : 0));

	if (unlikely(!scsi_result)) {
		printk(KERN_ERR "%s: cannot allocate scsi_result.\n",
		       __FUNCTION__);
		return FAILED;
	}

	/*
	 * zero the sense buffer.  some host adapters automatically always
	 * request sense, so it is not a good idea that
	 * scmd->request_buffer and scmd->sense_buffer point to the same
	 * address (db).  0 is not a valid sense code.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
	memset(scsi_result, 0, 252);

	saved_result = scmd->result;
	scmd->request_buffer = scsi_result;
	scmd->request_bufflen = 252;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->sc_data_direction = DMA_FROM_DEVICE;
	scmd->underflow = 0;

	rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);

	/* last chance to have valid sense data */
	if (!SCSI_SENSE_VALID(scmd)) {
		memcpy(scmd->sense_buffer, scmd->request_buffer,
		       sizeof(scmd->sense_buffer));
	}

	kfree(scsi_result);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);
	scmd->result = saved_result;
	return rtn;
}

/**
 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
 * @scmd:	Original SCSI cmd that eh has finished.
 * @done_q:	Queue for processed commands.
 *
 * Notes:
 *    We don't want to use the normal command completion while we are
 *    still handling errors - it may cause other commands to be queued,
 *    and that would disturb what we are doing.  thus we really want to
 *    keep a list of pending commands for final completion, and once we
 *    are ready to leave error handling we handle completion for real.
 **/
static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
			       struct list_head *done_q)
{
	scmd->device->host->host_failed--;
	scmd->state = SCSI_STATE_BHQUEUE;

	scsi_eh_eflags_clr_all(scmd);

	/*
	 * set this back so that the upper level can correctly free up
	 * things.
	 */
	scsi_setup_cmd_retry(scmd);
	list_move_tail(&scmd->eh_entry, done_q);
}

/**
 * scsi_eh_get_sense - Get device sense data.
 * @work_q:	Queue of commands to process.
 * @done_q:	Queue of processed commands.
 *
 * Description:
 *    See if we need to request sense information.  if so, then get it
 *    now, so we have a better idea of what to do.
 *
 * Notes:
 *    This has the unfortunate side effect that if a shost adapter does
 *    not automatically request sense information, we end up shutting
 *    it down before we request it.
 *
 *    All drivers should request sense information internally these days,
 *    so for now all I have to say is tough noogies if you end up in here.
 *
 *    XXX: Long term this code should go away, but that needs an audit of
 *         all LLDDs first.
 **/
static int scsi_eh_get_sense(struct list_head *work_q,
			     struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd;
	int rtn;

	list_for_each_safe(lh, lh_sf, work_q) {
		scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
		if (scsi_eh_eflags_chk(scmd, SCSI_EH_CANCEL_CMD) ||
		    SCSI_SENSE_VALID(scmd))
			continue;

		SCSI_LOG_ERROR_RECOVERY(2, printk("%s: requesting sense"
						  " for id: %d\n",
						  current->comm,
						  scmd->device->id));
		rtn = scsi_request_sense(scmd);
		if (rtn != SUCCESS)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
						  " result %x\n", scmd,
						  scmd->result));
		SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd));

		rtn = scsi_decide_disposition(scmd);

		/*
		 * if the result was normal, then just pass it along to the
		 * upper level.
		 */
		if (rtn == SUCCESS)
			/* we don't want this command reissued, just
			 * finished with the sense data, so set
			 * retries to the max allowed to ensure it
			 * won't get reissued */
			scmd->retries = scmd->allowed;
		else if (rtn != NEEDS_RETRY)
			continue;

		scsi_eh_finish_cmd(scmd, done_q);
	}

	return list_empty(work_q);
}

/**
 * scsi_try_to_abort_cmd - Ask host to abort a running command.
 * @scmd:	SCSI cmd to abort from Lower Level.
 *
 * Notes:
 *    This function will not return until the user's completion function
 *    has been called.  there is no timeout on this operation.  if the
 *    author of the low-level driver wishes this operation to be timed,
 *    they can provide this facility themselves.  helper functions in
 *    scsi_error.c can be supplied to make this easier to do.
 **/
static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn = FAILED;

	if (!scmd->device->host->hostt->eh_abort_handler)
		return rtn;

	/*
	 * scsi_done was called just after the command timed out and before
	 * we had a chance to process it. (db)
	 */
	if (scmd->serial_number == 0)
		return SUCCESS;

	scmd->owner = SCSI_OWNER_LOWLEVEL;

	spin_lock_irqsave(scmd->device->host->host_lock, flags);
	rtn = scmd->device->host->hostt->eh_abort_handler(scmd);
	spin_unlock_irqrestore(scmd->device->host->host_lock, flags);

	return rtn;
}

/**
 * scsi_eh_tur - Send TUR to device.
 * @scmd:	Scsi cmd to send TUR
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 **/
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	int retry_cnt = 1, rtn;

retry_tur:
	memcpy(scmd->cmnd, tur_command, sizeof(tur_command));

	/*
	 * zero the sense buffer.  the scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));

	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->underflow = 0;
	scmd->sc_data_direction = DMA_NONE;

	rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);

	/*
	 * hey, we are done.  let's look to see what happened.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
					  __FUNCTION__, scmd, rtn));
	if (rtn == SUCCESS)
		return 0;
	else if (rtn == NEEDS_RETRY)
		if (retry_cnt--)
			goto retry_tur;
	return 1;
}

/**
 * scsi_eh_abort_cmds - abort canceled commands.
 * @work_q:	Queue of commands to abort.
 * @done_q:	list_head for processed commands.
 *
 * Description:
 *    Try and see whether or not it makes sense to try and abort the
 *    running command.  this only works out to be the case if we have one
 *    command that has timed out.  if the command simply failed, it makes
 *    no sense to try and abort the command, since as far as the shost
 *    adapter is concerned, it isn't running.
 **/
static int scsi_eh_abort_cmds(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd;
	int rtn;

	list_for_each_safe(lh, lh_sf, work_q) {
		scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
		if (!scsi_eh_eflags_chk(scmd, SCSI_EH_CANCEL_CMD))
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
						  "0x%p\n", current->comm,
						  scmd));
		rtn = scsi_try_to_abort_cmd(scmd);
		if (rtn == SUCCESS) {
			scsi_eh_eflags_clr(scmd, SCSI_EH_CANCEL_CMD);
			if (!scsi_device_online(scmd->device) ||
			    !scsi_eh_tur(scmd)) {
				scsi_eh_finish_cmd(scmd, done_q);
			}

		} else
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
							  " cmd failed:"
							  "0x%p\n",
							  current->comm,
							  scmd));
	}

	return list_empty(work_q);
}

/**
 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
 * @scmd:	SCSI cmd used to send BDR
 *
 * Notes:
 *    There is no timeout for this operation.  if this operation is
 *    unreliable for a given host, then the host itself needs to put a
 *    timer on it, and set the host back to a consistent state prior to
 *    returning.
 **/
static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn = FAILED;

	if (!scmd->device->host->hostt->eh_device_reset_handler)
		return rtn;

	scmd->owner = SCSI_OWNER_LOWLEVEL;

	spin_lock_irqsave(scmd->device->host->host_lock, flags);
	rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
	spin_unlock_irqrestore(scmd->device->host->host_lock, flags);

	if (rtn == SUCCESS) {
		scmd->device->was_reset = 1;
		scmd->device->expecting_cc_ua = 1;
	}

	return rtn;
}

/**
 * scsi_eh_try_stu - Send START_UNIT to device.
 * @scmd:	Scsi cmd to send START_UNIT
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 **/
static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
{
	static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
	int rtn;

	if (!scmd->device->allow_restart)
		return 1;

	memcpy(scmd->cmnd, stu_command, sizeof(stu_command));

	/*
	 * zero the sense buffer.  the scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));

	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->underflow = 0;
	scmd->sc_data_direction = DMA_NONE;

	rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);

	/*
	 * hey, we are done.  let's look to see what happened.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
					  __FUNCTION__, scmd, rtn));
	if (rtn == SUCCESS)
		return 0;
	return 1;
}

/**
 * scsi_eh_stu - send START_UNIT if needed
 * @shost:	scsi host being recovered.
 * @eh_done_q:	list_head for processed commands.
 *
 * Notes:
 *    If commands are failing due to not ready, initializing command required,
 *    try revalidating the device, which will end up sending a start unit.
 **/
static int scsi_eh_stu(struct Scsi_Host *shost,
		       struct list_head *work_q,
		       struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd, *stu_scmd;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		stu_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
			    scsi_check_sense(scmd) == FAILED) {
				stu_scmd = scmd;
				break;
			}

		if (!stu_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
						  " 0x%p\n", current->comm, sdev));

		if (!scsi_eh_try_stu(stu_scmd)) {
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(stu_scmd)) {
				list_for_each_safe(lh, lh_sf, work_q) {
					scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd, done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				printk("%s: START_UNIT failed to sdev:"
				       " 0x%p\n", current->comm, sdev));
		}
	}

	return list_empty(work_q);
}


/**
 * scsi_eh_bus_device_reset - send bdr if needed
 * @shost:	scsi host being recovered.
 * @eh_done_q:	list_head for processed commands.
 *
 * Notes:
 *    Try a bus device reset.  still, look to see whether we have multiple
 *    devices that are jammed or not - if we have multiple devices, it
 *    makes no sense to try bus_device_reset - we really would need to try
 *    a bus_reset instead.
 **/
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd, *bdr_scmd;
	struct scsi_device *sdev;
	int rtn;

	shost_for_each_device(sdev, shost) {
		bdr_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev) {
				bdr_scmd = scmd;
				break;
			}

		if (!bdr_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
						  " 0x%p\n", current->comm,
						  sdev));
		rtn = scsi_try_bus_device_reset(bdr_scmd);
		if (rtn == SUCCESS) {
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(bdr_scmd)) {
				list_for_each_safe(lh, lh_sf, work_q) {
					scmd = list_entry(lh, struct scsi_cmnd,
							  eh_entry);
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd,
								   done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
							  " failed sdev:"
							  "0x%p\n",
							  current->comm,
							  sdev));
		}
	}

	return list_empty(work_q);
}

/**
 * scsi_try_bus_reset - ask host to perform a bus reset
 * @scmd:	SCSI cmd to send bus reset.
 **/
static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;

	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
					  __FUNCTION__));
	scmd->owner = SCSI_OWNER_LOWLEVEL;

	if (!scmd->device->host->hostt->eh_bus_reset_handler)
		return FAILED;

	spin_lock_irqsave(scmd->device->host->host_lock, flags);
	rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);
	spin_unlock_irqrestore(scmd->device->host->host_lock, flags);

	if (rtn == SUCCESS) {
		if (!scmd->device->host->hostt->skip_settle_delay)
			ssleep(BUS_RESET_SETTLE_TIME);
		spin_lock_irqsave(scmd->device->host->host_lock, flags);
		scsi_report_bus_reset(scmd->device->host, scmd->device->channel);
		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
	}

	return rtn;
}

/**
 * scsi_try_host_reset - ask host adapter to reset itself
 * @scmd:	SCSI cmd to send host reset.
 **/
static int scsi_try_host_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;

	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
					  __FUNCTION__));
	scmd->owner = SCSI_OWNER_LOWLEVEL;

	if (!scmd->device->host->hostt->eh_host_reset_handler)
		return FAILED;

	spin_lock_irqsave(scmd->device->host->host_lock, flags);
	rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);
	spin_unlock_irqrestore(scmd->device->host->host_lock, flags);

	if (rtn == SUCCESS) {
		if (!scmd->device->host->hostt->skip_settle_delay)
			ssleep(HOST_RESET_SETTLE_TIME);
		spin_lock_irqsave(scmd->device->host->host_lock, flags);
		scsi_report_bus_reset(scmd->device->host, scmd->device->channel);
		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
	}

	return rtn;
}

/**
 * scsi_eh_bus_reset - send a bus reset
 * @shost:	scsi host being recovered.
 * @eh_done_q:	list_head for processed commands.
 **/
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd;
	struct scsi_cmnd *chan_scmd;
	unsigned int channel;
	int rtn;

	/*
	 * we really want to loop over the various channels, and do this on
	 * a channel by channel basis.  we should also check to see if any
	 * of the failed commands are on soft_reset devices, and if so, skip
	 * the reset.
	 */

	for (channel = 0; channel <= shost->max_channel; channel++) {
		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd->device->channel) {
				chan_scmd = scmd;
				break;
				/*
				 * FIXME add back in some support for
				 * soft_reset devices.
				 */
			}
		}

		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
						  " %d\n", current->comm,
						  channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS) {
			list_for_each_safe(lh, lh_sf, work_q) {
				scmd = list_entry(lh, struct scsi_cmnd,
						  eh_entry);
				if (channel == scmd->device->channel)
					if (!scsi_device_online(scmd->device) ||
					    !scsi_eh_tur(scmd))
						scsi_eh_finish_cmd(scmd,
								   done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
							  " failed chan: %d\n",
							  current->comm,
							  channel));
		}
	}
	return list_empty(work_q);
}

/**
 * scsi_eh_host_reset - send a host reset
 * @work_q:	list_head for processed commands.
 * @done_q:	list_head for processed commands.
 **/
static int scsi_eh_host_reset(struct list_head *work_q,
			      struct list_head *done_q)
{
	int rtn;
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd;

	if (!list_empty(work_q)) {
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n",
						  current->comm));

		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS) {
			list_for_each_safe(lh, lh_sf, work_q) {
				scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
				if (!scsi_device_online(scmd->device) ||
				    (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
				    !scsi_eh_tur(scmd))
					scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
							  " failed\n",
							  current->comm));
		}
	}
	return list_empty(work_q);
}

/**
 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
 * @work_q:	list_head for processed commands.
 * @done_q:	list_head for processed commands.
 *
 **/
static void scsi_eh_offline_sdevs(struct list_head *work_q,
				  struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd;

	list_for_each_safe(lh, lh_sf, work_q) {
		scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
		printk(KERN_INFO "scsi: Device offlined - not"
		       " ready after error recovery: host"
		       " %d channel %d id %d lun %d\n",
		       scmd->device->host->host_no,
		       scmd->device->channel,
		       scmd->device->id,
		       scmd->device->lun);
		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
		if (scsi_eh_eflags_chk(scmd, SCSI_EH_CANCEL_CMD)) {
			/*
			 * FIXME: Handle lost cmds.
			 */
		}
		scsi_eh_finish_cmd(scmd, done_q);
	}
	return;
}

/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd:	SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status after sending
 *    out the actual data command.  any commands that are queued for error
 *    recovery (e.g. test_unit_ready) do *not* come through here.
 *
 *    When this routine returns failed, it means the error handler thread
 *    is woken.  In cases where the error code indicates an error that
 *    doesn't require the error handler read (i.e. we don't need to
 *    abort/reset), this function should return SUCCESS.
 **/
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	int rtn;

	/*
	 * if the device is offline, then we clearly just pass the result back
	 * up to the top level.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
						  " as SUCCESS\n",
						  __FUNCTION__));
		return SUCCESS;
	}

	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * no matter what, pass this through to the upper layer.
		 * nuke this special code so that it looks like we are saying
		 * did_ok.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/*
		 * looks good.  drop through, and check the next byte.
		 */
		break;
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
	case DID_ABORT:
		/*
		 * note - this means that we just report the status back
		 * to the top level driver, not that we actually think
		 * that it indicates SUCCESS.
		 */
		return SUCCESS;
		/*
		 * when the low level driver returns did_soft_error,
		 * it is responsible for keeping an internal retry counter
		 * in order to avoid endless loops (db)
		 *
		 * actually this is a bug in this function here.  we should
		 * be mindful of the maximum number of retries specified
		 * and not get stuck in a loop.
		 */
	case DID_SOFT_ERROR:
		goto maybe_retry;
	case DID_IMM_RETRY:
		return NEEDS_RETRY;

	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;

	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			/*
			 * execute reservation conflict processing code
			 * lower down
			 */
			break;
		/* fallthrough */

	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * when we scan the bus, we get timeout messages for
		 * these commands if there is no device available.
		 * other hosts report did_no_connect for the same thing.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * check the status byte to see if this indicates anything special.
	 */
	switch (status_byte(scmd->result)) {
	case QUEUE_FULL:
		/*
		 * the case of trying to send too many commands to a
		 * tagged queueing device.
		 */
	case BUSY:
		/*
		 * device can't talk to us at the moment.  Should only
		 * occur (SAM-3) when the task queue is empty, so will cause
		 * the empty queue handling to trigger a stall in the
		 * device.
		 */
		return ADD_TO_MLQUEUE;
	case GOOD:
	case COMMAND_TERMINATED:
	case TASK_ABORTED:
		return SUCCESS;
	case CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		/* if rtn == FAILED, we have no sense information;
		 * returning FAILED will wake the error handler thread
		 * to collect the sense and redo the decide
		 * disposition */
		return rtn;
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
	case ACA_ACTIVE:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;

	case RESERVATION_CONFLICT:
		printk(KERN_INFO "scsi: reservation conflict: host"
		       " %d channel %d id %d lun %d\n",
		       scmd->device->host->host_no, scmd->device->channel,
		       scmd->device->id, scmd->device->lun);
		return SUCCESS; /* causes immediate i/o error */
	default:
		return FAILED;
	}
	return FAILED;

maybe_retry:

	/* we requeue for retry because the error was retryable, and
	 * the request was not marked fast fail.  Note that above,
	 * even if the request is marked fast fail, we still requeue
	 * for queue congestion conditions (QUEUE_FULL or BUSY) */
	if ((++scmd->retries) < scmd->allowed
	    && !blk_noretry_request(scmd->request)) {
		return NEEDS_RETRY;
	} else {
		/*
		 * no more retries - report this one back to upper level.
		 */
		return SUCCESS;
	}
}

/**
 * scsi_eh_lock_done - done function for eh door lock request
 * @scmd:	SCSI command block for the door lock request
 *
 * Notes:
 *    We completed the asynchronous door lock request, and it has either
 *    locked the door or failed.  We must free the command structures
 *    associated with this request.
 **/
static void scsi_eh_lock_done(struct scsi_cmnd *scmd)
{
	struct scsi_request *sreq = scmd->sc_request;

	scsi_release_request(sreq);
}


/**
 * scsi_eh_lock_door - Prevent medium removal for the specified device
 * @sdev:	SCSI device to prevent medium removal
 *
 * Locking:
 *	We must be called from process context; scsi_allocate_request()
 *	may sleep.
 *
 * Notes:
 *	We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
 *	head of the device's request queue, and continue.
 *
 * Bugs:
 *	scsi_allocate_request() may sleep waiting for existing requests to
 *	be processed.  However, since we haven't kicked off any request
 *	processing for this host, this may deadlock.
 *
 *	If scsi_allocate_request() fails for whatever reason, we
 *	completely forget to lock the door.
 **/
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);

	if (unlikely(!sreq)) {
		printk(KERN_ERR "%s: request allocate failed,"
		       "prevent media removal cmd not sent\n", __FUNCTION__);
		return;
	}

	sreq->sr_cmnd[0] = ALLOW_MEDIUM_REMOVAL;
	sreq->sr_cmnd[1] = 0;
	sreq->sr_cmnd[2] = 0;
	sreq->sr_cmnd[3] = 0;
	sreq->sr_cmnd[4] = SCSI_REMOVAL_PREVENT;
	sreq->sr_cmnd[5] = 0;
	sreq->sr_data_direction = DMA_NONE;
	sreq->sr_bufflen = 0;
	sreq->sr_buffer = NULL;
	sreq->sr_allowed = 5;
	sreq->sr_done = scsi_eh_lock_done;
	sreq->sr_timeout_per_command = 10 * HZ;
	sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	scsi_insert_special_req(sreq, 1);
}


/**
 * scsi_restart_operations - restart io operations to the specified host.
 * @shost:	Host we are restarting.
 *
 * Notes:
 *    When we entered the error handler, we blocked all further i/o to
 *    this device.  we need to 'reverse' this process.
 **/
static void scsi_restart_operations(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	/*
	 * If the door was locked, we need to insert a door lock request
	 * onto the head of the SCSI request queue for the device.  There
	 * is no point trying to lock the door of an off-line device.
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->locked)
			scsi_eh_lock_door(sdev);
	}

	/*
	 * next free up anything directly waiting upon the host.  this
	 * will be requests for character device operations, and also for
	 * ioctls to queued block devices.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
					  __FUNCTION__));

	clear_bit(SHOST_RECOVERY, &shost->shost_state);

	wake_up(&shost->host_wait);

	/*
	 * finally we need to re-initiate requests that may be pending.  we will
	 * have had everything blocked while error handling is taking place, and
	 * now that error recovery is done, we will need to ensure that these
	 * requests are started.
	 */
	scsi_run_host_queues(shost);
}

/**
 * scsi_eh_ready_devs - check device ready state and recover if not.
 * @shost:	host to be recovered.
 * @eh_done_q:	list_head for processed commands.
 *
 **/
static void scsi_eh_ready_devs(struct Scsi_Host *shost,
			       struct list_head *work_q,
			       struct list_head *done_q)
{
	if (!scsi_eh_stu(shost, work_q, done_q))
		if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
			if (!scsi_eh_bus_reset(shost, work_q, done_q))
				if (!scsi_eh_host_reset(work_q, done_q))
					scsi_eh_offline_sdevs(work_q, done_q);
}

/**
 * scsi_eh_flush_done_q - finish processed commands or retry them.
 * @done_q:	list_head of processed commands.
 *
 **/
static void scsi_eh_flush_done_q(struct list_head *done_q)
{
	struct list_head *lh, *lh_sf;
	struct scsi_cmnd *scmd;

	list_for_each_safe(lh, lh_sf, done_q) {
		scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
		list_del_init(lh);
		if (scsi_device_online(scmd->device) &&
		    !blk_noretry_request(scmd->request) &&
		    (++scmd->retries < scmd->allowed)) {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
							  " retry cmd: %p\n",
							  current->comm,
							  scmd));
			scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		} else {
			if (!scmd->result)
				scmd->result |= (DRIVER_TIMEOUT << 24);
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
							  " cmd: %p\n",
							  current->comm, scmd));
			scsi_finish_command(scmd);
		}
	}
}

/**
 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
 * @shost:	Host to unjam.
 *
 * Notes:
 *    When we come in here, we *know* that all commands on the bus have
 *    either completed, failed or timed out.  we also know that no further
 *    commands are being sent to the host, so things are relatively quiet
 *    and we have freedom to fiddle with things as we wish.
 *
 *    This is only the *default* implementation.  it is possible for
 *    individual drivers to supply their own version of this function, and
 *    if the maintainer wishes to do this, it is strongly suggested that
 *    this function be taken as a template and modified.  this function
 *    was designed to correctly handle problems for about 95% of the
 *    different cases out there, and it should always provide at least a
 *    reasonable amount of error recovery.
 *
 *    Any command marked 'failed' or 'timeout' must eventually have
 *    scsi_finish_cmd() called for it.  we do all of the retry stuff
 *    here, so when we restart the host after we return it should have an
 *    empty queue.
 **/
static void scsi_unjam_host(struct Scsi_Host *shost)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);
	LIST_HEAD(eh_done_q);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));

	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
		if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
			scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);

	scsi_eh_flush_done_q(&eh_done_q);
}

/**
 * scsi_error_handler - Handle errors/timeouts of SCSI cmds.
 * @data:	Host for which we are running.
 *
 * Notes:
 *    This is always run in the context of a kernel thread.  The idea is
 *    that we start this thing up when the kernel starts up (one per host
 *    that we detect), and it immediately goes to sleep and waits for some
 *    event (i.e. failure).  When this takes place, we have the job of
 *    trying to unjam the bus and restarting things.
 **/
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = (struct Scsi_Host *) data;
	int rtn;
	DECLARE_MUTEX_LOCKED(sem);

	/*
	 * Flush resources
	 */

	daemonize("scsi_eh_%d", shost->host_no);

	current->flags |= PF_NOFREEZE;

	shost->eh_wait = &sem;
	shost->ehandler = current;

	/*
	 * Wake up the thread that created us.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of"
					  " scsi_eh_%d\n", shost->host_no));

	complete(shost->eh_notify);

	while (1) {
		/*
		 * If we get a signal, it means we are supposed to go
		 * away and die.  This typically happens if the user is
		 * trying to unload a module.
		 */
		SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
						  " scsi_eh_%d"
						  " sleeping\n", shost->host_no));

		/*
		 * Note - we always use down_interruptible with the semaphore
		 * even if the module was loaded as part of the kernel.  The
		 * reason is that down() will cause this thread to be counted
		 * in the load average as a running process, and down
		 * interruptible doesn't.  Given that we need to allow this
		 * thread to die if the driver was loaded as a module, using
		 * semaphores isn't unreasonable.
		 */
		down_interruptible(&sem);
		if (shost->eh_kill)
			break;

		SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
						  " scsi_eh_%d waking"
						  " up\n", shost->host_no));

		shost->eh_active = 1;

		/*
		 * We have a host that is failing for some reason.  Figure out
		 * what we need to do to get it up and online again (if we can).
		 * If we fail, we end up taking the thing offline.
		 */
		if (shost->hostt->eh_strategy_handler)
			rtn = shost->hostt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);

		shost->eh_active = 0;

		/*
		 * Note - if the above fails completely, the action is to take
		 * individual devices offline and flush the queue of any
		 * outstanding requests that may have been pending.  When we
		 * restart, we restart any I/O to any other devices on the bus
		 * which are still online.
		 */
		scsi_restart_operations(shost);

	}

	SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d"
					  " exiting\n", shost->host_no));

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	shost->eh_wait = NULL;

	/*
	 * Knock this down too.  From this point on, the host is flying
	 * without a pilot.  If this is because the module is being unloaded,
	 * that's fine.  If the user sent a signal to this thing, we are
	 * potentially in real danger.
	 */
	shost->eh_active = 0;
	shost->ehandler = NULL;

	/*
	 * If anyone is waiting for us to exit (i.e. someone trying to unload
	 * a driver), then wake up that process to let them know we are on
	 * the way out the door.
	 */
	complete_and_exit(shost->eh_notify, 0);
	return 0;
}

/*
 * Function:	scsi_report_bus_reset()
 *
 * Purpose:	Utility function used by low-level drivers to report that
 *		they have observed a bus reset on the bus being handled.
 *
 * Arguments:	shost   - Host in question
 *		channel - channel on which reset was observed.
 *
 * Returns:	Nothing
 *
 * Lock status:	Host lock must be held.
 *
 * Notes:	This only needs to be called if the reset is one which
 *		originates from an unknown location.  Resets originated
 *		by the mid-level itself don't need to call this, but there
 *		should be no harm.
 *
 *		The main purpose of this is to make sure that a
 *		CHECK_CONDITION is properly treated.
 */
void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
{
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if (channel == sdev->channel) {
			sdev->was_reset = 1;
			sdev->expecting_cc_ua = 1;
		}
	}
}
EXPORT_SYMBOL(scsi_report_bus_reset);

/*
 * Function:	scsi_report_device_reset()
 *
 * Purpose:	Utility function used by low-level drivers to report that
 *		they have observed a device reset on the device being handled.
 *
 * Arguments:	shost   - Host in question
 *		channel - channel on which reset was observed
 *		target  - target on which reset was observed
 *
 * Returns:	Nothing
 *
 * Lock status:	Host lock must be held
 *
 * Notes:	This only needs to be called if the reset is one which
 *		originates from an unknown location.  Resets originated
 *		by the mid-level itself don't need to call this, but there
 *		should be no harm.
 *
 *		The main purpose of this is to make sure that a
 *		CHECK_CONDITION is properly treated.
 */
void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
{
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if (channel == sdev->channel &&
		    target == sdev->id) {
			sdev->was_reset = 1;
			sdev->expecting_cc_ua = 1;
		}
	}
}
EXPORT_SYMBOL(scsi_report_device_reset);

static void
scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
{
}

/*
 * Function:	scsi_reset_provider
 *
 * Purpose:	Send requested reset to a bus or device at any phase.
 *
 * Arguments:	device	- device to send reset to
 *		flag - reset type (see scsi.h)
 *
 * Returns:	SUCCESS/FAILURE.
 *
 * Notes:	This is used by the SCSI Generic driver to provide
 *		Bus/Device reset capability.
 */
int
scsi_reset_provider(struct scsi_device *dev, int flag)
{
	struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
	struct request req;
	int rtn;

	scmd->request = &req;
	memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
	scmd->request->rq_status = RQ_SCSI_BUSY;
	scmd->state = SCSI_STATE_INITIALIZING;
	scmd->owner = SCSI_OWNER_MIDLEVEL;

	memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));

	scmd->scsi_done = scsi_reset_provider_done_command;
	scmd->done = NULL;
	scmd->buffer = NULL;
	scmd->bufflen = 0;
	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;
	scmd->abort_reason = DID_ABORT;

	scmd->cmd_len = 0;

	scmd->sc_data_direction = DMA_BIDIRECTIONAL;
	scmd->sc_request = NULL;
	scmd->sc_magic = SCSI_CMND_MAGIC;

	init_timer(&scmd->eh_timeout);

	/*
	 * Sometimes the command can get back into the timer chain,
	 * so use the pid as an identifier.
	 */
	scmd->pid = 0;

	switch (flag) {
	case SCSI_TRY_RESET_DEVICE:
		rtn = scsi_try_bus_device_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_BUS:
		rtn = scsi_try_bus_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_HOST:
		rtn = scsi_try_host_reset(scmd);
		break;
	default:
		rtn = FAILED;
	}

	scsi_delete_timer(scmd);
	scsi_next_command(scmd);
	return rtn;
}
EXPORT_SYMBOL(scsi_reset_provider);

/**
 * scsi_normalize_sense - normalize main elements from either fixed or
 *			descriptor sense data format into a common format.
 *
 * @sense_buffer:	byte array containing sense data returned by device
 * @sb_len:		number of valid bytes in sense_buffer
 * @sshdr:		pointer to instance of structure that common
 *			elements are written to.
 *
 * Notes:
 *	The "main elements" from sense data are: response_code, sense_key,
 *	asc, ascq and additional_length (only for descriptor format).
 *
 *	Typically this function can be called after a device has
 *	responded to a SCSI command with the CHECK_CONDITION status.
 *
 * Return value:
 *	1 if valid sense data information found, else 0;
 **/
int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
			 struct scsi_sense_hdr *sshdr)
{
	if (!sense_buffer || !sb_len || (sense_buffer[0] & 0x70) != 0x70)
		return 0;

	memset(sshdr, 0, sizeof(struct scsi_sense_hdr));

	sshdr->response_code = (sense_buffer[0] & 0x7f);
	if (sshdr->response_code >= 0x72) {
		/*
		 * descriptor format
		 */
		if (sb_len > 1)
			sshdr->sense_key = (sense_buffer[1] & 0xf);
		if (sb_len > 2)
			sshdr->asc = sense_buffer[2];
		if (sb_len > 3)
			sshdr->ascq = sense_buffer[3];
		if (sb_len > 7)
			sshdr->additional_length = sense_buffer[7];
	} else {
		/*
		 * fixed format
		 */
		if (sb_len > 2)
			sshdr->sense_key = (sense_buffer[2] & 0xf);
		if (sb_len > 7) {
			sb_len = (sb_len < (sense_buffer[7] + 8)) ?
					 sb_len : (sense_buffer[7] + 8);
			if (sb_len > 12)
				sshdr->asc = sense_buffer[12];
			if (sb_len > 13)
				sshdr->ascq = sense_buffer[13];
		}
	}

	return 1;
}
EXPORT_SYMBOL(scsi_normalize_sense);

int scsi_request_normalize_sense(struct scsi_request *sreq,
				 struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(sreq->sr_sense_buffer,
				    sizeof(sreq->sr_sense_buffer), sshdr);
}
EXPORT_SYMBOL(scsi_request_normalize_sense);

int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
				 struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(cmd->sense_buffer,
				    sizeof(cmd->sense_buffer), sshdr);
}
EXPORT_SYMBOL(scsi_command_normalize_sense);

/**
 * scsi_sense_desc_find - search for a given descriptor type in
 *			descriptor sense data format.
 *
 * @sense_buffer:	byte array of descriptor format sense data
 * @sb_len:		number of valid bytes in sense_buffer
 * @desc_type:		value of descriptor type to find
 *			(e.g. 0 -> information)
 *
 * Notes:
 *	only valid when sense data is in descriptor format
 *
 * Return value:
 *	pointer to start of (first) descriptor if found else NULL
 **/
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
				int desc_type)
{
	int add_sen_len, add_len, desc_len, k;
	const u8 * descp;

	if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
		return NULL;
	if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
		return NULL;
	add_sen_len = (add_sen_len < (sb_len - 8)) ?
			 add_sen_len : (sb_len - 8);
	descp = &sense_buffer[8];
	for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
		descp += desc_len;
		add_len = (k < (add_sen_len - 1)) ? descp[1] : -1;
		desc_len = add_len + 2;
		if (descp[0] == desc_type)
			return descp;
		if (add_len < 0) /* short descriptor ?? */
			break;
	}
	return NULL;
}
EXPORT_SYMBOL(scsi_sense_desc_find);

/**
 * scsi_get_sense_info_fld - attempts to get information field from
 *			sense data (either fixed or descriptor format)
 *
 * @sense_buffer:	byte array of sense data
 * @sb_len:		number of valid bytes in sense_buffer
 * @info_out:		pointer to a 64-bit integer where the 8- or 4-byte
 *			information field will be placed if found.
 *
 * Return value:
 *	1 if information field found, 0 if not found.
 **/
int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
			    u64 * info_out)
{
	int j;
	const u8 * ucp;
	u64 ull;

	if (sb_len < 7)
		return 0;
	switch (sense_buffer[0] & 0x7f) {
	case 0x70:
	case 0x71:
		if (sense_buffer[0] & 0x80) {
			*info_out = (sense_buffer[3] << 24) +
				    (sense_buffer[4] << 16) +
				    (sense_buffer[5] << 8) + sense_buffer[6];
			return 1;
		} else
			return 0;
	case 0x72:
	case 0x73:
		ucp = scsi_sense_desc_find(sense_buffer, sb_len,
					   0 /* info desc */);
		if (ucp && (0xa == ucp[1])) {
			ull = 0;
			for (j = 0; j < 8; ++j) {
				if (j > 0)
					ull <<= 8;
				ull |= ucp[4 + j];
			}
			*info_out = ull;
			return 1;
		} else
			return 0;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(scsi_get_sense_info_fld);