/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
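 *
 * For example, with the table below, a drive that fails the first
 * 10 second try gets a second 10 second try, then a 35 second try and
 * one final 5 second try before EH gives up, i.e. roughly a minute in
 * total.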
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for retarded devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,  /* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,  /* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 * __ata_ehi_push_desc - push error description without adding separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 * ata_ehi_push_desc - push error description with separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 * If @ehi->desc is not empty, ", " is added in-between.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 * ata_ehi_clear_desc - clean error description
 * @ehi: target EHI
 *
 * Clear @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 * ata_port_desc - append port description
 * @ap: target ATA port
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to port
 * description.  If port description is not empty, " " is added
 * in-between.  This function is to be used while initializing
 * ata_host.  The description is printed on host registration.
 *
 * LOCKING:
 * None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 * ata_port_pbar_desc - append PCI BAR description
 * @ap: target ATA port
 * @bar: target PCI BAR
 * @offset: offset into PCI BAR
 * @name: name of the area
 *
 * If @offset is negative, this function formats a string which
 * contains the name, address, size and type of the BAR and
 * appends it to the port description.  If @offset is zero or
 * positive, only the name and the offset address are appended.
 *
 * LOCKING:
 * None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 * ata_internal_cmd_timeout - determine timeout for an internal command
 * @dev: target device
 * @cmd: internal command to be issued
 *
 * Determine timeout for internal command @cmd for @dev.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 * ata_internal_cmd_timed_out - notification for internal command timeout
 * @dev: target device
 * @cmd: internal command which timed out
 *
 * Notify EH that internal command @cmd for @dev timed out.  This
 * function should be called only for commands whose timeouts are
 * determined using ata_internal_cmd_timeout().
 *
 * LOCKING:
 * EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
 * Handles SCSI layer timeout.  We race with normal completion of
 * the qc for @cmd.
 * If the qc is already gone, we lose and let
 * the SCSI command finish (EH_HANDLED).  Otherwise, the qc has
 * timed out and EH should be invoked.  Prevent ata_qc_complete()
 * from finishing it by setting EH_SCHEDULED and return
 * EH_NOT_HANDLED.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Called from timer context
 *
 * RETURNS:
 * EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 *
 * RETURNS:
 * Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we raced normal completion and there is nothing to
	   recover (nr_timedout == 0), why exactly are we doing error
	   recovery? */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
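		 * In other words, if not a single qc has completed
		 * during the last ATA_EH_FASTDRAIN_INTERVAL, the
		 * controller is assumed to be wedged and the port is
		 * force-frozen so that EH can take over.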
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in a timely manner.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
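 *
 * A minimal sketch of how a hypothetical LLDD interrupt handler might
 * use this while already holding the host lock (FATAL_ERR is a made-up
 * controller status bit, not a libata constant):
 *
 *	if (irq_stat & FATAL_ERR) {
 *		ata_ehi_push_desc(&ap->link.eh_info, "fatal controller error");
 *		ata_port_schedule_eh(ap);
 *	}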
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 * ata_link_abort - abort all qc's on the link
 * @link: ATA link to abort qc's for
 *
 * Abort all qc's active on @link and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when HSM violation or some other
 * condition disrupts normal operation of the port.  Frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.  The freeze operation must be called
 * first, because some hardware requires special operations
 * before the taskfile registers are accessible.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received.  This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_thaw_port - EH helper to thaw port
 * @ap: ATA port to thaw
 *
 * Thaw frozen port @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->retries is decremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}

/**
 * ata_dev_disable - disable ATA device
 * @dev: ATA device to disable
 *
 * Disable @dev.
 *
 * LOCKING:
 * EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_about_to_do - about to perform eh_action
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action about to be performed
 *
 * Called just before performing EH actions to clear related bits
 * in @link->eh_info such that EH actions are not unnecessarily
 * repeated.
 *
 * LOCKING:
 * None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_done - EH action complete
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
 * Called right after performing EH actions to clear related bits
 * in @link->eh_context.
 *
 * LOCKING:
 * None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 * ata_err_string - convert err_mask to descriptive string
 * @err_mask: error mask to convert to string
 *
 * Convert @err_mask to descriptive string.  Errors are
 * prioritized according to severity and only the most severe
 * error is reported.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 * ata_read_log_page - read a specific log page
 * @dev: target device
 * @page: page to read
 * @buf: buffer to store read page
 * @sectors: number of sectors to read
 *
 * Read log page using READ_LOG_EXT command.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
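 * -EIO is returned if the log page could not be read, -ENOENT if the
 * device reports that no queued command was in error.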
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}

/**
 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 * @dev: target ATAPI device
 * @r_sense_key: out parameter for sense_key
 *
 * Perform ATAPI TEST_UNIT_READY.
 *
 * LOCKING:
 * EH context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}

/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 * @dfl_sense_key: default sense key to use
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * SENSE.  This function is an EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @link: ATA link to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	hotplug_mask = 0;

	if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all an LLDD has to do
 * is set AC_ERR_DEV in ehi->err_mask.  This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze taskfile of @qc and further determine cause of
 * failure.  This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}

static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}

struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if (ent->timestamp < arg->since)
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}

/**
 * ata_eh_speed_down_verdict - Determine speed down verdict
 * @dev: Device of interest
 *
 * This function examines error ring of @dev and determines
 * whether NCQ needs to be turned off, transfer speed should be
 * stepped down, or falling back to PIO is necessary.
 *
 * ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
 *		   IO commands
 *
 * ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 * ECAT_DUBIOUS_* : Identical to above three but occurred while
 *		    data transfer hasn't been verified.
 *
 * Verdicts are
 *
 * NCQ_OFF	: Turn off NCQ.
 *
 * SPEED_DOWN	: Speed down transfer speed but don't fall back
 *		  to PIO.
 *
 * FALLBACK_TO_PIO : Fall back to PIO.
 *
 * Even if multiple verdicts are returned, only one action is
 * taken per error.  An action triggered by non-DUBIOUS errors
 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
 * This is to expedite speed down decisions right after device is
 * initially configured.
 *
 * The following are the speed down rules.  #1 and #2 deal with
 * DUBIOUS errors.
 *
 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *    occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *    occurred during last 5 mins, NCQ_OFF.
 *
 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *    occurred during last 5 mins, FALLBACK_TO_PIO
 *
 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *    during last 10 mins, NCQ_OFF.
 *
 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *    UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * OR of ATA_EH_SPDN_* flags.
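 *
 * For example, under rule #1 two DUBIOUS_ATA_BUS errors within the
 * last 5 minutes yield SPEED_DOWN and FALLBACK_TO_PIO (with
 * KEEP_ERRORS), while under rule #4 four TOUT_HSM or UNK_DEV errors
 * within the last 10 minutes yield NCQ_OFF.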
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}

/**
 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @eflags: mask of ATA_EFLAG_* flags
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary.  It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}

/**
 * ata_eh_link_autopsy - analyze error and determine recovery action
 * @link: host link to perform autopsy on
 *
 * Analyze why @link failed and determine which recovery actions
 * are needed.  This function also sets more detailed AC_ERR_*
 * values and fills sense data for ATAPI CHECK SENSE.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (qc->flags & ATA_QCFLAG_IO ||
		    (!(qc->err_mask & AC_ERR_INVALID) &&
		     qc->err_mask != AC_ERR_DEV))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
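	 * Port-wide actions stay in ehc->i.action and still apply to
	 * the whole link.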
2058 */ 2059 if (ehc->i.dev) { 2060 ehc->i.dev_action[ehc->i.dev->devno] |= 2061 ehc->i.action & ATA_EH_PERDEV_MASK; 2062 ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2063 } 2064 2065 /* propagate timeout to host link */ 2066 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 2067 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 2068 2069 /* record error and consider speeding down */ 2070 dev = ehc->i.dev; 2071 if (!dev && ((ata_link_max_devices(link) == 1 && 2072 ata_dev_enabled(link->device)))) 2073 dev = link->device; 2074 2075 if (dev) { 2076 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 2077 eflags |= ATA_EFLAG_DUBIOUS_XFER; 2078 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 2079 } 2080 2081 DPRINTK("EXIT\n"); 2082 } 2083 2084 /** 2085 * ata_eh_autopsy - analyze error and determine recovery action 2086 * @ap: host port to perform autopsy on 2087 * 2088 * Analyze all links of @ap and determine why they failed and 2089 * which recovery actions are needed. 2090 * 2091 * LOCKING: 2092 * Kernel thread context (may sleep). 2093 */ 2094 void ata_eh_autopsy(struct ata_port *ap) 2095 { 2096 struct ata_link *link; 2097 2098 ata_for_each_link(link, ap, EDGE) 2099 ata_eh_link_autopsy(link); 2100 2101 /* Handle the frigging slave link. Autopsy is done similarly 2102 * but actions and flags are transferred over to the master 2103 * link and handled from there. 2104 */ 2105 if (ap->slave_link) { 2106 struct ata_eh_context *mehc = &ap->link.eh_context; 2107 struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2108 2109 /* transfer control flags from master to slave */ 2110 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2111 2112 /* perform autopsy on the slave link */ 2113 ata_eh_link_autopsy(ap->slave_link); 2114 2115 /* transfer actions from slave to master and clear slave */ 2116 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2117 mehc->i.action |= sehc->i.action; 2118 mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2119 mehc->i.flags |= sehc->i.flags; 2120 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2121 } 2122 2123 /* Autopsy of fanout ports can affect host link autopsy. 2124 * Perform host link autopsy last. 2125 */ 2126 if (sata_pmp_attached(ap)) 2127 ata_eh_link_autopsy(&ap->link); 2128 } 2129 2130 /** 2131 * ata_get_cmd_descript - get description for ATA command 2132 * @command: ATA command code to get description for 2133 * 2134 * Return a textual description of the given command, or NULL if the 2135 * command is not known. 
2136 * 2137 * LOCKING: 2138 * None 2139 */ 2140 const char *ata_get_cmd_descript(u8 command) 2141 { 2142 #ifdef CONFIG_ATA_VERBOSE_ERROR 2143 static const struct 2144 { 2145 u8 command; 2146 const char *text; 2147 } cmd_descr[] = { 2148 { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 2149 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 2150 { ATA_CMD_STANDBY, "STANDBY" }, 2151 { ATA_CMD_IDLE, "IDLE" }, 2152 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 2153 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 2154 { ATA_CMD_NOP, "NOP" }, 2155 { ATA_CMD_FLUSH, "FLUSH CACHE" }, 2156 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 2157 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 2158 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 2159 { ATA_CMD_SERVICE, "SERVICE" }, 2160 { ATA_CMD_READ, "READ DMA" }, 2161 { ATA_CMD_READ_EXT, "READ DMA EXT" }, 2162 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 2163 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 2164 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 2165 { ATA_CMD_WRITE, "WRITE DMA" }, 2166 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 2167 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 2168 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 2169 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 2170 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 2171 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 2172 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 2173 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 2174 { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 2175 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 2176 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 2177 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 2178 { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 2179 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 2180 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 2181 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 2182 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 2183 { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 2184 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 2185 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 2186 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 2187 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 2188 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 2189 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 2190 { ATA_CMD_SLEEP, "SLEEP" }, 2191 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 2192 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 2193 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 2194 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 2195 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 2196 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 2197 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 2198 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 2199 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 2200 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 2201 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 2202 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 2203 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 2204 { ATA_CMD_PMP_READ, "READ BUFFER" }, 2205 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 2206 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 2207 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 2208 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 2209 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 2210 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 2211 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 2212 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 2213 { 
ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 2214 { ATA_CMD_SMART, "SMART" }, 2215 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 2216 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2217 { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 2218 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 2219 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 2220 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 2221 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 2222 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 2223 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 2224 { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 2225 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 2226 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 2227 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 2228 { ATA_CMD_RESTORE, "RECALIBRATE" }, 2229 { 0, NULL } /* terminate list */ 2230 }; 2231 2232 unsigned int i; 2233 for (i = 0; cmd_descr[i].text; i++) 2234 if (cmd_descr[i].command == command) 2235 return cmd_descr[i].text; 2236 #endif 2237 2238 return NULL; 2239 } 2240 2241 /** 2242 * ata_eh_link_report - report error handling to user 2243 * @link: ATA link EH is going on 2244 * 2245 * Report EH to user. 2246 * 2247 * LOCKING: 2248 * None. 2249 */ 2250 static void ata_eh_link_report(struct ata_link *link) 2251 { 2252 struct ata_port *ap = link->ap; 2253 struct ata_eh_context *ehc = &link->eh_context; 2254 const char *frozen, *desc; 2255 char tries_buf[6]; 2256 int tag, nr_failed = 0; 2257 2258 if (ehc->i.flags & ATA_EHI_QUIET) 2259 return; 2260 2261 desc = NULL; 2262 if (ehc->i.desc[0] != '\0') 2263 desc = ehc->i.desc; 2264 2265 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2266 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2267 2268 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2269 ata_dev_phys_link(qc->dev) != link || 2270 ((qc->flags & ATA_QCFLAG_QUIET) && 2271 qc->err_mask == AC_ERR_DEV)) 2272 continue; 2273 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2274 continue; 2275 2276 nr_failed++; 2277 } 2278 2279 if (!nr_failed && !ehc->i.err_mask) 2280 return; 2281 2282 frozen = ""; 2283 if (ap->pflags & ATA_PFLAG_FROZEN) 2284 frozen = " frozen"; 2285 2286 memset(tries_buf, 0, sizeof(tries_buf)); 2287 if (ap->eh_tries < ATA_EH_MAX_TRIES) 2288 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 2289 ap->eh_tries); 2290 2291 if (ehc->i.dev) { 2292 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 2293 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2294 ehc->i.err_mask, link->sactive, ehc->i.serror, 2295 ehc->i.action, frozen, tries_buf); 2296 if (desc) 2297 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 2298 } else { 2299 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " 2300 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2301 ehc->i.err_mask, link->sactive, ehc->i.serror, 2302 ehc->i.action, frozen, tries_buf); 2303 if (desc) 2304 ata_link_printk(link, KERN_ERR, "%s\n", desc); 2305 } 2306 2307 #ifdef CONFIG_ATA_VERBOSE_ERROR 2308 if (ehc->i.serror) 2309 ata_link_printk(link, KERN_ERR, 2310 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 2311 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 2312 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 2313 ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 2314 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 2315 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 2316 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 2317 ehc->i.serror & SERR_PHYRDY_CHG ? 
"PHYRdyChg " : "", 2318 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 2319 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 2320 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 2321 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 2322 ehc->i.serror & SERR_CRC ? "BadCRC " : "", 2323 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 2324 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 2325 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 2326 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 2327 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 2328 #endif 2329 2330 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2331 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2332 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2333 const u8 *cdb = qc->cdb; 2334 char data_buf[20] = ""; 2335 char cdb_buf[70] = ""; 2336 2337 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2338 ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2339 continue; 2340 2341 if (qc->dma_dir != DMA_NONE) { 2342 static const char *dma_str[] = { 2343 [DMA_BIDIRECTIONAL] = "bidi", 2344 [DMA_TO_DEVICE] = "out", 2345 [DMA_FROM_DEVICE] = "in", 2346 }; 2347 static const char *prot_str[] = { 2348 [ATA_PROT_PIO] = "pio", 2349 [ATA_PROT_DMA] = "dma", 2350 [ATA_PROT_NCQ] = "ncq", 2351 [ATAPI_PROT_PIO] = "pio", 2352 [ATAPI_PROT_DMA] = "dma", 2353 }; 2354 2355 snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2356 prot_str[qc->tf.protocol], qc->nbytes, 2357 dma_str[qc->dma_dir]); 2358 } 2359 2360 if (ata_is_atapi(qc->tf.protocol)) { 2361 if (qc->scsicmd) 2362 scsi_print_command(qc->scsicmd); 2363 else 2364 snprintf(cdb_buf, sizeof(cdb_buf), 2365 "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 2366 "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2367 cdb[0], cdb[1], cdb[2], cdb[3], 2368 cdb[4], cdb[5], cdb[6], cdb[7], 2369 cdb[8], cdb[9], cdb[10], cdb[11], 2370 cdb[12], cdb[13], cdb[14], cdb[15]); 2371 } else { 2372 const char *descr = ata_get_cmd_descript(cmd->command); 2373 if (descr) 2374 ata_dev_printk(qc->dev, KERN_ERR, 2375 "failed command: %s\n", descr); 2376 } 2377 2378 ata_dev_printk(qc->dev, KERN_ERR, 2379 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2380 "tag %d%s\n %s" 2381 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2382 "Emask 0x%x (%s)%s\n", 2383 cmd->command, cmd->feature, cmd->nsect, 2384 cmd->lbal, cmd->lbam, cmd->lbah, 2385 cmd->hob_feature, cmd->hob_nsect, 2386 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2387 cmd->device, qc->tag, data_buf, cdb_buf, 2388 res->command, res->feature, res->nsect, 2389 res->lbal, res->lbam, res->lbah, 2390 res->hob_feature, res->hob_nsect, 2391 res->hob_lbal, res->hob_lbam, res->hob_lbah, 2392 res->device, qc->err_mask, ata_err_string(qc->err_mask), 2393 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 2394 2395 #ifdef CONFIG_ATA_VERBOSE_ERROR 2396 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2397 ATA_ERR)) { 2398 if (res->command & ATA_BUSY) 2399 ata_dev_printk(qc->dev, KERN_ERR, 2400 "status: { Busy }\n"); 2401 else 2402 ata_dev_printk(qc->dev, KERN_ERR, 2403 "status: { %s%s%s%s}\n", 2404 res->command & ATA_DRDY ? "DRDY " : "", 2405 res->command & ATA_DF ? "DF " : "", 2406 res->command & ATA_DRQ ? "DRQ " : "", 2407 res->command & ATA_ERR ? "ERR " : ""); 2408 } 2409 2410 if (cmd->command != ATA_CMD_PACKET && 2411 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 2412 ATA_ABORTED))) 2413 ata_dev_printk(qc->dev, KERN_ERR, 2414 "error: { %s%s%s%s}\n", 2415 res->feature & ATA_ICRC ? 
"ICRC " : "", 2416 res->feature & ATA_UNC ? "UNC " : "", 2417 res->feature & ATA_IDNF ? "IDNF " : "", 2418 res->feature & ATA_ABORTED ? "ABRT " : ""); 2419 #endif 2420 } 2421 } 2422 2423 /** 2424 * ata_eh_report - report error handling to user 2425 * @ap: ATA port to report EH about 2426 * 2427 * Report EH to user. 2428 * 2429 * LOCKING: 2430 * None. 2431 */ 2432 void ata_eh_report(struct ata_port *ap) 2433 { 2434 struct ata_link *link; 2435 2436 ata_for_each_link(link, ap, HOST_FIRST) 2437 ata_eh_link_report(link); 2438 } 2439 2440 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2441 unsigned int *classes, unsigned long deadline, 2442 bool clear_classes) 2443 { 2444 struct ata_device *dev; 2445 2446 if (clear_classes) 2447 ata_for_each_dev(dev, link, ALL) 2448 classes[dev->devno] = ATA_DEV_UNKNOWN; 2449 2450 return reset(link, classes, deadline); 2451 } 2452 2453 static int ata_eh_followup_srst_needed(struct ata_link *link, 2454 int rc, const unsigned int *classes) 2455 { 2456 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2457 return 0; 2458 if (rc == -EAGAIN) 2459 return 1; 2460 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 2461 return 1; 2462 return 0; 2463 } 2464 2465 int ata_eh_reset(struct ata_link *link, int classify, 2466 ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2467 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2468 { 2469 struct ata_port *ap = link->ap; 2470 struct ata_link *slave = ap->slave_link; 2471 struct ata_eh_context *ehc = &link->eh_context; 2472 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; 2473 unsigned int *classes = ehc->classes; 2474 unsigned int lflags = link->flags; 2475 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2476 int max_tries = 0, try = 0; 2477 struct ata_link *failed_link; 2478 struct ata_device *dev; 2479 unsigned long deadline, now; 2480 ata_reset_fn_t reset; 2481 unsigned long flags; 2482 u32 sstatus; 2483 int nr_unknown, rc; 2484 2485 /* 2486 * Prepare to reset 2487 */ 2488 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2489 max_tries++; 2490 if (link->flags & ATA_LFLAG_NO_HRST) 2491 hardreset = NULL; 2492 if (link->flags & ATA_LFLAG_NO_SRST) 2493 softreset = NULL; 2494 2495 /* make sure each reset attemp is at least COOL_DOWN apart */ 2496 if (ehc->i.flags & ATA_EHI_DID_RESET) { 2497 now = jiffies; 2498 WARN_ON(time_after(ehc->last_reset, now)); 2499 deadline = ata_deadline(ehc->last_reset, 2500 ATA_EH_RESET_COOL_DOWN); 2501 if (time_before(now, deadline)) 2502 schedule_timeout_uninterruptible(deadline - now); 2503 } 2504 2505 spin_lock_irqsave(ap->lock, flags); 2506 ap->pflags |= ATA_PFLAG_RESETTING; 2507 spin_unlock_irqrestore(ap->lock, flags); 2508 2509 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2510 2511 ata_for_each_dev(dev, link, ALL) { 2512 /* If we issue an SRST then an ATA drive (not ATAPI) 2513 * may change configuration and be in PIO0 timing. If 2514 * we do a hard reset (or are coming from power on) 2515 * this is true for ATA or ATAPI. Until we've set a 2516 * suitable controller mode we should not touch the 2517 * bus as we may be talking too fast. 2518 */ 2519 dev->pio_mode = XFER_PIO_0; 2520 2521 /* If the controller has a pio mode setup function 2522 * then use it to set the chipset to rights. Don't 2523 * touch the DMA setup as that will be dealt with when 2524 * configuring devices. 
2525 */ 2526 if (ap->ops->set_piomode) 2527 ap->ops->set_piomode(ap, dev); 2528 } 2529 2530 /* prefer hardreset */ 2531 reset = NULL; 2532 ehc->i.action &= ~ATA_EH_RESET; 2533 if (hardreset) { 2534 reset = hardreset; 2535 ehc->i.action |= ATA_EH_HARDRESET; 2536 } else if (softreset) { 2537 reset = softreset; 2538 ehc->i.action |= ATA_EH_SOFTRESET; 2539 } 2540 2541 if (prereset) { 2542 unsigned long deadline = ata_deadline(jiffies, 2543 ATA_EH_PRERESET_TIMEOUT); 2544 2545 if (slave) { 2546 sehc->i.action &= ~ATA_EH_RESET; 2547 sehc->i.action |= ehc->i.action; 2548 } 2549 2550 rc = prereset(link, deadline); 2551 2552 /* If present, do prereset on slave link too. Reset 2553 * is skipped iff both master and slave links report 2554 * -ENOENT or clear ATA_EH_RESET. 2555 */ 2556 if (slave && (rc == 0 || rc == -ENOENT)) { 2557 int tmp; 2558 2559 tmp = prereset(slave, deadline); 2560 if (tmp != -ENOENT) 2561 rc = tmp; 2562 2563 ehc->i.action |= sehc->i.action; 2564 } 2565 2566 if (rc) { 2567 if (rc == -ENOENT) { 2568 ata_link_printk(link, KERN_DEBUG, 2569 "port disabled. ignoring.\n"); 2570 ehc->i.action &= ~ATA_EH_RESET; 2571 2572 ata_for_each_dev(dev, link, ALL) 2573 classes[dev->devno] = ATA_DEV_NONE; 2574 2575 rc = 0; 2576 } else 2577 ata_link_printk(link, KERN_ERR, 2578 "prereset failed (errno=%d)\n", rc); 2579 goto out; 2580 } 2581 2582 /* prereset() might have cleared ATA_EH_RESET. If so, 2583 * bang classes, thaw and return. 2584 */ 2585 if (reset && !(ehc->i.action & ATA_EH_RESET)) { 2586 ata_for_each_dev(dev, link, ALL) 2587 classes[dev->devno] = ATA_DEV_NONE; 2588 if ((ap->pflags & ATA_PFLAG_FROZEN) && 2589 ata_is_host_link(link)) 2590 ata_eh_thaw_port(ap); 2591 rc = 0; 2592 goto out; 2593 } 2594 } 2595 2596 retry: 2597 /* 2598 * Perform reset 2599 */ 2600 if (ata_is_host_link(link)) 2601 ata_eh_freeze_port(ap); 2602 2603 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 2604 2605 if (reset) { 2606 if (verbose) 2607 ata_link_printk(link, KERN_INFO, "%s resetting link\n", 2608 reset == softreset ? 
"soft" : "hard"); 2609 2610 /* mark that this EH session started with reset */ 2611 ehc->last_reset = jiffies; 2612 if (reset == hardreset) 2613 ehc->i.flags |= ATA_EHI_DID_HARDRESET; 2614 else 2615 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2616 2617 rc = ata_do_reset(link, reset, classes, deadline, true); 2618 if (rc && rc != -EAGAIN) { 2619 failed_link = link; 2620 goto fail; 2621 } 2622 2623 /* hardreset slave link if existent */ 2624 if (slave && reset == hardreset) { 2625 int tmp; 2626 2627 if (verbose) 2628 ata_link_printk(slave, KERN_INFO, 2629 "hard resetting link\n"); 2630 2631 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2632 tmp = ata_do_reset(slave, reset, classes, deadline, 2633 false); 2634 switch (tmp) { 2635 case -EAGAIN: 2636 rc = -EAGAIN; 2637 case 0: 2638 break; 2639 default: 2640 failed_link = slave; 2641 rc = tmp; 2642 goto fail; 2643 } 2644 } 2645 2646 /* perform follow-up SRST if necessary */ 2647 if (reset == hardreset && 2648 ata_eh_followup_srst_needed(link, rc, classes)) { 2649 reset = softreset; 2650 2651 if (!reset) { 2652 ata_link_printk(link, KERN_ERR, 2653 "follow-up softreset required " 2654 "but no softreset avaliable\n"); 2655 failed_link = link; 2656 rc = -EINVAL; 2657 goto fail; 2658 } 2659 2660 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2661 rc = ata_do_reset(link, reset, classes, deadline, true); 2662 if (rc) { 2663 failed_link = link; 2664 goto fail; 2665 } 2666 } 2667 } else { 2668 if (verbose) 2669 ata_link_printk(link, KERN_INFO, "no reset method " 2670 "available, skipping reset\n"); 2671 if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2672 lflags |= ATA_LFLAG_ASSUME_ATA; 2673 } 2674 2675 /* 2676 * Post-reset processing 2677 */ 2678 ata_for_each_dev(dev, link, ALL) { 2679 /* After the reset, the device state is PIO 0 and the 2680 * controller state is undefined. Reset also wakes up 2681 * drives from sleeping mode. 2682 */ 2683 dev->pio_mode = XFER_PIO_0; 2684 dev->flags &= ~ATA_DFLAG_SLEEPING; 2685 2686 if (ata_phys_link_offline(ata_dev_phys_link(dev))) 2687 continue; 2688 2689 /* apply class override */ 2690 if (lflags & ATA_LFLAG_ASSUME_ATA) 2691 classes[dev->devno] = ATA_DEV_ATA; 2692 else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2693 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2694 } 2695 2696 /* record current link speed */ 2697 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2698 link->sata_spd = (sstatus >> 4) & 0xf; 2699 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2700 slave->sata_spd = (sstatus >> 4) & 0xf; 2701 2702 /* thaw the port */ 2703 if (ata_is_host_link(link)) 2704 ata_eh_thaw_port(ap); 2705 2706 /* postreset() should clear hardware SError. Although SError 2707 * is cleared during link resume, clearing SError here is 2708 * necessary as some PHYs raise hotplug events after SRST. 2709 * This introduces race condition where hotplug occurs between 2710 * reset and here. This race is mediated by cross checking 2711 * link onlineness and classification result later. 2712 */ 2713 if (postreset) { 2714 postreset(link, classes); 2715 if (slave) 2716 postreset(slave, classes); 2717 } 2718 2719 /* 2720 * Some controllers can't be frozen very well and may set 2721 * spuruious error conditions during reset. Clear accumulated 2722 * error information. As reset is the final recovery action, 2723 * nothing is lost by doing this. 
*/ 2725 spin_lock_irqsave(link->ap->lock, flags); 2726 memset(&link->eh_info, 0, sizeof(link->eh_info)); 2727 if (slave) 2728 memset(&slave->eh_info, 0, sizeof(link->eh_info)); 2729 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2730 spin_unlock_irqrestore(link->ap->lock, flags); 2731 2732 /* 2733 * Make sure onlineness and classification result correspond. 2734 * Hotplug could have happened during reset and some 2735 * controllers fail to wait while a drive is spinning up after 2736 * being hotplugged causing misdetection. By cross checking 2737 * link on/offlineness and classification result, those 2738 * conditions can be reliably detected and retried. 2739 */ 2740 nr_unknown = 0; 2741 ata_for_each_dev(dev, link, ALL) { 2742 if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2743 if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2744 ata_dev_printk(dev, KERN_DEBUG, "link online " 2745 "but device misclassified\n"); 2746 classes[dev->devno] = ATA_DEV_NONE; 2747 nr_unknown++; 2748 } 2749 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2750 if (ata_class_enabled(classes[dev->devno])) 2751 ata_dev_printk(dev, KERN_DEBUG, "link offline, " 2752 "clearing class %d to NONE\n", 2753 classes[dev->devno]); 2754 classes[dev->devno] = ATA_DEV_NONE; 2755 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2756 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, " 2757 "clearing UNKNOWN to NONE\n"); 2758 classes[dev->devno] = ATA_DEV_NONE; 2759 } 2760 } 2761 2762 if (classify && nr_unknown) { 2763 if (try < max_tries) { 2764 ata_link_printk(link, KERN_WARNING, "link online but " 2765 "%d devices misclassified, retrying\n", 2766 nr_unknown); 2767 failed_link = link; 2768 rc = -EAGAIN; 2769 goto fail; 2770 } 2771 ata_link_printk(link, KERN_WARNING, 2772 "link online but %d devices misclassified, " 2773 "device detection might fail\n", nr_unknown); 2774 } 2775 2776 /* reset successful, schedule revalidation */ 2777 ata_eh_done(link, NULL, ATA_EH_RESET); 2778 if (slave) 2779 ata_eh_done(slave, NULL, ATA_EH_RESET); 2780 ehc->last_reset = jiffies; /* update to completion time */ 2781 ehc->i.action |= ATA_EH_REVALIDATE; 2782 2783 rc = 0; 2784 out: 2785 /* clear hotplug flag */ 2786 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2787 if (slave) 2788 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2789 2790 spin_lock_irqsave(ap->lock, flags); 2791 ap->pflags &= ~ATA_PFLAG_RESETTING; 2792 spin_unlock_irqrestore(ap->lock, flags); 2793 2794 return rc; 2795 2796 fail: 2797 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 2798 if (!ata_is_host_link(link) && 2799 sata_scr_read(link, SCR_STATUS, &sstatus)) 2800 rc = -ERESTART; 2801 2802 if (rc == -ERESTART || try >= max_tries) 2803 goto out; 2804 2805 now = jiffies; 2806 if (time_before(now, deadline)) { 2807 unsigned long delta = deadline - now; 2808 2809 ata_link_printk(failed_link, KERN_WARNING, 2810 "reset failed (errno=%d), retrying in %u secs\n", 2811 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2812 2813 while (delta) 2814 delta = schedule_timeout_uninterruptible(delta); 2815 } 2816 2817 if (try == max_tries - 1) { 2818 sata_down_spd_limit(link, 0); 2819 if (slave) 2820 sata_down_spd_limit(slave, 0); 2821 } else if (rc == -EPIPE) 2822 sata_down_spd_limit(failed_link, 0); 2823 2824 if (hardreset) 2825 reset = hardreset; 2826 goto retry; 2827 } 2828 2829 static inline void ata_eh_pull_park_action(struct ata_port *ap) 2830 { 2831 struct ata_link *link; 2832 struct ata_device *dev; 2833 unsigned long flags; 2834 2835 /* 2836 * This function can be
thought of as an extended version of 2837 * ata_eh_about_to_do() specially crafted to accommodate the 2838 * requirements of ATA_EH_PARK handling. Since the EH thread 2839 * does not leave the do {} while () loop in ata_eh_recover as 2840 * long as the timeout for a park request to *one* device on 2841 * the port has not expired, and since we still want to pick 2842 * up park requests to other devices on the same port or 2843 * timeout updates for the same device, we have to pull 2844 * ATA_EH_PARK actions from eh_info into eh_context.i 2845 * ourselves at the beginning of each pass over the loop. 2846 * 2847 * Additionally, all write accesses to &ap->park_req_pending 2848 * through INIT_COMPLETION() (see below) or complete_all() 2849 * (see ata_scsi_park_store()) are protected by the host lock. 2850 * As a result we have that park_req_pending.done is zero on 2851 * exit from this function, i.e. when ATA_EH_PARK actions for 2852 * *all* devices on port ap have been pulled into the 2853 * respective eh_context structs. If, and only if, 2854 * park_req_pending.done is non-zero by the time we reach 2855 * wait_for_completion_timeout(), another ATA_EH_PARK action 2856 * has been scheduled for at least one of the devices on port 2857 * ap and we have to cycle over the do {} while () loop in 2858 * ata_eh_recover() again. 2859 */ 2860 2861 spin_lock_irqsave(ap->lock, flags); 2862 INIT_COMPLETION(ap->park_req_pending); 2863 ata_for_each_link(link, ap, EDGE) { 2864 ata_for_each_dev(dev, link, ALL) { 2865 struct ata_eh_info *ehi = &link->eh_info; 2866 2867 link->eh_context.i.dev_action[dev->devno] |= 2868 ehi->dev_action[dev->devno] & ATA_EH_PARK; 2869 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 2870 } 2871 } 2872 spin_unlock_irqrestore(ap->lock, flags); 2873 } 2874 2875 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 2876 { 2877 struct ata_eh_context *ehc = &dev->link->eh_context; 2878 struct ata_taskfile tf; 2879 unsigned int err_mask; 2880 2881 ata_tf_init(dev, &tf); 2882 if (park) { 2883 ehc->unloaded_mask |= 1 << dev->devno; 2884 tf.command = ATA_CMD_IDLEIMMEDIATE; 2885 tf.feature = 0x44; 2886 tf.lbal = 0x4c; 2887 tf.lbam = 0x4e; 2888 tf.lbah = 0x55; 2889 } else { 2890 ehc->unloaded_mask &= ~(1 << dev->devno); 2891 tf.command = ATA_CMD_CHK_POWER; 2892 } 2893 2894 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 2895 tf.protocol |= ATA_PROT_NODATA; 2896 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 2897 if (park && (err_mask || tf.lbal != 0xc4)) { 2898 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n"); 2899 ehc->unloaded_mask &= ~(1 << dev->devno); 2900 } 2901 } 2902 2903 static int ata_eh_revalidate_and_attach(struct ata_link *link, 2904 struct ata_device **r_failed_dev) 2905 { 2906 struct ata_port *ap = link->ap; 2907 struct ata_eh_context *ehc = &link->eh_context; 2908 struct ata_device *dev; 2909 unsigned int new_mask = 0; 2910 unsigned long flags; 2911 int rc = 0; 2912 2913 DPRINTK("ENTER\n"); 2914 2915 /* For PATA drive side cable detection to work, IDENTIFY must 2916 * be done backwards such that PDIAG- is released by the slave 2917 * device before the master device is identified. 
2918 */ 2919 ata_for_each_dev(dev, link, ALL_REVERSE) { 2920 unsigned int action = ata_eh_dev_action(dev); 2921 unsigned int readid_flags = 0; 2922 2923 if (ehc->i.flags & ATA_EHI_DID_RESET) 2924 readid_flags |= ATA_READID_POSTRESET; 2925 2926 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2927 WARN_ON(dev->class == ATA_DEV_PMP); 2928 2929 if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2930 rc = -EIO; 2931 goto err; 2932 } 2933 2934 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2935 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2936 readid_flags); 2937 if (rc) 2938 goto err; 2939 2940 ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2941 2942 /* Configuration may have changed, reconfigure 2943 * transfer mode. 2944 */ 2945 ehc->i.flags |= ATA_EHI_SETMODE; 2946 2947 /* schedule the scsi_rescan_device() here */ 2948 schedule_work(&(ap->scsi_rescan_task)); 2949 } else if (dev->class == ATA_DEV_UNKNOWN && 2950 ehc->tries[dev->devno] && 2951 ata_class_enabled(ehc->classes[dev->devno])) { 2952 /* Temporarily set dev->class, it will be 2953 * permanently set once all configurations are 2954 * complete. This is necessary because new 2955 * device configuration is done in two 2956 * separate loops. 2957 */ 2958 dev->class = ehc->classes[dev->devno]; 2959 2960 if (dev->class == ATA_DEV_PMP) 2961 rc = sata_pmp_attach(dev); 2962 else 2963 rc = ata_dev_read_id(dev, &dev->class, 2964 readid_flags, dev->id); 2965 2966 /* read_id might have changed class, store and reset */ 2967 ehc->classes[dev->devno] = dev->class; 2968 dev->class = ATA_DEV_UNKNOWN; 2969 2970 switch (rc) { 2971 case 0: 2972 /* clear error info accumulated during probe */ 2973 ata_ering_clear(&dev->ering); 2974 new_mask |= 1 << dev->devno; 2975 break; 2976 case -ENOENT: 2977 /* IDENTIFY was issued to non-existent 2978 * device. No need to reset. Just 2979 * thaw and ignore the device. 2980 */ 2981 ata_eh_thaw_port(ap); 2982 break; 2983 default: 2984 goto err; 2985 } 2986 } 2987 } 2988 2989 /* PDIAG- should have been released, ask cable type if post-reset */ 2990 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 2991 if (ap->ops->cable_detect) 2992 ap->cbl = ap->ops->cable_detect(ap); 2993 ata_force_cbl(ap); 2994 } 2995 2996 /* Configure new devices forward such that user doesn't see 2997 * device detection messages backwards. 2998 */ 2999 ata_for_each_dev(dev, link, ALL) { 3000 if (!(new_mask & (1 << dev->devno))) 3001 continue; 3002 3003 dev->class = ehc->classes[dev->devno]; 3004 3005 if (dev->class == ATA_DEV_PMP) 3006 continue; 3007 3008 ehc->i.flags |= ATA_EHI_PRINTINFO; 3009 rc = ata_dev_configure(dev); 3010 ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3011 if (rc) { 3012 dev->class = ATA_DEV_UNKNOWN; 3013 goto err; 3014 } 3015 3016 spin_lock_irqsave(ap->lock, flags); 3017 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3018 spin_unlock_irqrestore(ap->lock, flags); 3019 3020 /* new device discovered, configure xfermode */ 3021 ehc->i.flags |= ATA_EHI_SETMODE; 3022 } 3023 3024 return 0; 3025 3026 err: 3027 *r_failed_dev = dev; 3028 DPRINTK("EXIT rc=%d\n", rc); 3029 return rc; 3030 } 3031 3032 /** 3033 * ata_set_mode - Program timings and issue SET FEATURES - XFER 3034 * @link: link on which timings will be programmed 3035 * @r_failed_dev: out parameter for failed device 3036 * 3037 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3038 * ata_set_mode() fails, pointer to the failing device is 3039 * returned in @r_failed_dev. 3040 * 3041 * LOCKING: 3042 * PCI/etc. bus probe sem. 
3043 * 3044 * RETURNS: 3045 * 0 on success, negative errno otherwise 3046 */ 3047 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3048 { 3049 struct ata_port *ap = link->ap; 3050 struct ata_device *dev; 3051 int rc; 3052 3053 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 3054 ata_for_each_dev(dev, link, ENABLED) { 3055 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 3056 struct ata_ering_entry *ent; 3057 3058 ent = ata_ering_top(&dev->ering); 3059 if (ent) 3060 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 3061 } 3062 } 3063 3064 /* has private set_mode? */ 3065 if (ap->ops->set_mode) 3066 rc = ap->ops->set_mode(link, r_failed_dev); 3067 else 3068 rc = ata_do_set_mode(link, r_failed_dev); 3069 3070 /* if transfer mode has changed, set DUBIOUS_XFER on device */ 3071 ata_for_each_dev(dev, link, ENABLED) { 3072 struct ata_eh_context *ehc = &link->eh_context; 3073 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 3074 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 3075 3076 if (dev->xfer_mode != saved_xfer_mode || 3077 ata_ncq_enabled(dev) != saved_ncq) 3078 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 3079 } 3080 3081 return rc; 3082 } 3083 3084 /** 3085 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 3086 * @dev: ATAPI device to clear UA for 3087 * 3088 * Resets and other operations can make an ATAPI device raise 3089 * UNIT ATTENTION which causes the next operation to fail. This 3090 * function clears UA. 3091 * 3092 * LOCKING: 3093 * EH context (may sleep). 3094 * 3095 * RETURNS: 3096 * 0 on success, -errno on failure. 3097 */ 3098 static int atapi_eh_clear_ua(struct ata_device *dev) 3099 { 3100 int i; 3101 3102 for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3103 u8 *sense_buffer = dev->link->ap->sector_buf; 3104 u8 sense_key = 0; 3105 unsigned int err_mask; 3106 3107 err_mask = atapi_eh_tur(dev, &sense_key); 3108 if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3109 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY " 3110 "failed (err_mask=0x%x)\n", err_mask); 3111 return -EIO; 3112 } 3113 3114 if (!err_mask || sense_key != UNIT_ATTENTION) 3115 return 0; 3116 3117 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 3118 if (err_mask) { 3119 ata_dev_printk(dev, KERN_WARNING, "failed to clear " 3120 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 3121 return -EIO; 3122 } 3123 } 3124 3125 ata_dev_printk(dev, KERN_WARNING, 3126 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES); 3127 3128 return 0; 3129 } 3130 3131 /** 3132 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 3133 * @dev: ATA device which may need FLUSH retry 3134 * 3135 * If @dev failed FLUSH, it needs to be reported upper layer 3136 * immediately as it means that @dev failed to remap and already 3137 * lost at least a sector and further FLUSH retrials won't make 3138 * any difference to the lost sector. However, if FLUSH failed 3139 * for other reasons, for example transmission error, FLUSH needs 3140 * to be retried. 3141 * 3142 * This function determines whether FLUSH failure retry is 3143 * necessary and performs it if so. 3144 * 3145 * RETURNS: 3146 * 0 if EH can continue, -errno if EH needs to be repeated. 3147 */ 3148 static int ata_eh_maybe_retry_flush(struct ata_device *dev) 3149 { 3150 struct ata_link *link = dev->link; 3151 struct ata_port *ap = link->ap; 3152 struct ata_queued_cmd *qc; 3153 struct ata_taskfile tf; 3154 unsigned int err_mask; 3155 int rc = 0; 3156 3157 /* did flush fail for this device? 
*/ 3158 if (!ata_tag_valid(link->active_tag)) 3159 return 0; 3160 3161 qc = __ata_qc_from_tag(ap, link->active_tag); 3162 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 3163 qc->tf.command != ATA_CMD_FLUSH)) 3164 return 0; 3165 3166 /* if the device failed it, it should be reported to upper layers */ 3167 if (qc->err_mask & AC_ERR_DEV) 3168 return 0; 3169 3170 /* flush failed for some other reason, give it another shot */ 3171 ata_tf_init(dev, &tf); 3172 3173 tf.command = qc->tf.command; 3174 tf.flags |= ATA_TFLAG_DEVICE; 3175 tf.protocol = ATA_PROT_NODATA; 3176 3177 ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n", 3178 tf.command, qc->err_mask); 3179 3180 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 3181 if (!err_mask) { 3182 /* 3183 * FLUSH is complete but there's no way to 3184 * successfully complete a failed command from EH. 3185 * Making sure retry is allowed at least once and 3186 * retrying it should do the trick - whatever was in 3187 * the cache is already on the platter and this won't 3188 * cause infinite loop. 3189 */ 3190 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 3191 } else { 3192 ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n", 3193 err_mask); 3194 rc = -EIO; 3195 3196 /* if device failed it, report it to upper layers */ 3197 if (err_mask & AC_ERR_DEV) { 3198 qc->err_mask |= AC_ERR_DEV; 3199 qc->result_tf = tf; 3200 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 3201 rc = 0; 3202 } 3203 } 3204 return rc; 3205 } 3206 3207 static int ata_link_nr_enabled(struct ata_link *link) 3208 { 3209 struct ata_device *dev; 3210 int cnt = 0; 3211 3212 ata_for_each_dev(dev, link, ENABLED) 3213 cnt++; 3214 return cnt; 3215 } 3216 3217 static int ata_link_nr_vacant(struct ata_link *link) 3218 { 3219 struct ata_device *dev; 3220 int cnt = 0; 3221 3222 ata_for_each_dev(dev, link, ALL) 3223 if (dev->class == ATA_DEV_UNKNOWN) 3224 cnt++; 3225 return cnt; 3226 } 3227 3228 static int ata_eh_skip_recovery(struct ata_link *link) 3229 { 3230 struct ata_port *ap = link->ap; 3231 struct ata_eh_context *ehc = &link->eh_context; 3232 struct ata_device *dev; 3233 3234 /* skip disabled links */ 3235 if (link->flags & ATA_LFLAG_DISABLED) 3236 return 1; 3237 3238 /* thaw frozen port and recover failed devices */ 3239 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3240 return 0; 3241 3242 /* reset at least once if reset is requested */ 3243 if ((ehc->i.action & ATA_EH_RESET) && 3244 !(ehc->i.flags & ATA_EHI_DID_RESET)) 3245 return 0; 3246 3247 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 3248 ata_for_each_dev(dev, link, ALL) { 3249 if (dev->class == ATA_DEV_UNKNOWN && 3250 ehc->classes[dev->devno] != ATA_DEV_NONE) 3251 return 0; 3252 } 3253 3254 return 1; 3255 } 3256 3257 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3258 { 3259 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3260 u64 now = get_jiffies_64(); 3261 int *trials = void_arg; 3262 3263 if (ent->timestamp < now - min(now, interval)) 3264 return -1; 3265 3266 (*trials)++; 3267 return 0; 3268 } 3269 3270 static int ata_eh_schedule_probe(struct ata_device *dev) 3271 { 3272 struct ata_eh_context *ehc = &dev->link->eh_context; 3273 struct ata_link *link = ata_dev_phys_link(dev); 3274 int trials = 0; 3275 3276 if (!(ehc->i.probe_mask & (1 << dev->devno)) || 3277 (ehc->did_probe_mask & (1 << dev->devno))) 3278 return 0; 3279 3280 ata_eh_detach_dev(dev); 3281 ata_dev_init(dev); 3282 
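	/* dev has just been detached and its ata_device struct
	 * re-initialized to a pristine state.  The lines below mark it as
	 * probed once, force a reset and forget any previously saved
	 * transfer mode / NCQ state so that the fresh probe starts from a
	 * clean slate.
	 */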
ehc->did_probe_mask |= (1 << dev->devno); 3283 ehc->i.action |= ATA_EH_RESET; 3284 ehc->saved_xfer_mode[dev->devno] = 0; 3285 ehc->saved_ncq_enabled &= ~(1 << dev->devno); 3286 3287 /* Record and count probe trials on the ering. The specific 3288 * error mask used is irrelevant. Because a successful device 3289 * detection clears the ering, this count accumulates only if 3290 * there are consecutive failed probes. 3291 * 3292 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3293 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3294 * forced to 1.5Gbps. 3295 * 3296 * This is to work around cases where failed link speed 3297 * negotiation results in device misdetection leading to 3298 * infinite DEVXCHG or PHRDY CHG events. 3299 */ 3300 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3301 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3302 3303 if (trials > ATA_EH_PROBE_TRIALS) 3304 sata_down_spd_limit(link, 1); 3305 3306 return 1; 3307 } 3308 3309 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3310 { 3311 struct ata_eh_context *ehc = &dev->link->eh_context; 3312 3313 /* -EAGAIN from EH routine indicates retry without prejudice. 3314 * The requester is responsible for ensuring forward progress. 3315 */ 3316 if (err != -EAGAIN) 3317 ehc->tries[dev->devno]--; 3318 3319 switch (err) { 3320 case -ENODEV: 3321 /* device missing or wrong IDENTIFY data, schedule probing */ 3322 ehc->i.probe_mask |= (1 << dev->devno); 3323 case -EINVAL: 3324 /* give it just one more chance */ 3325 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3326 case -EIO: 3327 if (ehc->tries[dev->devno] == 1) { 3328 /* This is the last chance, better to slow 3329 * down than lose it. 3330 */ 3331 sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3332 if (dev->pio_mode > XFER_PIO_0) 3333 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3334 } 3335 } 3336 3337 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3338 /* disable device if it has used up all its chances */ 3339 ata_dev_disable(dev); 3340 3341 /* detach if offline */ 3342 if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3343 ata_eh_detach_dev(dev); 3344 3345 /* schedule probe if necessary */ 3346 if (ata_eh_schedule_probe(dev)) { 3347 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3348 memset(ehc->cmd_timeout_idx[dev->devno], 0, 3349 sizeof(ehc->cmd_timeout_idx[dev->devno])); 3350 } 3351 3352 return 1; 3353 } else { 3354 ehc->i.action |= ATA_EH_RESET; 3355 return 0; 3356 } 3357 } 3358 3359 /** 3360 * ata_eh_recover - recover host port after error 3361 * @ap: host port to recover 3362 * @prereset: prereset method (can be NULL) 3363 * @softreset: softreset method (can be NULL) 3364 * @hardreset: hardreset method (can be NULL) 3365 * @postreset: postreset method (can be NULL) 3366 * @r_failed_link: out parameter for failed link 3367 * 3368 * This is the alpha and omega, eum and yang, heart and soul of 3369 * libata exception handling. On entry, actions required to 3370 * recover each link and hotplug requests are recorded in the 3371 * link's eh_context. This function executes all the operations 3372 * with appropriate retrials and fallbacks to resurrect failed 3373 * devices, detach goners and greet newcomers. 3374 * 3375 * LOCKING: 3376 * Kernel thread context (may sleep). 3377 * 3378 * RETURNS: 3379 * 0 on success, -errno on failure. 
3380 */ 3381 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3382 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3383 ata_postreset_fn_t postreset, 3384 struct ata_link **r_failed_link) 3385 { 3386 struct ata_link *link; 3387 struct ata_device *dev; 3388 int nr_failed_devs; 3389 int rc; 3390 unsigned long flags, deadline; 3391 3392 DPRINTK("ENTER\n"); 3393 3394 /* prep for recovery */ 3395 ata_for_each_link(link, ap, EDGE) { 3396 struct ata_eh_context *ehc = &link->eh_context; 3397 3398 /* re-enable link? */ 3399 if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3400 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3401 spin_lock_irqsave(ap->lock, flags); 3402 link->flags &= ~ATA_LFLAG_DISABLED; 3403 spin_unlock_irqrestore(ap->lock, flags); 3404 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3405 } 3406 3407 ata_for_each_dev(dev, link, ALL) { 3408 if (link->flags & ATA_LFLAG_NO_RETRY) 3409 ehc->tries[dev->devno] = 1; 3410 else 3411 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3412 3413 /* collect port action mask recorded in dev actions */ 3414 ehc->i.action |= ehc->i.dev_action[dev->devno] & 3415 ~ATA_EH_PERDEV_MASK; 3416 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 3417 3418 /* process hotplug request */ 3419 if (dev->flags & ATA_DFLAG_DETACH) 3420 ata_eh_detach_dev(dev); 3421 3422 /* schedule probe if necessary */ 3423 if (!ata_dev_enabled(dev)) 3424 ata_eh_schedule_probe(dev); 3425 } 3426 } 3427 3428 retry: 3429 rc = 0; 3430 nr_failed_devs = 0; 3431 3432 /* if UNLOADING, finish immediately */ 3433 if (ap->pflags & ATA_PFLAG_UNLOADING) 3434 goto out; 3435 3436 /* prep for EH */ 3437 ata_for_each_link(link, ap, EDGE) { 3438 struct ata_eh_context *ehc = &link->eh_context; 3439 3440 /* skip EH if possible. */ 3441 if (ata_eh_skip_recovery(link)) 3442 ehc->i.action = 0; 3443 3444 ata_for_each_dev(dev, link, ALL) 3445 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 3446 } 3447 3448 /* reset */ 3449 ata_for_each_link(link, ap, EDGE) { 3450 struct ata_eh_context *ehc = &link->eh_context; 3451 3452 if (!(ehc->i.action & ATA_EH_RESET)) 3453 continue; 3454 3455 rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3456 prereset, softreset, hardreset, postreset); 3457 if (rc) { 3458 ata_link_printk(link, KERN_ERR, 3459 "reset failed, giving up\n"); 3460 goto out; 3461 } 3462 } 3463 3464 do { 3465 unsigned long now; 3466 3467 /* 3468 * clears ATA_EH_PARK in eh_info and resets 3469 * ap->park_req_pending 3470 */ 3471 ata_eh_pull_park_action(ap); 3472 3473 deadline = jiffies; 3474 ata_for_each_link(link, ap, EDGE) { 3475 ata_for_each_dev(dev, link, ALL) { 3476 struct ata_eh_context *ehc = &link->eh_context; 3477 unsigned long tmp; 3478 3479 if (dev->class != ATA_DEV_ATA) 3480 continue; 3481 if (!(ehc->i.dev_action[dev->devno] & 3482 ATA_EH_PARK)) 3483 continue; 3484 tmp = dev->unpark_deadline; 3485 if (time_before(deadline, tmp)) 3486 deadline = tmp; 3487 else if (time_before_eq(tmp, jiffies)) 3488 continue; 3489 if (ehc->unloaded_mask & (1 << dev->devno)) 3490 continue; 3491 3492 ata_eh_park_issue_cmd(dev, 1); 3493 } 3494 } 3495 3496 now = jiffies; 3497 if (time_before_eq(deadline, now)) 3498 break; 3499 3500 deadline = wait_for_completion_timeout(&ap->park_req_pending, 3501 deadline - now); 3502 } while (deadline); 3503 ata_for_each_link(link, ap, EDGE) { 3504 ata_for_each_dev(dev, link, ALL) { 3505 if (!(link->eh_context.unloaded_mask & 3506 (1 << dev->devno))) 3507 continue; 3508 3509 ata_eh_park_issue_cmd(dev, 0); 3510 ata_eh_done(link, dev, ATA_EH_PARK); 3511 } 3512 } 3513 3514 
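	/* Park handling ends here: every pending ATA_EH_PARK request has
	 * either been serviced or has timed out, heads that were unloaded
	 * above have been woken back up via ata_eh_park_issue_cmd(dev, 0)
	 * and ATA_EH_PARK has been marked done, so normal recovery can
	 * proceed.
	 */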
/* the rest */ 3515 ata_for_each_link(link, ap, EDGE) { 3516 struct ata_eh_context *ehc = &link->eh_context; 3517 3518 /* revalidate existing devices and attach new ones */ 3519 rc = ata_eh_revalidate_and_attach(link, &dev); 3520 if (rc) 3521 goto dev_fail; 3522 3523 /* if PMP got attached, return, pmp EH will take care of it */ 3524 if (link->device->class == ATA_DEV_PMP) { 3525 ehc->i.action = 0; 3526 return 0; 3527 } 3528 3529 /* configure transfer mode if necessary */ 3530 if (ehc->i.flags & ATA_EHI_SETMODE) { 3531 rc = ata_set_mode(link, &dev); 3532 if (rc) 3533 goto dev_fail; 3534 ehc->i.flags &= ~ATA_EHI_SETMODE; 3535 } 3536 3537 /* If reset has been issued, clear UA to avoid 3538 * disrupting the current users of the device. 3539 */ 3540 if (ehc->i.flags & ATA_EHI_DID_RESET) { 3541 ata_for_each_dev(dev, link, ALL) { 3542 if (dev->class != ATA_DEV_ATAPI) 3543 continue; 3544 rc = atapi_eh_clear_ua(dev); 3545 if (rc) 3546 goto dev_fail; 3547 } 3548 } 3549 3550 /* retry flush if necessary */ 3551 ata_for_each_dev(dev, link, ALL) { 3552 if (dev->class != ATA_DEV_ATA) 3553 continue; 3554 rc = ata_eh_maybe_retry_flush(dev); 3555 if (rc) 3556 goto dev_fail; 3557 } 3558 3559 /* configure link power saving */ 3560 if (ehc->i.action & ATA_EH_LPM) 3561 ata_for_each_dev(dev, link, ALL) 3562 ata_dev_enable_pm(dev, ap->pm_policy); 3563 3564 /* this link is okay now */ 3565 ehc->i.flags = 0; 3566 continue; 3567 3568 dev_fail: 3569 nr_failed_devs++; 3570 ata_eh_handle_dev_fail(dev, rc); 3571 3572 if (ap->pflags & ATA_PFLAG_FROZEN) { 3573 /* PMP reset requires working host port. 3574 * Can't retry if it's frozen. 3575 */ 3576 if (sata_pmp_attached(ap)) 3577 goto out; 3578 break; 3579 } 3580 } 3581 3582 if (nr_failed_devs) 3583 goto retry; 3584 3585 out: 3586 if (rc && r_failed_link) 3587 *r_failed_link = link; 3588 3589 DPRINTK("EXIT, rc=%d\n", rc); 3590 return rc; 3591 } 3592 3593 /** 3594 * ata_eh_finish - finish up EH 3595 * @ap: host port to finish EH for 3596 * 3597 * Recovery is complete. Clean up EH states and retry or finish 3598 * failed qcs. 3599 * 3600 * LOCKING: 3601 * None. 3602 */ 3603 void ata_eh_finish(struct ata_port *ap) 3604 { 3605 int tag; 3606 3607 /* retry or finish qcs */ 3608 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 3609 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 3610 3611 if (!(qc->flags & ATA_QCFLAG_FAILED)) 3612 continue; 3613 3614 if (qc->err_mask) { 3615 /* FIXME: Once EH migration is complete, 3616 * generate sense data in this function, 3617 * considering both err_mask and tf. 3618 */ 3619 if (qc->flags & ATA_QCFLAG_RETRY) 3620 ata_eh_qc_retry(qc); 3621 else 3622 ata_eh_qc_complete(qc); 3623 } else { 3624 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3625 ata_eh_qc_complete(qc); 3626 } else { 3627 /* feed zero TF to sense generation */ 3628 memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3629 ata_eh_qc_retry(qc); 3630 } 3631 } 3632 } 3633 3634 /* make sure nr_active_links is zero after EH */ 3635 WARN_ON(ap->nr_active_links); 3636 ap->nr_active_links = 0; 3637 } 3638 3639 /** 3640 * ata_do_eh - do standard error handling 3641 * @ap: host port to handle error for 3642 * 3643 * @prereset: prereset method (can be NULL) 3644 * @softreset: softreset method (can be NULL) 3645 * @hardreset: hardreset method (can be NULL) 3646 * @postreset: postreset method (can be NULL) 3647 * 3648 * Perform standard error handling sequence. 3649 * 3650 * LOCKING: 3651 * Kernel thread context (may sleep). 
3652 */ 3653 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 3654 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3655 ata_postreset_fn_t postreset) 3656 { 3657 struct ata_device *dev; 3658 int rc; 3659 3660 ata_eh_autopsy(ap); 3661 ata_eh_report(ap); 3662 3663 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 3664 NULL); 3665 if (rc) { 3666 ata_for_each_dev(dev, &ap->link, ALL) 3667 ata_dev_disable(dev); 3668 } 3669 3670 ata_eh_finish(ap); 3671 } 3672 3673 /** 3674 * ata_std_error_handler - standard error handler 3675 * @ap: host port to handle error for 3676 * 3677 * Standard error handler 3678 * 3679 * LOCKING: 3680 * Kernel thread context (may sleep). 3681 */ 3682 void ata_std_error_handler(struct ata_port *ap) 3683 { 3684 struct ata_port_operations *ops = ap->ops; 3685 ata_reset_fn_t hardreset = ops->hardreset; 3686 3687 /* ignore built-in hardreset if SCR access is not available */ 3688 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 3689 hardreset = NULL; 3690 3691 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3692 } 3693 3694 #ifdef CONFIG_PM 3695 /** 3696 * ata_eh_handle_port_suspend - perform port suspend operation 3697 * @ap: port to suspend 3698 * 3699 * Suspend @ap. 3700 * 3701 * LOCKING: 3702 * Kernel thread context (may sleep). 3703 */ 3704 static void ata_eh_handle_port_suspend(struct ata_port *ap) 3705 { 3706 unsigned long flags; 3707 int rc = 0; 3708 3709 /* are we suspending? */ 3710 spin_lock_irqsave(ap->lock, flags); 3711 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3712 ap->pm_mesg.event == PM_EVENT_ON) { 3713 spin_unlock_irqrestore(ap->lock, flags); 3714 return; 3715 } 3716 spin_unlock_irqrestore(ap->lock, flags); 3717 3718 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 3719 3720 /* tell ACPI we're suspending */ 3721 rc = ata_acpi_on_suspend(ap); 3722 if (rc) 3723 goto out; 3724 3725 /* suspend */ 3726 ata_eh_freeze_port(ap); 3727 3728 if (ap->ops->port_suspend) 3729 rc = ap->ops->port_suspend(ap, ap->pm_mesg); 3730 3731 ata_acpi_set_state(ap, PMSG_SUSPEND); 3732 out: 3733 /* report result */ 3734 spin_lock_irqsave(ap->lock, flags); 3735 3736 ap->pflags &= ~ATA_PFLAG_PM_PENDING; 3737 if (rc == 0) 3738 ap->pflags |= ATA_PFLAG_SUSPENDED; 3739 else if (ap->pflags & ATA_PFLAG_FROZEN) 3740 ata_port_schedule_eh(ap); 3741 3742 if (ap->pm_result) { 3743 *ap->pm_result = rc; 3744 ap->pm_result = NULL; 3745 } 3746 3747 spin_unlock_irqrestore(ap->lock, flags); 3748 3749 return; 3750 } 3751 3752 /** 3753 * ata_eh_handle_port_resume - perform port resume operation 3754 * @ap: port to resume 3755 * 3756 * Resume @ap. 3757 * 3758 * LOCKING: 3759 * Kernel thread context (may sleep). 3760 */ 3761 static void ata_eh_handle_port_resume(struct ata_port *ap) 3762 { 3763 struct ata_link *link; 3764 struct ata_device *dev; 3765 unsigned long flags; 3766 int rc = 0; 3767 3768 /* are we resuming? */ 3769 spin_lock_irqsave(ap->lock, flags); 3770 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3771 ap->pm_mesg.event != PM_EVENT_ON) { 3772 spin_unlock_irqrestore(ap->lock, flags); 3773 return; 3774 } 3775 spin_unlock_irqrestore(ap->lock, flags); 3776 3777 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 3778 3779 /* 3780 * Error timestamps are in jiffies which doesn't run while 3781 * suspended and PHY events during resume isn't too uncommon. 3782 * When the two are combined, it can lead to unnecessary speed 3783 * downs if the machine is suspended and resumed repeatedly. 3784 * Clear error history. 
3785 */ 3786 ata_for_each_link(link, ap, HOST_FIRST) 3787 ata_for_each_dev(dev, link, ALL) 3788 ata_ering_clear(&dev->ering); 3789 3790 ata_acpi_set_state(ap, PMSG_ON); 3791 3792 if (ap->ops->port_resume) 3793 rc = ap->ops->port_resume(ap); 3794 3795 /* tell ACPI that we're resuming */ 3796 ata_acpi_on_resume(ap); 3797 3798 /* report result */ 3799 spin_lock_irqsave(ap->lock, flags); 3800 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 3801 if (ap->pm_result) { 3802 *ap->pm_result = rc; 3803 ap->pm_result = NULL; 3804 } 3805 spin_unlock_irqrestore(ap->lock, flags); 3806 } 3807 #endif /* CONFIG_PM */ 3808