Lines Matching (full-word tokens): ignore - power - on - sel

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libata-eh.c - libata error handling
8 * as Documentation/driver-api/libata.rst
11 * http://www.sata-io.org/
59 /* Waiting in ->prereset can never be reliable. It's
90 5000, /* covers > 99% of successes and not too boring on failures */
97 15000, /* Some drives are slow to read log pages when waking-up */
125 * On the retry after a command timed out, the next timeout value from
129 * ehc->cmd_timeout_idx keeps track of which timeout to use per
130 * command class, so if SET_FEATURES times out on the first try, the
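The two fragments above describe how libata escalates an internal command's timeout on each retry by advancing a per-command-class index (ehc->cmd_timeout_idx) through a table of increasing values. A stand-alone sketch of that escalation idea follows; the table values and names are invented, not the ones libata uses:

#include <stdio.h>

/* Made-up timeout table; libata keeps similar tables per command class
 * and walks them forward via ehc->cmd_timeout_idx on each timeout. */
static const unsigned long demo_timeouts_ms[] = { 5000, 10000, 30000 };
#define DEMO_NR_TIMEOUTS (sizeof(demo_timeouts_ms) / sizeof(demo_timeouts_ms[0]))

int main(void)
{
        unsigned int idx = 0;           /* kept per device/command class in libata */
        int attempt;

        for (attempt = 1; attempt <= 4; attempt++) {
                printf("attempt %d uses a %lu ms timeout\n",
                       attempt, demo_timeouts_ms[idx]);
                /* pretend the attempt timed out: escalate, but never walk
                 * past the last (largest) entry in the table */
                if (idx < DEMO_NR_TIMEOUTS - 1)
                        idx++;
        }
        return 0;
}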
170 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, in __ata_ehi_pushv_desc()
171 ATA_EH_DESC_LEN - ehi->desc_len, in __ata_ehi_pushv_desc()
176 * __ata_ehi_push_desc - push error description without adding separator
180 * Format string according to @fmt and append it to @ehi->desc.
196 * ata_ehi_push_desc - push error description with separator
200 * Format string according to @fmt and append it to @ehi->desc.
201 * If @ehi->desc is not empty, ", " is added in-between.
210 if (ehi->desc_len) in ata_ehi_push_desc()
220 * ata_ehi_clear_desc - clean error description
223 * Clear @ehi->desc.
230 ehi->desc[0] = '\0'; in ata_ehi_clear_desc()
231 ehi->desc_len = 0; in ata_ehi_clear_desc()
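The fragments above document ata_ehi_clear_desc() and the push_desc helpers that build the human-readable error description. A hedged sketch of how a low-level driver typically uses them from its interrupt path before kicking EH; the register value, status bit, and function name are invented, and per the LOCKING notes the caller holds ap->lock:

#include <linux/libata.h>

/* Record error context in the link's eh_info; later pushes are joined
 * with ", " by ata_ehi_push_desc(), as documented above. */
static void demo_note_error(struct ata_port *ap, u32 irq_stat)
{
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_clear_desc(ehi);
        ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
        if (irq_stat & 0x1)             /* made-up "host bus error" bit */
                ata_ehi_push_desc(ehi, "host bus error");
}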
236 * ata_port_desc - append port description
242 * in-between. This function is to be used while initializing
243 * ata_host. The description is printed on host registration.
252 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); in ata_port_desc()
254 if (ap->link.eh_info.desc_len) in ata_port_desc()
255 __ata_ehi_push_desc(&ap->link.eh_info, " "); in ata_port_desc()
258 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); in ata_port_desc()
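ata_port_desc() only appends text; the accumulated description is printed once at host registration, and the WARN_ON at line 252 insists it is used while the host is still initializing. A hedged sketch of a driver describing its ports during host setup; the names and values are invented:

#include <linux/libata.h>

static void demo_describe_port(struct ata_port *ap, int port_no,
                               unsigned long mmio_base)
{
        /* must run before ata_host_register(), i.e. while initializing */
        ata_port_desc(ap, "port %d", port_no);
        ata_port_desc(ap, "mmio 0x%lx", mmio_base);
}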
265 * ata_port_pbar_desc - append PCI BAR description
282 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in ata_port_pbar_desc()
315 return -1; in ata_lookup_timeout_table()
319 * ata_internal_cmd_timeout - determine timeout for an internal command
333 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_internal_cmd_timeout()
340 idx = ehc->cmd_timeout_idx[dev->devno][ent]; in ata_internal_cmd_timeout()
345 * ata_internal_cmd_timed_out - notification for internal command timeout
358 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_internal_cmd_timed_out()
365 idx = ehc->cmd_timeout_idx[dev->devno][ent]; in ata_internal_cmd_timed_out()
367 ehc->cmd_timeout_idx[dev->devno][ent]++; in ata_internal_cmd_timed_out()
377 ering->cursor++; in ata_ering_record()
378 ering->cursor %= ATA_ERING_SIZE; in ata_ering_record()
380 ent = &ering->ring[ering->cursor]; in ata_ering_record()
381 ent->eflags = eflags; in ata_ering_record()
382 ent->err_mask = err_mask; in ata_ering_record()
383 ent->timestamp = get_jiffies_64(); in ata_ering_record()
388 struct ata_ering_entry *ent = &ering->ring[ering->cursor]; in ata_ering_top()
390 if (ent->err_mask) in ata_ering_top()
402 idx = ering->cursor; in ata_ering_map()
404 ent = &ering->ring[idx]; in ata_ering_map()
405 if (!ent->err_mask) in ata_ering_map()
410 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; in ata_ering_map()
411 } while (idx != ering->cursor); in ata_ering_map()
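The ata_ering_* fragments above implement a small fixed-size ring of past errors: record() advances the cursor modulo ATA_ERING_SIZE, and map() walks backwards from the cursor until it meets an empty slot or comes full circle. A stand-alone sketch of that backwards walk; the sizes, entry layout, and output are invented:

#include <stdio.h>

#define DEMO_RING_SIZE 8

struct demo_ent {
        unsigned int err_mask;          /* 0 means "empty", as in ata_ering_map() */
};

/* Visit entries newest-first starting at the cursor, stopping on an empty
 * entry or after one full lap; this mirrors the loop shown above. */
static void demo_ring_walk(const struct demo_ent *ring, int cursor)
{
        int idx = cursor;

        do {
                if (!ring[idx].err_mask)
                        break;
                printf("entry %d: err_mask 0x%x\n", idx, ring[idx].err_mask);
                idx = (idx - 1 + DEMO_RING_SIZE) % DEMO_RING_SIZE;
        } while (idx != cursor);
}

int main(void)
{
        struct demo_ent ring[DEMO_RING_SIZE] = { { 0 } };
        int cursor = 0, i;

        for (i = 0; i < 3; i++) {       /* record three errors, oldest first */
                cursor = (cursor + 1) % DEMO_RING_SIZE;
                ring[cursor].err_mask = 0x100 + i;
        }
        demo_ring_walk(ring, cursor);
        return 0;
}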
418 ent->eflags |= ATA_EFLAG_OLD_ER; in ata_ering_clear_cb()
429 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_dev_action()
431 return ehc->i.action | ehc->i.dev_action[dev->devno]; in ata_eh_dev_action()
440 ehi->action &= ~action; in ata_eh_clear_action()
442 ehi->dev_action[tdev->devno] &= ~action; in ata_eh_clear_action()
444 /* doesn't make sense for port-wide EH actions */ in ata_eh_clear_action()
447 /* break ehi->action into ehi->dev_action */ in ata_eh_clear_action()
448 if (ehi->action & action) { in ata_eh_clear_action()
450 ehi->dev_action[tdev->devno] |= in ata_eh_clear_action()
451 ehi->action & action; in ata_eh_clear_action()
452 ehi->action &= ~action; in ata_eh_clear_action()
455 /* turn off the specified per-dev action */ in ata_eh_clear_action()
456 ehi->dev_action[dev->devno] &= ~action; in ata_eh_clear_action()
461 * ata_eh_acquire - acquire EH ownership
473 mutex_lock(&ap->host->eh_mutex); in ata_eh_acquire()
474 WARN_ON_ONCE(ap->host->eh_owner); in ata_eh_acquire()
475 ap->host->eh_owner = current; in ata_eh_acquire()
479 * ata_eh_release - release EH ownership
490 WARN_ON_ONCE(ap->host->eh_owner != current); in ata_eh_release()
491 ap->host->eh_owner = NULL; in ata_eh_release()
492 mutex_unlock(&ap->host->eh_mutex); in ata_eh_release()
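ata_eh_acquire()/ata_eh_release() serialize EH across all ports of a host via host->eh_mutex, so a long sleep inside EH should drop ownership to let other ports make progress; libata's own wait helpers such as ata_msleep() follow this pattern. A minimal sketch of the pairing, written as if it sat next to the code above inside libata; the function name and delay are invented:

/* Must be called from an EH context that currently owns EH. */
static void demo_eh_long_wait(struct ata_port *ap)
{
        ata_eh_release(ap);             /* let EH of other ports run */
        msleep(1000);
        ata_eh_acquire(ap);             /* re-take ownership before touching EH state */
}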
499 dev->class++; in ata_eh_dev_disable()
505 ata_ering_clear(&dev->ering); in ata_eh_dev_disable()
518 * standby power mode. in ata_eh_unload()
532 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); in ata_eh_unload()
538 spin_lock_irqsave(ap->lock, flags); in ata_eh_unload()
541 ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ in ata_eh_unload()
542 ap->pflags |= ATA_PFLAG_UNLOADED; in ata_eh_unload()
544 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_unload()
548 * ata_scsi_error - SCSI layer error handler callback
549 * @host: SCSI host on which error occurred
551 * Handles SCSI-layer-thrown error events.
565 spin_lock_irqsave(host->host_lock, flags); in ata_scsi_error()
566 list_splice_init(&host->eh_cmd_q, &eh_work_q); in ata_scsi_error()
567 spin_unlock_irqrestore(host->host_lock, flags); in ata_scsi_error()
581 * ata_scsi_cmd_error_handler - error callback for a list of commands
587 * ap->eh_done_q. This function is the first part of the libata error
604 * For EH, all qcs are finished in one of three ways - in ata_scsi_cmd_error_handler()
616 spin_lock_irqsave(ap->lock, flags); in ata_scsi_cmd_error_handler()
619 * This must occur under the ap->lock as we don't want in ata_scsi_cmd_error_handler()
623 * non-notified command and completes much like an IRQ handler. in ata_scsi_cmd_error_handler()
628 if (ap->ops->lost_interrupt) in ata_scsi_cmd_error_handler()
629 ap->ops->lost_interrupt(ap); in ata_scsi_cmd_error_handler()
635 * If the scmd was added to EH, via ata_qc_schedule_eh() -> in ata_scsi_cmd_error_handler()
636 * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will in ata_scsi_cmd_error_handler()
643 if (qc->flags & ATA_QCFLAG_ACTIVE && in ata_scsi_cmd_error_handler()
644 qc->scsicmd == scmd) in ata_scsi_cmd_error_handler()
650 if (!(qc->flags & ATA_QCFLAG_EH)) { in ata_scsi_cmd_error_handler()
653 qc->err_mask |= AC_ERR_TIMEOUT; in ata_scsi_cmd_error_handler()
654 qc->flags |= ATA_QCFLAG_EH; in ata_scsi_cmd_error_handler()
662 scmd->retries = scmd->allowed; in ata_scsi_cmd_error_handler()
663 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); in ata_scsi_cmd_error_handler()
678 ap->eh_tries = ATA_EH_MAX_TRIES; in ata_scsi_cmd_error_handler()
680 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_cmd_error_handler()
685 * ata_scsi_port_error_handler - recover the port after the commands
701 timer_delete_sync(&ap->fastdrain_timer); in ata_scsi_port_error_handler()
707 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
710 struct ata_eh_context *ehc = &link->eh_context; in ata_scsi_port_error_handler()
713 memset(&link->eh_context, 0, sizeof(link->eh_context)); in ata_scsi_port_error_handler()
714 link->eh_context.i = link->eh_info; in ata_scsi_port_error_handler()
715 memset(&link->eh_info, 0, sizeof(link->eh_info)); in ata_scsi_port_error_handler()
718 int devno = dev->devno; in ata_scsi_port_error_handler()
720 ehc->saved_xfer_mode[devno] = dev->xfer_mode; in ata_scsi_port_error_handler()
722 ehc->saved_ncq_enabled |= 1 << devno; in ata_scsi_port_error_handler()
725 if (ap->pflags & ATA_PFLAG_RESUMING) { in ata_scsi_port_error_handler()
726 dev->flags |= ATA_DFLAG_RESUMING; in ata_scsi_port_error_handler()
727 ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE; in ata_scsi_port_error_handler()
732 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; in ata_scsi_port_error_handler()
733 ap->pflags &= ~ATA_PFLAG_EH_PENDING; in ata_scsi_port_error_handler()
734 ap->excl_link = NULL; /* don't maintain exclusion over EH */ in ata_scsi_port_error_handler()
736 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
739 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) in ata_scsi_port_error_handler()
740 ap->ops->error_handler(ap); in ata_scsi_port_error_handler()
743 if ((ap->pflags & ATA_PFLAG_UNLOADING) && in ata_scsi_port_error_handler()
744 !(ap->pflags & ATA_PFLAG_UNLOADED)) in ata_scsi_port_error_handler()
753 * Exception might have happened after ->error_handler recovered the in ata_scsi_port_error_handler()
756 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
758 if (ap->pflags & ATA_PFLAG_EH_PENDING) { in ata_scsi_port_error_handler()
759 if (--ap->eh_tries) { in ata_scsi_port_error_handler()
760 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
766 ap->pflags &= ~ATA_PFLAG_EH_PENDING; in ata_scsi_port_error_handler()
771 memset(&link->eh_info, 0, sizeof(link->eh_info)); in ata_scsi_port_error_handler()
774 * end eh (clear host_eh_scheduled) while holding ap->lock such that if in ata_scsi_port_error_handler()
776 * midlayer will re-initiate EH. in ata_scsi_port_error_handler()
778 ap->ops->end_eh(ap); in ata_scsi_port_error_handler()
780 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
783 scsi_eh_flush_done_q(&ap->eh_done_q); in ata_scsi_port_error_handler()
786 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
788 ap->pflags &= ~ATA_PFLAG_RESUMING; in ata_scsi_port_error_handler()
790 if (ap->pflags & ATA_PFLAG_LOADING) in ata_scsi_port_error_handler()
791 ap->pflags &= ~ATA_PFLAG_LOADING; in ata_scsi_port_error_handler()
792 else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) && in ata_scsi_port_error_handler()
793 !(ap->flags & ATA_FLAG_SAS_HOST)) in ata_scsi_port_error_handler()
794 schedule_delayed_work(&ap->hotplug_task, 0); in ata_scsi_port_error_handler()
796 if (ap->pflags & ATA_PFLAG_RECOVERED) in ata_scsi_port_error_handler()
799 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); in ata_scsi_port_error_handler()
802 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; in ata_scsi_port_error_handler()
803 wake_up_all(&ap->eh_wait_q); in ata_scsi_port_error_handler()
805 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
810 * ata_port_wait_eh - Wait for the currently pending EH to complete
824 spin_lock_irqsave(ap->lock, flags); in ata_port_wait_eh()
827 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); in ata_port_wait_eh()
828 spin_unlock_irqrestore(ap->lock, flags); in ata_port_wait_eh()
830 spin_lock_irqsave(ap->lock, flags); in ata_port_wait_eh()
832 finish_wait(&ap->eh_wait_q, &wait); in ata_port_wait_eh()
834 spin_unlock_irqrestore(ap->lock, flags); in ata_port_wait_eh()
837 if (scsi_host_in_recovery(ap->scsi_host)) { in ata_port_wait_eh()
850 /* count only non-internal commands */ in ata_eh_nr_in_flight()
865 spin_lock_irqsave(ap->lock, flags); in ata_eh_fastdrain_timerfn()
873 if (cnt == ap->fastdrain_cnt) { in ata_eh_fastdrain_timerfn()
878 * in-flight qcs as timed out and freeze the port. in ata_eh_fastdrain_timerfn()
882 qc->err_mask |= AC_ERR_TIMEOUT; in ata_eh_fastdrain_timerfn()
888 ap->fastdrain_cnt = cnt; in ata_eh_fastdrain_timerfn()
889 ap->fastdrain_timer.expires = in ata_eh_fastdrain_timerfn()
891 add_timer(&ap->fastdrain_timer); in ata_eh_fastdrain_timerfn()
895 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_fastdrain_timerfn()
899 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
904 * is non-zero and EH wasn't pending before. Fast drain ensures
915 if (ap->pflags & ATA_PFLAG_EH_PENDING) in ata_eh_set_pending()
918 ap->pflags |= ATA_PFLAG_EH_PENDING; in ata_eh_set_pending()
923 /* do we have in-flight qcs? */ in ata_eh_set_pending()
929 ap->fastdrain_cnt = cnt; in ata_eh_set_pending()
930 ap->fastdrain_timer.expires = in ata_eh_set_pending()
932 add_timer(&ap->fastdrain_timer); in ata_eh_set_pending()
936 * ata_qc_schedule_eh - schedule qc for error handling
947 struct ata_port *ap = qc->ap; in ata_qc_schedule_eh()
949 qc->flags |= ATA_QCFLAG_EH; in ata_qc_schedule_eh()
953 * ata_scsi_error() takes care of such scmds on EH entry. in ata_qc_schedule_eh()
957 blk_abort_request(scsi_cmd_to_rq(qc->scsicmd)); in ata_qc_schedule_eh()
961 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
969 if (ap->pflags & ATA_PFLAG_INITIALIZING) in ata_std_sched_eh()
973 scsi_schedule_eh(ap->scsi_host); in ata_std_sched_eh()
980 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
984 * shost, so host fields can be directly manipulated under ap->lock, in
985 * the libsas case we need to hold a lock at the ha->level to coordinate
993 struct Scsi_Host *host = ap->scsi_host; in ata_std_end_eh()
995 host->host_eh_scheduled = 0; in ata_std_end_eh()
1001 * ata_port_schedule_eh - schedule error handling without a qc
1013 ap->ops->sched_eh(ap); in ata_port_schedule_eh()
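ata_port_schedule_eh() must be called with ap->lock held and only requests EH; a caller that needs the recovery to have actually finished pairs it with ata_port_wait_eh(), documented earlier in this listing. A hedged sketch; the function name is invented:

#include <linux/libata.h>
#include <linux/spinlock.h>

static void demo_force_eh_and_wait(struct ata_port *ap)
{
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);
        ata_port_schedule_eh(ap);       /* needs ap->lock, per the LOCKING notes */
        spin_unlock_irqrestore(ap->lock, flags);

        ata_port_wait_eh(ap);           /* may sleep until this EH pass completes */
}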
1027 if (qc && (!link || qc->dev->link == link)) { in ata_do_link_abort()
1028 qc->flags |= ATA_QCFLAG_EH; in ata_do_link_abort()
1041 * ata_link_abort - abort all qc's on the link
1044 * Abort all qc's active on @link and schedule EH.
1054 return ata_do_link_abort(link->ap, link); in ata_link_abort()
1059 * ata_port_abort - abort all qc's on the port
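ata_link_abort() and ata_port_abort() mark the affected active qcs as failed and schedule EH in one go, which is the usual reaction of an interrupt handler to a controller-level error. A hedged sketch; the description string is invented, and per the LOCKING rules the caller holds ap->lock:

#include <linux/libata.h>

/* Called from the port interrupt handler with ap->lock held. */
static int demo_handle_fatal_irq(struct ata_port *ap)
{
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_push_desc(ehi, "fatal controller error");
        ehi->err_mask |= AC_ERR_HOST_BUS;
        ehi->action |= ATA_EH_RESET;

        /* fails every active qc on the port and schedules EH */
        return ata_port_abort(ap);
}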
1077 * __ata_port_freeze - freeze port
1085 * ap->ops->freeze() callback can be used for freezing the port
1086 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
1087 * port cannot be frozen hardware-wise, the interrupt handler
1096 if (ap->ops->freeze) in __ata_port_freeze()
1097 ap->ops->freeze(ap); in __ata_port_freeze()
1099 ap->pflags |= ATA_PFLAG_FROZEN; in __ata_port_freeze()
1105 * ata_port_freeze - abort & freeze port
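ata_port_freeze() goes one step further than ata_port_abort(): it also invokes ->freeze() so the port stops servicing interrupts until EH thaws it, which is what drivers do once the hardware state can no longer be trusted. A hedged sketch; the reason string is invented and the caller holds ap->lock:

#include <linux/libata.h>

static void demo_fatal_hsm_error(struct ata_port *ap)
{
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_push_desc(ehi, "HSM violation");
        ehi->err_mask |= AC_ERR_HSM;

        /* aborts all qcs, calls ->freeze() and schedules EH */
        ata_port_freeze(ap);
}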
1127 * ata_eh_freeze_port - EH helper to freeze port
1139 spin_lock_irqsave(ap->lock, flags); in ata_eh_freeze_port()
1141 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_freeze_port()
1146 * ata_eh_thaw_port - EH helper to thaw port
1158 spin_lock_irqsave(ap->lock, flags); in ata_eh_thaw_port()
1160 ap->pflags &= ~ATA_PFLAG_FROZEN; in ata_eh_thaw_port()
1162 if (ap->ops->thaw) in ata_eh_thaw_port()
1163 ap->ops->thaw(ap); in ata_eh_thaw_port()
1165 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_thaw_port()
1177 struct ata_port *ap = qc->ap; in __ata_eh_qc_complete()
1178 struct scsi_cmnd *scmd = qc->scsicmd; in __ata_eh_qc_complete()
1181 spin_lock_irqsave(ap->lock, flags); in __ata_eh_qc_complete()
1182 qc->scsidone = ata_eh_scsidone; in __ata_eh_qc_complete()
1184 WARN_ON(ata_tag_valid(qc->tag)); in __ata_eh_qc_complete()
1185 spin_unlock_irqrestore(ap->lock, flags); in __ata_eh_qc_complete()
1187 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); in __ata_eh_qc_complete()
1191 * ata_eh_qc_complete - Complete an active ATA command from EH
1199 struct scsi_cmnd *scmd = qc->scsicmd; in ata_eh_qc_complete()
1200 scmd->retries = scmd->allowed; in ata_eh_qc_complete()
1205 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1211 * SCSI midlayer limits the number of retries to scmd->allowed.
1212 * scmd->allowed is incremented for commands which get retried
1213 * due to unrelated failures (qc->err_mask is zero).
1217 struct scsi_cmnd *scmd = qc->scsicmd; in ata_eh_qc_retry()
1218 if (!qc->err_mask) in ata_eh_qc_retry()
1219 scmd->allowed++; in ata_eh_qc_retry()
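ata_eh_qc_complete() hands the final status back to the SCSI midlayer, while ata_eh_qc_retry() additionally bumps scmd->allowed when the failure was unrelated to the command itself (qc->err_mask is zero), so the retry is not charged against the command's retry budget. A hedged sketch of the per-command choice EH ends up making; the "recovered" flag is invented, and the real decision logic lives in ata_eh_finish(), further down in this listing:

#include <linux/libata.h>

static void demo_finish_qc(struct ata_queued_cmd *qc, bool recovered)
{
        if (recovered)
                ata_eh_qc_retry(qc);    /* ask the midlayer to retry the scmd */
        else
                ata_eh_qc_complete(qc); /* report the failure upward */
}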
1224 * ata_dev_disable - disable ATA device
1244 * ata_eh_detach_dev - detach ATA device
1254 struct ata_link *link = dev->link; in ata_eh_detach_dev()
1255 struct ata_port *ap = link->ap; in ata_eh_detach_dev()
1256 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_detach_dev()
1260 * If the device is still enabled, transition it to standby power mode in ata_eh_detach_dev()
1268 spin_lock_irqsave(ap->lock, flags); in ata_eh_detach_dev()
1270 dev->flags &= ~ATA_DFLAG_DETACH; in ata_eh_detach_dev()
1273 dev->flags |= ATA_DFLAG_DETACHED; in ata_eh_detach_dev()
1274 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; in ata_eh_detach_dev()
1277 /* clear per-dev EH info */ in ata_eh_detach_dev()
1278 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); in ata_eh_detach_dev()
1279 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); in ata_eh_detach_dev()
1280 ehc->saved_xfer_mode[dev->devno] = 0; in ata_eh_detach_dev()
1281 ehc->saved_ncq_enabled &= ~(1 << dev->devno); in ata_eh_detach_dev()
1283 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_detach_dev()
1287 * ata_eh_about_to_do - about to perform eh_action
1289 * @dev: target ATA dev for per-dev action (can be NULL)
1293 * in @link->eh_info such that eh actions are not unnecessarily
1302 struct ata_port *ap = link->ap; in ata_eh_about_to_do()
1303 struct ata_eh_info *ehi = &link->eh_info; in ata_eh_about_to_do()
1304 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_about_to_do()
1307 trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action); in ata_eh_about_to_do()
1309 spin_lock_irqsave(ap->lock, flags); in ata_eh_about_to_do()
1313 /* About to take EH action, set RECOVERED. Ignore actions on in ata_eh_about_to_do()
1316 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) in ata_eh_about_to_do()
1317 ap->pflags |= ATA_PFLAG_RECOVERED; in ata_eh_about_to_do()
1319 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_about_to_do()
1323 * ata_eh_done - EH action complete
1325 * @dev: target ATA dev for per-dev action (can be NULL)
1329 * in @link->eh_context.
1337 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_done()
1339 trace_ata_eh_done(link, dev ? dev->devno : 0, action); in ata_eh_done()
1341 ata_eh_clear_action(link, dev, &ehc->i, action); in ata_eh_done()
1345 * ata_err_string - convert err_mask to descriptive string
1384 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1394 * 0 on success, AC_ERR_* mask on failure.
1415 * ata_eh_decide_disposition - Disposition a qc based on sense data
1448 return scsi_check_sense(qc->scsicmd); in ata_eh_decide_disposition()
1452 * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1466 struct scsi_cmnd *cmd = qc->scsicmd; in ata_eh_request_sense()
1467 struct ata_device *dev = qc->dev; in ata_eh_request_sense()
1471 if (ata_port_is_frozen(qc->ap)) { in ata_eh_request_sense()
1476 if (!ata_id_sense_reporting_enabled(dev->id)) { in ata_eh_request_sense()
1477 ata_dev_warn(qc->dev, "sense data reporting disabled\n"); in ata_eh_request_sense()
1488 /* Ignore err_mask; ATA_ERR might be set */ in ata_eh_request_sense()
1491 /* Set sense without also setting scsicmd->result */ in ata_eh_request_sense()
1492 scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE, in ata_eh_request_sense()
1493 cmd->sense_buffer, tf.lbah, in ata_eh_request_sense()
1495 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_request_sense()
1507 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1519 * 0 on success, AC_ERR_* mask on failure
1526 struct ata_port *ap = dev->link->ap; in atapi_eh_request_sense()
1532 * for the case where they are -not- overwritten in atapi_eh_request_sense()
1551 if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) { in atapi_eh_request_sense()
1565 * ata_eh_analyze_serror - analyze SError for a failed port
1576 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_analyze_serror()
1577 u32 serror = ehc->i.serror; in ata_eh_analyze_serror()
1599 if (link->lpm_policy > ATA_LPM_MAX_POWER) in ata_eh_analyze_serror()
1601 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) in ata_eh_analyze_serror()
1607 ata_ehi_hotplugged(&ehc->i); in ata_eh_analyze_serror()
1609 ehc->i.err_mask |= err_mask; in ata_eh_analyze_serror()
1610 ehc->i.action |= action; in ata_eh_analyze_serror()
1614 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1629 const struct ata_taskfile *tf = &qc->result_tf; in ata_eh_analyze_tf()
1631 u8 stat = tf->status, err = tf->error; in ata_eh_analyze_tf()
1634 qc->err_mask |= AC_ERR_HSM; in ata_eh_analyze_tf()
1639 qc->err_mask |= AC_ERR_DEV; in ata_eh_analyze_tf()
1650 switch (qc->dev->class) { in ata_eh_analyze_tf()
1655 * - It was a non-NCQ command that failed, or in ata_eh_analyze_tf()
1656 * - It was an NCQ command that failed, but the sense data in ata_eh_analyze_tf()
1660 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) && in ata_eh_analyze_tf()
1662 set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); in ata_eh_analyze_tf()
1664 qc->err_mask |= AC_ERR_ATA_BUS; in ata_eh_analyze_tf()
1666 qc->err_mask |= AC_ERR_MEDIA; in ata_eh_analyze_tf()
1668 qc->err_mask |= AC_ERR_INVALID; in ata_eh_analyze_tf()
1672 if (!ata_port_is_frozen(qc->ap)) { in ata_eh_analyze_tf()
1673 tmp = atapi_eh_request_sense(qc->dev, in ata_eh_analyze_tf()
1674 qc->scsicmd->sense_buffer, in ata_eh_analyze_tf()
1675 qc->result_tf.error >> 4); in ata_eh_analyze_tf()
1677 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_analyze_tf()
1679 qc->err_mask |= tmp; in ata_eh_analyze_tf()
1683 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { in ata_eh_analyze_tf()
1696 qc->flags |= ATA_QCFLAG_RETRY; in ata_eh_analyze_tf()
1697 qc->err_mask |= AC_ERR_OTHER; in ata_eh_analyze_tf()
1699 qc->err_mask |= AC_ERR_HSM; in ata_eh_analyze_tf()
1702 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) in ata_eh_analyze_tf()
1747 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) in speed_down_verdict_cb()
1748 return -1; in speed_down_verdict_cb()
1750 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, in speed_down_verdict_cb()
1751 &arg->xfer_ok); in speed_down_verdict_cb()
1752 arg->nr_errors[cat]++; in speed_down_verdict_cb()
1758 * ata_eh_speed_down_verdict - Determine speed down verdict
1785 * taken per error. An action triggered by non-DUBIOUS errors
1823 arg.since = j64 - min(j64, j5mins); in ata_eh_speed_down_verdict()
1824 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); in ata_eh_speed_down_verdict()
1842 arg.since = j64 - min(j64, j10mins); in ata_eh_speed_down_verdict()
1843 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); in ata_eh_speed_down_verdict()
1858 * ata_eh_speed_down - record error and speed down if necessary
1882 /* don't bother if Cat-0 error */ in ata_eh_speed_down()
1887 ata_ering_record(&dev->ering, eflags, err_mask); in ata_eh_speed_down()
1892 dev->flags |= ATA_DFLAG_NCQ_OFF; in ata_eh_speed_down()
1906 if (dev->spdn_cnt < 2) { in ata_eh_speed_down()
1911 int sel; in ata_eh_speed_down() local
1913 if (dev->xfer_shift != ATA_SHIFT_PIO) in ata_eh_speed_down()
1914 sel = dma_dnxfer_sel[dev->spdn_cnt]; in ata_eh_speed_down()
1916 sel = pio_dnxfer_sel[dev->spdn_cnt]; in ata_eh_speed_down()
1918 dev->spdn_cnt++; in ata_eh_speed_down()
1920 if (ata_down_xfermask_limit(dev, sel) == 0) { in ata_eh_speed_down()
1930 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && in ata_eh_speed_down()
1931 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && in ata_eh_speed_down()
1932 (dev->xfer_shift != ATA_SHIFT_PIO)) { in ata_eh_speed_down()
1934 dev->spdn_cnt = 0; in ata_eh_speed_down()
1944 ata_ering_clear(&dev->ering); in ata_eh_speed_down()
1949 * ata_eh_worth_retry - analyze error and decide whether to retry
1954 * because the drive itself has probably already taken 10-30 seconds
1959 if (qc->err_mask & AC_ERR_MEDIA) in ata_eh_worth_retry()
1961 if (qc->flags & ATA_QCFLAG_IO) in ata_eh_worth_retry()
1963 if (qc->err_mask & AC_ERR_INVALID) in ata_eh_worth_retry()
1965 return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ in ata_eh_worth_retry()
1969 * ata_eh_quiet - check if we need to be quiet about a command error
1977 if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET) in ata_eh_quiet()
1978 qc->flags |= ATA_QCFLAG_QUIET; in ata_eh_quiet()
1979 return qc->flags & ATA_QCFLAG_QUIET; in ata_eh_quiet()
1984 struct ata_port *ap = link->ap; in ata_eh_get_non_ncq_success_sense()
1987 qc = __ata_qc_from_tag(ap, link->active_tag); in ata_eh_get_non_ncq_success_sense()
1989 return -EIO; in ata_eh_get_non_ncq_success_sense()
1991 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_get_non_ncq_success_sense()
1992 !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || in ata_eh_get_non_ncq_success_sense()
1993 qc->err_mask) in ata_eh_get_non_ncq_success_sense()
1994 return -EIO; in ata_eh_get_non_ncq_success_sense()
1997 return -EIO; in ata_eh_get_non_ncq_success_sense()
2010 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_get_success_sense()
2011 struct ata_device *dev = link->device; in ata_eh_get_success_sense()
2012 struct ata_port *ap = link->ap; in ata_eh_get_success_sense()
2016 if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE)) in ata_eh_get_success_sense()
2029 * data. Otherwise, we are dealing with a non-NCQ command and use in ata_eh_get_success_sense()
2032 if (link->sactive) in ata_eh_get_success_sense()
2053 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_get_success_sense()
2054 !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || in ata_eh_get_success_sense()
2055 qc->err_mask || in ata_eh_get_success_sense()
2056 ata_dev_phys_link(qc->dev) != link) in ata_eh_get_success_sense()
2060 if (qc->flags & ATA_QCFLAG_SENSE_VALID) in ata_eh_get_success_sense()
2064 if (!(qc->result_tf.status & ATA_SENSE)) in ata_eh_get_success_sense()
2068 ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0); in ata_eh_get_success_sense()
2069 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_get_success_sense()
2077 * called after changing the link power management policy, which may not be
2079 * the PHY in partial, slumber or devsleep Partial power management state.
2081 * - A device is still present, that is, DET is 1h (Device presence detected
2084 * - Communication is established, that is, IPM is not 0h, indicating that PHY
2085 * is online or in a low power state.
2107 * ata_eh_link_set_lpm - configure SATA interface power management
2109 * @policy: the link power management policy
2112 * Enable SATA Interface power management. This will enable
2113 * Device Interface Power Management (DIPM) for min_power and
2115 * callbacks for enabling Host Initiated Power management.
2121 * 0 on success, -errno on failure.
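ata_eh_link_set_lpm() funnels the chosen policy into the controller's ->set_lpm() hook, called a few lines below via ap->ops->set_lpm(). A hedged sketch of such a hook on the driver side; the capability check is invented, and returning -EOPNOTSUPP is what makes EH set ATA_LFLAG_NO_LPM on the link, as the later fragments show:

#include <linux/libata.h>

static int demo_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
                        unsigned int hints)
{
        if (policy >= ATA_LPM_MIN_POWER_WITH_PARTIAL)
                return -EOPNOTSUPP;     /* pretend slumber is unsupported */

        /* program controller/PHY power management registers here */
        return 0;
}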
2127 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; in ata_eh_link_set_lpm()
2128 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_link_set_lpm()
2130 enum ata_lpm_policy old_policy = link->lpm_policy; in ata_eh_link_set_lpm()
2131 bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM); in ata_eh_link_set_lpm()
2138 (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) in ata_eh_link_set_lpm()
2148 ata_link_dbg(link, "Set LPM policy: %d -> %d\n", old_policy, policy); in ata_eh_link_set_lpm()
2156 bool dev_has_hipm = ata_id_has_hipm(dev->id); in ata_eh_link_set_lpm()
2157 bool dev_has_dipm = ata_id_has_dipm(dev->id); in ata_eh_link_set_lpm()
2179 rc = -EIO; in ata_eh_link_set_lpm()
2186 rc = ap->ops->set_lpm(link, policy, hints); in ata_eh_link_set_lpm()
2187 if (!rc && ap->slave_link) in ata_eh_link_set_lpm()
2188 rc = ap->ops->set_lpm(ap->slave_link, policy, hints); in ata_eh_link_set_lpm()
2194 * device on the link. in ata_eh_link_set_lpm()
2197 if (rc == -EOPNOTSUPP) { in ata_eh_link_set_lpm()
2198 link->flags |= ATA_LFLAG_NO_LPM; in ata_eh_link_set_lpm()
2209 link->lpm_policy = policy; in ata_eh_link_set_lpm()
2210 if (ap && ap->slave_link) in ata_eh_link_set_lpm()
2211 ap->slave_link->lpm_policy = policy; in ata_eh_link_set_lpm()
2219 bool dev_has_dipm = ata_id_has_dipm(dev->id); in ata_eh_link_set_lpm()
2229 rc = -EIO; in ata_eh_link_set_lpm()
2235 link->last_lpm_change = jiffies; in ata_eh_link_set_lpm()
2236 link->flags |= ATA_LFLAG_CHANGED; in ata_eh_link_set_lpm()
2242 link->lpm_policy = old_policy; in ata_eh_link_set_lpm()
2243 if (ap && ap->slave_link) in ata_eh_link_set_lpm()
2244 ap->slave_link->lpm_policy = old_policy; in ata_eh_link_set_lpm()
2247 if (!dev || ehc->tries[dev->devno] <= 2) { in ata_eh_link_set_lpm()
2248 ata_link_warn(link, "disabling LPM on the link\n"); in ata_eh_link_set_lpm()
2249 link->flags |= ATA_LFLAG_NO_LPM; in ata_eh_link_set_lpm()
2257 * ata_eh_link_autopsy - analyze error and determine recovery action
2258 * @link: host link to perform autopsy on
2269 struct ata_port *ap = link->ap; in ata_eh_link_autopsy()
2270 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_link_autopsy()
2278 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) in ata_eh_link_autopsy()
2284 ehc->i.serror |= serror; in ata_eh_link_autopsy()
2286 } else if (rc != -EOPNOTSUPP) { in ata_eh_link_autopsy()
2288 ehc->i.probe_mask |= ATA_ALL_DEVICES; in ata_eh_link_autopsy()
2289 ehc->i.action |= ATA_EH_RESET; in ata_eh_link_autopsy()
2290 ehc->i.err_mask |= AC_ERR_OTHER; in ata_eh_link_autopsy()
2305 if (ehc->i.err_mask & ~AC_ERR_OTHER) in ata_eh_link_autopsy()
2306 ehc->i.err_mask &= ~AC_ERR_OTHER; in ata_eh_link_autopsy()
2308 all_err_mask |= ehc->i.err_mask; in ata_eh_link_autopsy()
2311 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_autopsy()
2312 qc->flags & ATA_QCFLAG_RETRY || in ata_eh_link_autopsy()
2313 qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD || in ata_eh_link_autopsy()
2314 ata_dev_phys_link(qc->dev) != link) in ata_eh_link_autopsy()
2318 qc->err_mask |= ehc->i.err_mask; in ata_eh_link_autopsy()
2321 ehc->i.action |= ata_eh_analyze_tf(qc); in ata_eh_link_autopsy()
2324 if (qc->err_mask & AC_ERR_ATA_BUS) in ata_eh_link_autopsy()
2325 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | in ata_eh_link_autopsy()
2329 if (qc->err_mask & ~AC_ERR_OTHER) in ata_eh_link_autopsy()
2330 qc->err_mask &= ~AC_ERR_OTHER; in ata_eh_link_autopsy()
2335 * based on the sense data and device class/type. Otherwise, in ata_eh_link_autopsy()
2339 if (qc->flags & ATA_QCFLAG_SENSE_VALID) in ata_eh_link_autopsy()
2340 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); in ata_eh_link_autopsy()
2342 qc->flags |= ATA_QCFLAG_RETRY; in ata_eh_link_autopsy()
2345 ehc->i.dev = qc->dev; in ata_eh_link_autopsy()
2346 all_err_mask |= qc->err_mask; in ata_eh_link_autopsy()
2347 if (qc->flags & ATA_QCFLAG_IO) in ata_eh_link_autopsy()
2359 ehc->i.flags |= ATA_EHI_QUIET; in ata_eh_link_autopsy()
2364 ehc->i.action |= ATA_EH_RESET; in ata_eh_link_autopsy()
2367 ehc->i.action |= ATA_EH_REVALIDATE; in ata_eh_link_autopsy()
2370 * perform per-dev EH action only on the offending device. in ata_eh_link_autopsy()
2372 if (ehc->i.dev) { in ata_eh_link_autopsy()
2373 ehc->i.dev_action[ehc->i.dev->devno] |= in ata_eh_link_autopsy()
2374 ehc->i.action & ATA_EH_PERDEV_MASK; in ata_eh_link_autopsy()
2375 ehc->i.action &= ~ATA_EH_PERDEV_MASK; in ata_eh_link_autopsy()
2380 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; in ata_eh_link_autopsy()
2383 dev = ehc->i.dev; in ata_eh_link_autopsy()
2385 ata_dev_enabled(link->device)))) in ata_eh_link_autopsy()
2386 dev = link->device; in ata_eh_link_autopsy()
2389 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) in ata_eh_link_autopsy()
2391 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); in ata_eh_link_autopsy()
2392 trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); in ata_eh_link_autopsy()
2397 * ata_eh_autopsy - analyze error and determine recovery action
2398 * @ap: host port to perform autopsy on
2417 if (ap->slave_link) { in ata_eh_autopsy()
2418 struct ata_eh_context *mehc = &ap->link.eh_context; in ata_eh_autopsy()
2419 struct ata_eh_context *sehc = &ap->slave_link->eh_context; in ata_eh_autopsy()
2422 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; in ata_eh_autopsy()
2424 /* perform autopsy on the slave link */ in ata_eh_autopsy()
2425 ata_eh_link_autopsy(ap->slave_link); in ata_eh_autopsy()
2428 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); in ata_eh_autopsy()
2429 mehc->i.action |= sehc->i.action; in ata_eh_autopsy()
2430 mehc->i.dev_action[1] |= sehc->i.dev_action[1]; in ata_eh_autopsy()
2431 mehc->i.flags |= sehc->i.flags; in ata_eh_autopsy()
2432 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); in ata_eh_autopsy()
2439 ata_eh_link_autopsy(&ap->link); in ata_eh_autopsy()
2443 * ata_get_cmd_name - get name for ATA command
2460 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, in ata_get_cmd_name()
2486 { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" }, in ata_get_cmd_name()
2515 { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, in ata_get_cmd_name()
2565 * ata_eh_link_report - report error handling to user
2566 * @link: ATA link EH is going on
2575 struct ata_port *ap = link->ap; in ata_eh_link_report()
2576 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_link_report()
2582 if (ehc->i.flags & ATA_EHI_QUIET) in ata_eh_link_report()
2586 if (ehc->i.desc[0] != '\0') in ata_eh_link_report()
2587 desc = ehc->i.desc; in ata_eh_link_report()
2590 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_report()
2591 ata_dev_phys_link(qc->dev) != link || in ata_eh_link_report()
2592 ((qc->flags & ATA_QCFLAG_QUIET) && in ata_eh_link_report()
2593 qc->err_mask == AC_ERR_DEV)) in ata_eh_link_report()
2595 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) in ata_eh_link_report()
2601 if (!nr_failed && !ehc->i.err_mask) in ata_eh_link_report()
2608 if (ap->eh_tries < ATA_EH_MAX_TRIES) in ata_eh_link_report()
2610 ap->eh_tries); in ata_eh_link_report()
2612 if (ehc->i.dev) { in ata_eh_link_report()
2613 ata_dev_err(ehc->i.dev, "exception Emask 0x%x " in ata_eh_link_report()
2615 ehc->i.err_mask, link->sactive, ehc->i.serror, in ata_eh_link_report()
2616 ehc->i.action, frozen, tries_buf); in ata_eh_link_report()
2618 ata_dev_err(ehc->i.dev, "%s\n", desc); in ata_eh_link_report()
2622 ehc->i.err_mask, link->sactive, ehc->i.serror, in ata_eh_link_report()
2623 ehc->i.action, frozen, tries_buf); in ata_eh_link_report()
2629 if (ehc->i.serror) in ata_eh_link_report()
2632 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", in ata_eh_link_report()
2633 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", in ata_eh_link_report()
2634 ehc->i.serror & SERR_DATA ? "UnrecovData " : "", in ata_eh_link_report()
2635 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", in ata_eh_link_report()
2636 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", in ata_eh_link_report()
2637 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", in ata_eh_link_report()
2638 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", in ata_eh_link_report()
2639 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", in ata_eh_link_report()
2640 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", in ata_eh_link_report()
2641 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", in ata_eh_link_report()
2642 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", in ata_eh_link_report()
2643 ehc->i.serror & SERR_CRC ? "BadCRC " : "", in ata_eh_link_report()
2644 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", in ata_eh_link_report()
2645 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", in ata_eh_link_report()
2646 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", in ata_eh_link_report()
2647 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", in ata_eh_link_report()
2648 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); in ata_eh_link_report()
2652 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; in ata_eh_link_report()
2656 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_report()
2657 ata_dev_phys_link(qc->dev) != link || !qc->err_mask) in ata_eh_link_report()
2660 if (qc->dma_dir != DMA_NONE) { in ata_eh_link_report()
2668 switch (qc->tf.protocol) { in ata_eh_link_report()
2698 prot_str, qc->nbytes, dma_str[qc->dma_dir]); in ata_eh_link_report()
2701 if (ata_is_atapi(qc->tf.protocol)) { in ata_eh_link_report()
2702 const u8 *cdb = qc->cdb; in ata_eh_link_report()
2703 size_t cdb_len = qc->dev->cdb_len; in ata_eh_link_report()
2705 if (qc->scsicmd) { in ata_eh_link_report()
2706 cdb = qc->scsicmd->cmnd; in ata_eh_link_report()
2707 cdb_len = qc->scsicmd->cmd_len; in ata_eh_link_report()
2712 ata_dev_err(qc->dev, "failed command: %s\n", in ata_eh_link_report()
2713 ata_get_cmd_name(cmd->command)); in ata_eh_link_report()
2715 ata_dev_err(qc->dev, in ata_eh_link_report()
2720 cmd->command, cmd->feature, cmd->nsect, in ata_eh_link_report()
2721 cmd->lbal, cmd->lbam, cmd->lbah, in ata_eh_link_report()
2722 cmd->hob_feature, cmd->hob_nsect, in ata_eh_link_report()
2723 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, in ata_eh_link_report()
2724 cmd->device, qc->tag, data_buf, cdb_buf, in ata_eh_link_report()
2725 res->status, res->error, res->nsect, in ata_eh_link_report()
2726 res->lbal, res->lbam, res->lbah, in ata_eh_link_report()
2727 res->hob_feature, res->hob_nsect, in ata_eh_link_report()
2728 res->hob_lbal, res->hob_lbam, res->hob_lbah, in ata_eh_link_report()
2729 res->device, qc->err_mask, ata_err_string(qc->err_mask), in ata_eh_link_report()
2730 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); in ata_eh_link_report()
2733 if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | in ata_eh_link_report()
2735 if (res->status & ATA_BUSY) in ata_eh_link_report()
2736 ata_dev_err(qc->dev, "status: { Busy }\n"); in ata_eh_link_report()
2738 ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", in ata_eh_link_report()
2739 res->status & ATA_DRDY ? "DRDY " : "", in ata_eh_link_report()
2740 res->status & ATA_DF ? "DF " : "", in ata_eh_link_report()
2741 res->status & ATA_DRQ ? "DRQ " : "", in ata_eh_link_report()
2742 res->status & ATA_SENSE ? "SENSE " : "", in ata_eh_link_report()
2743 res->status & ATA_ERR ? "ERR " : ""); in ata_eh_link_report()
2746 if (cmd->command != ATA_CMD_PACKET && in ata_eh_link_report()
2747 (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | in ata_eh_link_report()
2749 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", in ata_eh_link_report()
2750 res->error & ATA_ICRC ? "ICRC " : "", in ata_eh_link_report()
2751 res->error & ATA_UNC ? "UNC " : "", in ata_eh_link_report()
2752 res->error & ATA_AMNF ? "AMNF " : "", in ata_eh_link_report()
2753 res->error & ATA_IDNF ? "IDNF " : "", in ata_eh_link_report()
2754 res->error & ATA_ABORTED ? "ABRT " : ""); in ata_eh_link_report()
2760 * ata_eh_report - report error handling to user
2784 classes[dev->devno] = ATA_DEV_UNKNOWN; in ata_do_reset()
2791 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) in ata_eh_followup_srst_needed()
2793 if (rc == -EAGAIN) in ata_eh_followup_srst_needed()
2795 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) in ata_eh_followup_srst_needed()
2803 struct ata_port *ap = link->ap; in ata_eh_reset()
2804 struct ata_link *slave = ap->slave_link; in ata_eh_reset()
2805 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_reset()
2806 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; in ata_eh_reset()
2807 ata_reset_fn_t hardreset = reset_ops->hardreset; in ata_eh_reset()
2808 ata_reset_fn_t softreset = reset_ops->softreset; in ata_eh_reset()
2809 ata_prereset_fn_t prereset = reset_ops->prereset; in ata_eh_reset()
2810 ata_postreset_fn_t postreset = reset_ops->postreset; in ata_eh_reset()
2811 unsigned int *classes = ehc->classes; in ata_eh_reset()
2812 unsigned int lflags = link->flags; in ata_eh_reset()
2813 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); in ata_eh_reset()
2828 if (link->flags & ATA_LFLAG_RST_ONCE) in ata_eh_reset()
2830 if (link->flags & ATA_LFLAG_NO_HRST) in ata_eh_reset()
2832 if (link->flags & ATA_LFLAG_NO_SRST) in ata_eh_reset()
2836 if (ehc->i.flags & ATA_EHI_DID_RESET) { in ata_eh_reset()
2838 WARN_ON(time_after(ehc->last_reset, now)); in ata_eh_reset()
2839 deadline = ata_deadline(ehc->last_reset, in ata_eh_reset()
2842 schedule_timeout_uninterruptible(deadline - now); in ata_eh_reset()
2845 spin_lock_irqsave(ap->lock, flags); in ata_eh_reset()
2846 ap->pflags |= ATA_PFLAG_RESETTING; in ata_eh_reset()
2847 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_reset()
2854 * we do a hard reset (or are coming from power on) in ata_eh_reset()
2859 dev->pio_mode = XFER_PIO_0; in ata_eh_reset()
2860 dev->dma_mode = 0xff; in ata_eh_reset()
2867 if (ap->ops->set_piomode) in ata_eh_reset()
2868 ap->ops->set_piomode(ap, dev); in ata_eh_reset()
2873 ehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2876 ehc->i.action |= ATA_EH_HARDRESET; in ata_eh_reset()
2879 ehc->i.action |= ATA_EH_SOFTRESET; in ata_eh_reset()
2887 sehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2888 sehc->i.action |= ehc->i.action; in ata_eh_reset()
2893 /* If present, do prereset on slave link too. Reset in ata_eh_reset()
2895 * -ENOENT or clear ATA_EH_RESET. in ata_eh_reset()
2897 if (slave && (rc == 0 || rc == -ENOENT)) { in ata_eh_reset()
2901 if (tmp != -ENOENT) in ata_eh_reset()
2904 ehc->i.action |= sehc->i.action; in ata_eh_reset()
2908 if (rc == -ENOENT) { in ata_eh_reset()
2909 ata_link_dbg(link, "port disabled--ignoring\n"); in ata_eh_reset()
2910 ehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2913 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2926 if (reset && !(ehc->i.action & ATA_EH_RESET)) { in ata_eh_reset()
2928 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2951 ehc->last_reset = jiffies; in ata_eh_reset()
2953 ehc->i.flags |= ATA_EHI_DID_HARDRESET; in ata_eh_reset()
2956 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; in ata_eh_reset()
2965 if (rc && rc != -EAGAIN) { in ata_eh_reset()
2984 case -EAGAIN: in ata_eh_reset()
2985 rc = -EAGAIN; in ata_eh_reset()
2996 /* perform follow-up SRST if necessary */ in ata_eh_reset()
3003 "follow-up softreset required but no softreset available\n"); in ata_eh_reset()
3005 rc = -EINVAL; in ata_eh_reset()
3027 * Post-reset processing in ata_eh_reset()
3034 dev->pio_mode = XFER_PIO_0; in ata_eh_reset()
3035 dev->flags &= ~ATA_DFLAG_SLEEPING; in ata_eh_reset()
3042 classes[dev->devno] = ATA_DEV_ATA; in ata_eh_reset()
3044 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; in ata_eh_reset()
3049 link->sata_spd = (sstatus >> 4) & 0xf; in ata_eh_reset()
3051 slave->sata_spd = (sstatus >> 4) & 0xf; in ata_eh_reset()
3074 spin_lock_irqsave(link->ap->lock, flags); in ata_eh_reset()
3075 link->eh_info.serror = 0; in ata_eh_reset()
3077 slave->eh_info.serror = 0; in ata_eh_reset()
3078 spin_unlock_irqrestore(link->ap->lock, flags); in ata_eh_reset()
3085 * link on/offlineness and classification result, those in ata_eh_reset()
3091 if (classes[dev->devno] == ATA_DEV_UNKNOWN) { in ata_eh_reset()
3093 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
3097 if (ata_class_enabled(classes[dev->devno])) in ata_eh_reset()
3100 classes[dev->devno]); in ata_eh_reset()
3101 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
3102 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { in ata_eh_reset()
3105 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
3115 rc = -EAGAIN; in ata_eh_reset()
3127 ehc->last_reset = jiffies; /* update to completion time */ in ata_eh_reset()
3128 ehc->i.action |= ATA_EH_REVALIDATE; in ata_eh_reset()
3129 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ in ata_eh_reset()
3134 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; in ata_eh_reset()
3136 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; in ata_eh_reset()
3138 spin_lock_irqsave(ap->lock, flags); in ata_eh_reset()
3139 ap->pflags &= ~ATA_PFLAG_RESETTING; in ata_eh_reset()
3140 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_reset()
3145 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ in ata_eh_reset()
3148 rc = -ERESTART; in ata_eh_reset()
3153 * can be retried on the next phy event. This risks in ata_eh_reset()
3166 unsigned long delta = deadline - now; in ata_eh_reset()
3180 * They need to be reset - as well as the PMP - before retrying. in ata_eh_reset()
3182 if (rc == -ERESTART) { in ata_eh_reset()
3188 if (try == max_tries - 1) { in ata_eh_reset()
3192 } else if (rc == -EPIPE) in ata_eh_reset()
3211 * long as the timeout for a park request to *one* device on in ata_eh_pull_park_action()
3213 * up park requests to other devices on the same port or in ata_eh_pull_park_action()
3218 * Additionally, all write accesses to &ap->park_req_pending in ata_eh_pull_park_action()
3221 * As a result we have that park_req_pending.done is zero on in ata_eh_pull_park_action()
3223 * *all* devices on port ap have been pulled into the in ata_eh_pull_park_action()
3225 * park_req_pending.done is non-zero by the time we reach in ata_eh_pull_park_action()
3227 * has been scheduled for at least one of the devices on port in ata_eh_pull_park_action()
3232 spin_lock_irqsave(ap->lock, flags); in ata_eh_pull_park_action()
3233 reinit_completion(&ap->park_req_pending); in ata_eh_pull_park_action()
3236 struct ata_eh_info *ehi = &link->eh_info; in ata_eh_pull_park_action()
3238 link->eh_context.i.dev_action[dev->devno] |= in ata_eh_pull_park_action()
3239 ehi->dev_action[dev->devno] & ATA_EH_PARK; in ata_eh_pull_park_action()
3243 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_pull_park_action()
3248 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_park_issue_cmd()
3254 ehc->unloaded_mask |= 1 << dev->devno; in ata_eh_park_issue_cmd()
3261 ehc->unloaded_mask &= ~(1 << dev->devno); in ata_eh_park_issue_cmd()
3270 ehc->unloaded_mask &= ~(1 << dev->devno); in ata_eh_park_issue_cmd()
3277 struct ata_port *ap = link->ap; in ata_eh_revalidate_and_attach()
3278 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_revalidate_and_attach()
3285 * be done backwards such that PDIAG- is released by the slave in ata_eh_revalidate_and_attach()
3292 if (ehc->i.flags & ATA_EHI_DID_RESET) in ata_eh_revalidate_and_attach()
3296 WARN_ON(dev->class == ATA_DEV_PMP); in ata_eh_revalidate_and_attach()
3306 * to ap->target_lpm_policy after revalidation is done. in ata_eh_revalidate_and_attach()
3308 if (link->lpm_policy > ATA_LPM_MAX_POWER) { in ata_eh_revalidate_and_attach()
3316 rc = -EIO; in ata_eh_revalidate_and_attach()
3321 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], in ata_eh_revalidate_and_attach()
3331 ehc->i.flags |= ATA_EHI_SETMODE; in ata_eh_revalidate_and_attach()
3334 schedule_delayed_work(&ap->scsi_rescan_task, 0); in ata_eh_revalidate_and_attach()
3335 } else if (dev->class == ATA_DEV_UNKNOWN && in ata_eh_revalidate_and_attach()
3336 ehc->tries[dev->devno] && in ata_eh_revalidate_and_attach()
3337 ata_class_enabled(ehc->classes[dev->devno])) { in ata_eh_revalidate_and_attach()
3338 /* Temporarily set dev->class, it will be in ata_eh_revalidate_and_attach()
3344 dev->class = ehc->classes[dev->devno]; in ata_eh_revalidate_and_attach()
3346 if (dev->class == ATA_DEV_PMP) in ata_eh_revalidate_and_attach()
3349 rc = ata_dev_read_id(dev, &dev->class, in ata_eh_revalidate_and_attach()
3350 readid_flags, dev->id); in ata_eh_revalidate_and_attach()
3353 ehc->classes[dev->devno] = dev->class; in ata_eh_revalidate_and_attach()
3354 dev->class = ATA_DEV_UNKNOWN; in ata_eh_revalidate_and_attach()
3359 ata_ering_clear(&dev->ering); in ata_eh_revalidate_and_attach()
3360 new_mask |= 1 << dev->devno; in ata_eh_revalidate_and_attach()
3362 case -ENOENT: in ata_eh_revalidate_and_attach()
3363 /* IDENTIFY was issued to non-existent in ata_eh_revalidate_and_attach()
3365 * thaw and ignore the device. in ata_eh_revalidate_and_attach()
3375 /* PDIAG- should have been released, ask cable type if post-reset */ in ata_eh_revalidate_and_attach()
3376 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { in ata_eh_revalidate_and_attach()
3377 if (ap->ops->cable_detect) in ata_eh_revalidate_and_attach()
3378 ap->cbl = ap->ops->cable_detect(ap); in ata_eh_revalidate_and_attach()
3386 if (!(new_mask & (1 << dev->devno))) in ata_eh_revalidate_and_attach()
3389 dev->class = ehc->classes[dev->devno]; in ata_eh_revalidate_and_attach()
3391 if (dev->class == ATA_DEV_PMP) in ata_eh_revalidate_and_attach()
3394 ehc->i.flags |= ATA_EHI_PRINTINFO; in ata_eh_revalidate_and_attach()
3396 ehc->i.flags &= ~ATA_EHI_PRINTINFO; in ata_eh_revalidate_and_attach()
3398 dev->class = ATA_DEV_UNKNOWN; in ata_eh_revalidate_and_attach()
3402 spin_lock_irqsave(ap->lock, flags); in ata_eh_revalidate_and_attach()
3403 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; in ata_eh_revalidate_and_attach()
3404 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_revalidate_and_attach()
3407 ehc->i.flags |= ATA_EHI_SETMODE; in ata_eh_revalidate_and_attach()
3413 dev->flags &= ~ATA_DFLAG_RESUMING; in ata_eh_revalidate_and_attach()
3419 * ata_eh_set_mode - Program timings and issue SET FEATURES - XFER
3420 * @link: link on which timings will be programmed
3431 * 0 on success, negative errno otherwise
3436 struct ata_port *ap = link->ap; in ata_eh_set_mode()
3440 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ in ata_eh_set_mode()
3442 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { in ata_eh_set_mode()
3445 ent = ata_ering_top(&dev->ering); in ata_eh_set_mode()
3447 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; in ata_eh_set_mode()
3452 if (ap->ops->set_mode) in ata_eh_set_mode()
3453 rc = ap->ops->set_mode(link, r_failed_dev); in ata_eh_set_mode()
3457 /* if transfer mode has changed, set DUBIOUS_XFER on device */ in ata_eh_set_mode()
3459 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_set_mode()
3460 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; in ata_eh_set_mode()
3461 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); in ata_eh_set_mode()
3463 if (dev->xfer_mode != saved_xfer_mode || in ata_eh_set_mode()
3465 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; in ata_eh_set_mode()
3472 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3483 * 0 on success, -errno on failure.
3490 u8 *sense_buffer = dev->sector_buf; in atapi_eh_clear_ua()
3499 return -EIO; in atapi_eh_clear_ua()
3509 return -EIO; in atapi_eh_clear_ua()
3520 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3534 * 0 if EH can continue, -errno if EH needs to be repeated.
3538 struct ata_link *link = dev->link; in ata_eh_maybe_retry_flush()
3539 struct ata_port *ap = link->ap; in ata_eh_maybe_retry_flush()
3546 if (!ata_tag_valid(link->active_tag)) in ata_eh_maybe_retry_flush()
3549 qc = __ata_qc_from_tag(ap, link->active_tag); in ata_eh_maybe_retry_flush()
3550 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && in ata_eh_maybe_retry_flush()
3551 qc->tf.command != ATA_CMD_FLUSH)) in ata_eh_maybe_retry_flush()
3555 if (qc->err_mask & AC_ERR_DEV) in ata_eh_maybe_retry_flush()
3561 tf.command = qc->tf.command; in ata_eh_maybe_retry_flush()
3566 tf.command, qc->err_mask); in ata_eh_maybe_retry_flush()
3574 * retrying it should do the trick - whatever was in in ata_eh_maybe_retry_flush()
3575 * the cache is already on the platter and this won't in ata_eh_maybe_retry_flush()
3578 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); in ata_eh_maybe_retry_flush()
3582 rc = -EIO; in ata_eh_maybe_retry_flush()
3586 qc->err_mask |= AC_ERR_DEV; in ata_eh_maybe_retry_flush()
3587 qc->result_tf = tf; in ata_eh_maybe_retry_flush()
3611 if (dev->class == ATA_DEV_UNKNOWN) in ata_link_nr_vacant()
3618 struct ata_port *ap = link->ap; in ata_eh_skip_recovery()
3619 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_skip_recovery()
3623 if (link->flags & ATA_LFLAG_DISABLED) in ata_eh_skip_recovery()
3627 if (ehc->i.flags & ATA_EHI_NO_RECOVERY) in ata_eh_skip_recovery()
3635 if ((ehc->i.action & ATA_EH_RESET) && in ata_eh_skip_recovery()
3636 !(ehc->i.flags & ATA_EHI_DID_RESET)) in ata_eh_skip_recovery()
3641 if (dev->class == ATA_DEV_UNKNOWN && in ata_eh_skip_recovery()
3642 ehc->classes[dev->devno] != ATA_DEV_NONE) in ata_eh_skip_recovery()
3655 if ((ent->eflags & ATA_EFLAG_OLD_ER) || in ata_count_probe_trials_cb()
3656 (ent->timestamp < now - min(now, interval))) in ata_count_probe_trials_cb()
3657 return -1; in ata_count_probe_trials_cb()
3665 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_schedule_probe()
3669 if (!(ehc->i.probe_mask & (1 << dev->devno)) || in ata_eh_schedule_probe()
3670 (ehc->did_probe_mask & (1 << dev->devno))) in ata_eh_schedule_probe()
3675 ehc->did_probe_mask |= (1 << dev->devno); in ata_eh_schedule_probe()
3676 ehc->i.action |= ATA_EH_RESET; in ata_eh_schedule_probe()
3677 ehc->saved_xfer_mode[dev->devno] = 0; in ata_eh_schedule_probe()
3678 ehc->saved_ncq_enabled &= ~(1 << dev->devno); in ata_eh_schedule_probe()
3681 if (link->lpm_policy > ATA_LPM_MAX_POWER) { in ata_eh_schedule_probe()
3683 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, in ata_eh_schedule_probe()
3690 /* Record and count probe trials on the ering. The specific in ata_eh_schedule_probe()
3703 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); in ata_eh_schedule_probe()
3704 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); in ata_eh_schedule_probe()
3714 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_handle_dev_fail()
3716 /* -EAGAIN from EH routine indicates retry without prejudice. in ata_eh_handle_dev_fail()
3719 if (err != -EAGAIN) in ata_eh_handle_dev_fail()
3720 ehc->tries[dev->devno]--; in ata_eh_handle_dev_fail()
3723 case -ENODEV: in ata_eh_handle_dev_fail()
3725 ehc->i.probe_mask |= (1 << dev->devno); in ata_eh_handle_dev_fail()
3727 case -EINVAL: in ata_eh_handle_dev_fail()
3729 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); in ata_eh_handle_dev_fail()
3731 case -EIO: in ata_eh_handle_dev_fail()
3732 if (ehc->tries[dev->devno] == 1) { in ata_eh_handle_dev_fail()
3737 if (dev->pio_mode > XFER_PIO_0) in ata_eh_handle_dev_fail()
3742 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { in ata_eh_handle_dev_fail()
3752 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; in ata_eh_handle_dev_fail()
3753 memset(ehc->cmd_timeout_idx[dev->devno], 0, in ata_eh_handle_dev_fail()
3754 sizeof(ehc->cmd_timeout_idx[dev->devno])); in ata_eh_handle_dev_fail()
3759 ehc->i.action |= ATA_EH_RESET; in ata_eh_handle_dev_fail()
3765 * ata_eh_recover - recover host port after error
3771 * libata exception handling. On entry, actions required to
3781 * 0 on success, -errno on failure.
3793 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3795 /* re-enable link? */ in ata_eh_recover()
3796 if (ehc->i.action & ATA_EH_ENABLE_LINK) { in ata_eh_recover()
3798 spin_lock_irqsave(ap->lock, flags); in ata_eh_recover()
3799 link->flags &= ~ATA_LFLAG_DISABLED; in ata_eh_recover()
3800 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_recover()
3805 if (link->flags & ATA_LFLAG_NO_RETRY) in ata_eh_recover()
3806 ehc->tries[dev->devno] = 1; in ata_eh_recover()
3808 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; in ata_eh_recover()
3811 ehc->i.action |= ehc->i.dev_action[dev->devno] & in ata_eh_recover()
3813 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; in ata_eh_recover()
3816 if (dev->flags & ATA_DFLAG_DETACH) in ata_eh_recover()
3829 if (ap->pflags & ATA_PFLAG_UNLOADING) in ata_eh_recover()
3834 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3838 ehc->i.action = 0; in ata_eh_recover()
3841 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; in ata_eh_recover()
3846 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3848 if (!(ehc->i.action & ATA_EH_RESET)) in ata_eh_recover()
3863 * ap->park_req_pending in ata_eh_recover()
3870 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3873 if (dev->class != ATA_DEV_ATA && in ata_eh_recover()
3874 dev->class != ATA_DEV_ZAC) in ata_eh_recover()
3876 if (!(ehc->i.dev_action[dev->devno] & in ata_eh_recover()
3879 tmp = dev->unpark_deadline; in ata_eh_recover()
3884 if (ehc->unloaded_mask & (1 << dev->devno)) in ata_eh_recover()
3896 deadline = wait_for_completion_timeout(&ap->park_req_pending, in ata_eh_recover()
3897 deadline - now); in ata_eh_recover()
3902 if (!(link->eh_context.unloaded_mask & in ata_eh_recover()
3903 (1 << dev->devno))) in ata_eh_recover()
3914 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3925 if (link->device->class == ATA_DEV_PMP) { in ata_eh_recover()
3926 ehc->i.action = 0; in ata_eh_recover()
3931 if (ehc->i.flags & ATA_EHI_SETMODE) { in ata_eh_recover()
3935 ehc->i.flags &= ~ATA_EHI_SETMODE; in ata_eh_recover()
3941 if (ehc->i.flags & ATA_EHI_DID_RESET) { in ata_eh_recover()
3943 if (dev->class != ATA_DEV_ATAPI) in ata_eh_recover()
3954 * Make sure to transition devices to the active power mode in ata_eh_recover()
3955 * if needed (e.g. if we were scheduled on system resume). in ata_eh_recover()
3958 if (ehc->i.dev_action[dev->devno] & ATA_EH_SET_ACTIVE) { in ata_eh_recover()
3966 if (dev->class != ATA_DEV_ATA && in ata_eh_recover()
3967 dev->class != ATA_DEV_ZAC) in ata_eh_recover()
3975 /* configure link power saving */ in ata_eh_recover()
3976 if (link->lpm_policy != ap->target_lpm_policy) { in ata_eh_recover()
3977 rc = ata_eh_link_set_lpm(link, ap->target_lpm_policy, in ata_eh_recover()
3984 ehc->i.flags = 0; in ata_eh_recover()
4013 * ata_eh_finish - finish up EH
4029 if (!(qc->flags & ATA_QCFLAG_EH)) in ata_eh_finish()
4032 if (qc->err_mask) { in ata_eh_finish()
4037 if (qc->flags & ATA_QCFLAG_RETRY) { in ata_eh_finish()
4039 * Since qc->err_mask is set, ata_eh_qc_retry() in ata_eh_finish()
4040 * will not increment scmd->allowed, so upper in ata_eh_finish()
4049 if (qc->flags & ATA_QCFLAG_SENSE_VALID || in ata_eh_finish()
4050 qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) { in ata_eh_finish()
4054 memset(&qc->result_tf, 0, sizeof(qc->result_tf)); in ata_eh_finish()
4056 * Since qc->err_mask is not set, in ata_eh_finish()
4058 * scmd->allowed, so upper layer is guaranteed in ata_eh_finish()
4067 WARN_ON(ap->nr_active_links); in ata_eh_finish()
4068 ap->nr_active_links = 0; in ata_eh_finish()
4072 * ata_std_error_handler - standard error handler
4082 struct ata_reset_operations *reset_ops = &ap->ops->reset; in ata_std_error_handler()
4083 struct ata_link *link = &ap->link; in ata_std_error_handler()
4086 /* Ignore built-in hardresets if SCR access is not available */ in ata_std_error_handler()
4087 if ((reset_ops->hardreset == sata_std_hardreset || in ata_std_error_handler()
4088 reset_ops->hardreset == sata_sff_hardreset) && in ata_std_error_handler()
4090 link->flags |= ATA_LFLAG_NO_HRST; in ata_std_error_handler()
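ata_std_error_handler() is the ->error_handler that non-libsas drivers normally plug in, alongside reset methods grouped under ops->reset as the fragments above dereference them. A hedged sketch of the wiring; the ops name is invented, and the nested .reset initializer assumes the grouped struct ata_reset_operations layout seen here:

#include <linux/libata.h>

static struct ata_port_operations demo_port_ops = {
        .inherits        = &sata_port_ops,
        /* grouped reset methods, as read back by ata_std_error_handler() */
        .reset.hardreset = sata_std_hardreset,
        .error_handler   = ata_std_error_handler,
};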
4109 * ata_eh_handle_port_suspend - perform port suspend operation
4125 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_suspend()
4126 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || in ata_eh_handle_port_suspend()
4127 ap->pm_mesg.event & PM_EVENT_RESUME) { in ata_eh_handle_port_suspend()
4128 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4131 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4133 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); in ata_eh_handle_port_suspend()
4143 if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) { in ata_eh_handle_port_suspend()
4153 * power ready status before the port is frozen. in ata_eh_handle_port_suspend()
4156 if (PMSG_IS_AUTO(ap->pm_mesg)) { in ata_eh_handle_port_suspend()
4157 ata_for_each_dev(dev, &ap->link, ENABLED) { in ata_eh_handle_port_suspend()
4166 if (ap->ops->port_suspend) in ata_eh_handle_port_suspend()
4167 rc = ap->ops->port_suspend(ap, ap->pm_mesg); in ata_eh_handle_port_suspend()
4169 ata_acpi_set_state(ap, ap->pm_mesg); in ata_eh_handle_port_suspend()
4172 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_suspend()
4174 ap->pflags &= ~ATA_PFLAG_PM_PENDING; in ata_eh_handle_port_suspend()
4176 ap->pflags |= ATA_PFLAG_SUSPENDED; in ata_eh_handle_port_suspend()
4180 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4186 * ata_eh_handle_port_resume - perform port resume operation
4201 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_resume()
4202 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || in ata_eh_handle_port_resume()
4203 !(ap->pm_mesg.event & PM_EVENT_RESUME)) { in ata_eh_handle_port_resume()
4204 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()
4207 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()
4209 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); in ata_eh_handle_port_resume()
4220 ata_ering_clear(&dev->ering); in ata_eh_handle_port_resume()
4222 ata_acpi_set_state(ap, ap->pm_mesg); in ata_eh_handle_port_resume()
4224 if (ap->ops->port_resume) in ata_eh_handle_port_resume()
4225 ap->ops->port_resume(ap); in ata_eh_handle_port_resume()
4231 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_resume()
4232 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); in ata_eh_handle_port_resume()
4233 ap->pflags |= ATA_PFLAG_RESUMING; in ata_eh_handle_port_resume()
4234 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()