xref: /linux/drivers/ata/libata-eh.c (revision e8d235d4d8fb8957bae5f6ed4521115203a00d8b)
1 /*
2  *  libata-eh.c - libata error handling
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9  *
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License as
13  *  published by the Free Software Foundation; either version 2, or
14  *  (at your option) any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  *  General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24  *  USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/blkdev.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_eh.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_dbg.h>
45 #include "../scsi/scsi_transport_api.h"
46 
47 #include <linux/libata.h>
48 
49 #include <trace/events/libata.h>
50 #include "libata.h"
51 
52 enum {
53 	/* speed down verdicts */
54 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
55 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
56 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
57 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
58 
59 	/* error flags */
60 	ATA_EFLAG_IS_IO			= (1 << 0),
61 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
62 	ATA_EFLAG_OLD_ER                = (1 << 31),
63 
64 	/* error categories */
65 	ATA_ECAT_NONE			= 0,
66 	ATA_ECAT_ATA_BUS		= 1,
67 	ATA_ECAT_TOUT_HSM		= 2,
68 	ATA_ECAT_UNK_DEV		= 3,
69 	ATA_ECAT_DUBIOUS_NONE		= 4,
70 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
71 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
72 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
73 	ATA_ECAT_NR			= 8,
74 
75 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
76 
77 	/* always put at least this amount of time between resets */
78 	ATA_EH_RESET_COOL_DOWN		=  5000,
79 
80 	/* Waiting in ->prereset can never be reliable.  It's
81 	 * sometimes nice to wait there but it can't be depended upon;
82 	 * otherwise, we wouldn't be resetting.  Just give it enough
83 	 * time for most drives to spin up.
84 	 */
85 	ATA_EH_PRERESET_TIMEOUT		= 10000,
86 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
87 
88 	ATA_EH_UA_TRIES			= 5,
89 
90 	/* probe speed down parameters, see ata_eh_schedule_probe() */
91 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
92 	ATA_EH_PROBE_TRIALS		= 2,
93 };
94 
95 /* The following table determines how we sequence resets.  Each entry
96  * represents timeout for that try.  The first try can be soft or
97  * hardreset.  All others are hardreset if available.  In most cases
98  * the first reset w/ 10sec timeout should succeed.  Following entries
99  * are mostly for error handling, hotplug and those outlier devices that
100  * take an exceptionally long time to recover from reset.
101  */
102 static const unsigned long ata_eh_reset_timeouts[] = {
103 	10000,	/* most drives spin up by 10sec */
104 	10000,	/* > 99% working drives spin up before 20sec */
105 	35000,	/* give > 30 secs of idleness for outlier devices */
106 	 5000,	/* and sweet one last chance */
107 	ULONG_MAX, /* > 1 min has elapsed, give up */
108 };
109 
110 static const unsigned long ata_eh_identify_timeouts[] = {
111 	 5000,	/* covers > 99% of successes and not too boring on failures */
112 	10000,  /* combined time till here is enough even for media access */
113 	30000,	/* for true idiots */
114 	ULONG_MAX,
115 };
116 
117 static const unsigned long ata_eh_flush_timeouts[] = {
118 	15000,	/* be generous with flush */
119 	15000,  /* ditto */
120 	30000,	/* and even more generous */
121 	ULONG_MAX,
122 };
123 
124 static const unsigned long ata_eh_other_timeouts[] = {
125 	 5000,	/* same rationale as identify timeout */
126 	10000,	/* ditto */
127 	/* but no merciful 30sec for other commands, it just isn't worth it */
128 	ULONG_MAX,
129 };
130 
131 struct ata_eh_cmd_timeout_ent {
132 	const u8		*commands;
133 	const unsigned long	*timeouts;
134 };
135 
136 /* The following table determines timeouts to use for EH internal
137  * commands.  Each table entry is a command class and matches the
138  * commands the entry applies to and the timeout table to use.
139  *
140  * On the retry after a command timed out, the next timeout value from
141  * the table is used.  If the table doesn't contain further entries,
142  * the last value is used.
143  *
144  * ehc->cmd_timeout_idx keeps track of which timeout to use per
145  * command class, so if SET_FEATURES times out on the first try, the
146  * next try will use the second timeout value only for that class.
147  */
148 #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
149 static const struct ata_eh_cmd_timeout_ent
150 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
151 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
152 	  .timeouts = ata_eh_identify_timeouts, },
153 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
154 	  .timeouts = ata_eh_other_timeouts, },
155 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
156 	  .timeouts = ata_eh_other_timeouts, },
157 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
158 	  .timeouts = ata_eh_other_timeouts, },
159 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
160 	  .timeouts = ata_eh_other_timeouts, },
161 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
162 	  .timeouts = ata_eh_flush_timeouts },
163 };
164 #undef CMDS
165 
166 static void __ata_port_freeze(struct ata_port *ap);
167 #ifdef CONFIG_PM
168 static void ata_eh_handle_port_suspend(struct ata_port *ap);
169 static void ata_eh_handle_port_resume(struct ata_port *ap);
170 #else /* CONFIG_PM */
171 static void ata_eh_handle_port_suspend(struct ata_port *ap)
172 { }
173 
174 static void ata_eh_handle_port_resume(struct ata_port *ap)
175 { }
176 #endif /* CONFIG_PM */
177 
178 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
179 				 va_list args)
180 {
181 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
182 				     ATA_EH_DESC_LEN - ehi->desc_len,
183 				     fmt, args);
184 }
185 
186 /**
187  *	__ata_ehi_push_desc - push error description without adding separator
188  *	@ehi: target EHI
189  *	@fmt: printf format string
190  *
191  *	Format string according to @fmt and append it to @ehi->desc.
192  *
193  *	LOCKING:
194  *	spin_lock_irqsave(host lock)
195  */
196 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
197 {
198 	va_list args;
199 
200 	va_start(args, fmt);
201 	__ata_ehi_pushv_desc(ehi, fmt, args);
202 	va_end(args);
203 }
204 
205 /**
206  *	ata_ehi_push_desc - push error description with separator
207  *	@ehi: target EHI
208  *	@fmt: printf format string
209  *
210  *	Format string according to @fmt and append it to @ehi->desc.
211  *	If @ehi->desc is not empty, ", " is added in-between.
212  *
213  *	LOCKING:
214  *	spin_lock_irqsave(host lock)
215  */
216 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
217 {
218 	va_list args;
219 
220 	if (ehi->desc_len)
221 		__ata_ehi_push_desc(ehi, ", ");
222 
223 	va_start(args, fmt);
224 	__ata_ehi_pushv_desc(ehi, fmt, args);
225 	va_end(args);
226 }
227 
228 /**
229  *	ata_ehi_clear_desc - clean error description
230  *	@ehi: target EHI
231  *
232  *	Clear @ehi->desc.
233  *
234  *	LOCKING:
235  *	spin_lock_irqsave(host lock)
236  */
237 void ata_ehi_clear_desc(struct ata_eh_info *ehi)
238 {
239 	ehi->desc[0] = '\0';
240 	ehi->desc_len = 0;
241 }
242 
243 /**
244  *	ata_port_desc - append port description
245  *	@ap: target ATA port
246  *	@fmt: printf format string
247  *
248  *	Format string according to @fmt and append it to port
249  *	description.  If port description is not empty, " " is added
250  *	in-between.  This function is to be used while initializing
251  *	ata_host.  The description is printed on host registration.
252  *
253  *	LOCKING:
254  *	None.
255  */
256 void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
257 {
258 	va_list args;
259 
260 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
261 
262 	if (ap->link.eh_info.desc_len)
263 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
264 
265 	va_start(args, fmt);
266 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
267 	va_end(args);
268 }
269 
270 #ifdef CONFIG_PCI
271 
272 /**
273  *	ata_port_pbar_desc - append PCI BAR description
274  *	@ap: target ATA port
275  *	@bar: target PCI BAR
276  *	@offset: offset into PCI BAR
277  *	@name: name of the area
278  *
279  *	If @offset is negative, this function formats a string which
280  *	contains the name, address, size and type of the BAR and
281  *	appends it to the port description.  If @offset is zero or
282  *	positive, only name and offsetted address is appended.
283  *
284  *	LOCKING:
285  *	None.
286  */
287 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
288 			const char *name)
289 {
290 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
291 	char *type = "";
292 	unsigned long long start, len;
293 
294 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
295 		type = "m";
296 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
297 		type = "i";
298 
299 	start = (unsigned long long)pci_resource_start(pdev, bar);
300 	len = (unsigned long long)pci_resource_len(pdev, bar);
301 
302 	if (offset < 0)
303 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
304 	else
305 		ata_port_desc(ap, "%s 0x%llx", name,
306 				start + (unsigned long long)offset);
307 }
308 
309 #endif /* CONFIG_PCI */
310 
311 static int ata_lookup_timeout_table(u8 cmd)
312 {
313 	int i;
314 
315 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
316 		const u8 *cur;
317 
318 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
319 			if (*cur == cmd)
320 				return i;
321 	}
322 
323 	return -1;
324 }
325 
326 /**
327  *	ata_internal_cmd_timeout - determine timeout for an internal command
328  *	@dev: target device
329  *	@cmd: internal command to be issued
330  *
331  *	Determine timeout for internal command @cmd for @dev.
332  *
333  *	LOCKING:
334  *	EH context.
335  *
336  *	RETURNS:
337  *	Determined timeout.
338  */
339 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
340 {
341 	struct ata_eh_context *ehc = &dev->link->eh_context;
342 	int ent = ata_lookup_timeout_table(cmd);
343 	int idx;
344 
345 	if (ent < 0)
346 		return ATA_EH_CMD_DFL_TIMEOUT;
347 
348 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
349 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
350 }
351 
352 /**
353  *	ata_internal_cmd_timed_out - notification for internal command timeout
354  *	@dev: target device
355  *	@cmd: internal command which timed out
356  *
357  *	Notify EH that internal command @cmd for @dev timed out.  This
358  *	function should be called only for commands whose timeouts are
359  *	determined using ata_internal_cmd_timeout().
360  *
361  *	LOCKING:
362  *	EH context.
363  */
364 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
365 {
366 	struct ata_eh_context *ehc = &dev->link->eh_context;
367 	int ent = ata_lookup_timeout_table(cmd);
368 	int idx;
369 
370 	if (ent < 0)
371 		return;
372 
373 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
374 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
375 		ehc->cmd_timeout_idx[dev->devno][ent]++;
376 }
377 
378 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
379 			     unsigned int err_mask)
380 {
381 	struct ata_ering_entry *ent;
382 
383 	WARN_ON(!err_mask);
384 
385 	ering->cursor++;
386 	ering->cursor %= ATA_ERING_SIZE;
387 
388 	ent = &ering->ring[ering->cursor];
389 	ent->eflags = eflags;
390 	ent->err_mask = err_mask;
391 	ent->timestamp = get_jiffies_64();
392 }
393 
394 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
395 {
396 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
397 
398 	if (ent->err_mask)
399 		return ent;
400 	return NULL;
401 }
402 
403 int ata_ering_map(struct ata_ering *ering,
404 		  int (*map_fn)(struct ata_ering_entry *, void *),
405 		  void *arg)
406 {
407 	int idx, rc = 0;
408 	struct ata_ering_entry *ent;
409 
410 	idx = ering->cursor;
411 	do {
412 		ent = &ering->ring[idx];
413 		if (!ent->err_mask)
414 			break;
415 		rc = map_fn(ent, arg);
416 		if (rc)
417 			break;
418 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
419 	} while (idx != ering->cursor);
420 
421 	return rc;
422 }
423 
424 static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
425 {
426 	ent->eflags |= ATA_EFLAG_OLD_ER;
427 	return 0;
428 }
429 
430 static void ata_ering_clear(struct ata_ering *ering)
431 {
432 	ata_ering_map(ering, ata_ering_clear_cb, NULL);
433 }
434 
435 static unsigned int ata_eh_dev_action(struct ata_device *dev)
436 {
437 	struct ata_eh_context *ehc = &dev->link->eh_context;
438 
439 	return ehc->i.action | ehc->i.dev_action[dev->devno];
440 }
441 
442 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
443 				struct ata_eh_info *ehi, unsigned int action)
444 {
445 	struct ata_device *tdev;
446 
447 	if (!dev) {
448 		ehi->action &= ~action;
449 		ata_for_each_dev(tdev, link, ALL)
450 			ehi->dev_action[tdev->devno] &= ~action;
451 	} else {
452 		/* doesn't make sense for port-wide EH actions */
453 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
454 
455 		/* break ehi->action into ehi->dev_action */
456 		if (ehi->action & action) {
457 			ata_for_each_dev(tdev, link, ALL)
458 				ehi->dev_action[tdev->devno] |=
459 					ehi->action & action;
460 			ehi->action &= ~action;
461 		}
462 
463 		/* turn off the specified per-dev action */
464 		ehi->dev_action[dev->devno] &= ~action;
465 	}
466 }
467 
468 /**
469  *	ata_eh_acquire - acquire EH ownership
470  *	@ap: ATA port to acquire EH ownership for
471  *
472  *	Acquire EH ownership for @ap.  This is the basic exclusion
473  *	mechanism for ports sharing a host.  Only one port hanging off
474  *	the same host can claim the ownership of EH.
475  *
476  *	LOCKING:
477  *	EH context.
478  */
479 void ata_eh_acquire(struct ata_port *ap)
480 {
481 	mutex_lock(&ap->host->eh_mutex);
482 	WARN_ON_ONCE(ap->host->eh_owner);
483 	ap->host->eh_owner = current;
484 }
485 
486 /**
487  *	ata_eh_release - release EH ownership
488  *	@ap: ATA port to release EH ownership for
489  *
490  *	Release EH ownership for @ap if the caller.  The caller must
491  *	have acquired EH ownership using ata_eh_acquire() previously.
492  *
493  *	LOCKING:
494  *	EH context.
495  */
496 void ata_eh_release(struct ata_port *ap)
497 {
498 	WARN_ON_ONCE(ap->host->eh_owner != current);
499 	ap->host->eh_owner = NULL;
500 	mutex_unlock(&ap->host->eh_mutex);
501 }
502 
503 /**
504  *	ata_scsi_timed_out - SCSI layer time out callback
505  *	@cmd: timed out SCSI command
506  *
507  *	Handles SCSI layer timeout.  We race with normal completion of
508  *	the qc for @cmd.  If the qc is already gone, we lose and let
509  *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
510  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
511  *	from finishing it by setting EH_SCHEDULED and return
512  *	EH_NOT_HANDLED.
513  *
514  *	TODO: kill this function once old EH is gone.
515  *
516  *	LOCKING:
517  *	Called from timer context
518  *
519  *	RETURNS:
520  *	EH_HANDLED or EH_NOT_HANDLED
521  */
522 enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
523 {
524 	struct Scsi_Host *host = cmd->device->host;
525 	struct ata_port *ap = ata_shost_to_port(host);
526 	unsigned long flags;
527 	struct ata_queued_cmd *qc;
528 	enum blk_eh_timer_return ret;
529 
530 	DPRINTK("ENTER\n");
531 
532 	if (ap->ops->error_handler) {
533 		ret = BLK_EH_NOT_HANDLED;
534 		goto out;
535 	}
536 
537 	ret = BLK_EH_HANDLED;
538 	spin_lock_irqsave(ap->lock, flags);
539 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
540 	if (qc) {
541 		WARN_ON(qc->scsicmd != cmd);
542 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
543 		qc->err_mask |= AC_ERR_TIMEOUT;
544 		ret = BLK_EH_NOT_HANDLED;
545 	}
546 	spin_unlock_irqrestore(ap->lock, flags);
547 
548  out:
549 	DPRINTK("EXIT, ret=%d\n", ret);
550 	return ret;
551 }
552 
553 static void ata_eh_unload(struct ata_port *ap)
554 {
555 	struct ata_link *link;
556 	struct ata_device *dev;
557 	unsigned long flags;
558 
559 	/* Restore SControl IPM and SPD for the next driver and
560 	 * disable attached devices.
561 	 */
562 	ata_for_each_link(link, ap, PMP_FIRST) {
563 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
564 		ata_for_each_dev(dev, link, ALL)
565 			ata_dev_disable(dev);
566 	}
567 
568 	/* freeze and set UNLOADED */
569 	spin_lock_irqsave(ap->lock, flags);
570 
571 	ata_port_freeze(ap);			/* won't be thawed */
572 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
573 	ap->pflags |= ATA_PFLAG_UNLOADED;
574 
575 	spin_unlock_irqrestore(ap->lock, flags);
576 }
577 
578 /**
579  *	ata_scsi_error - SCSI layer error handler callback
580  *	@host: SCSI host on which error occurred
581  *
582  *	Handles SCSI-layer-thrown error events.
583  *
584  *	LOCKING:
585  *	Inherited from SCSI layer (none, can sleep)
586  *
587  *	RETURNS:
588  *	Zero.
589  */
590 void ata_scsi_error(struct Scsi_Host *host)
591 {
592 	struct ata_port *ap = ata_shost_to_port(host);
593 	unsigned long flags;
594 	LIST_HEAD(eh_work_q);
595 
596 	DPRINTK("ENTER\n");
597 
598 	spin_lock_irqsave(host->host_lock, flags);
599 	list_splice_init(&host->eh_cmd_q, &eh_work_q);
600 	spin_unlock_irqrestore(host->host_lock, flags);
601 
602 	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
603 
604 	/* If we timed raced normal completion and there is nothing to
605 	   recover nr_timedout == 0 why exactly are we doing error recovery ? */
606 	ata_scsi_port_error_handler(host, ap);
607 
608 	/* finish or retry handled scmd's and clean up */
609 	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
610 
611 	DPRINTK("EXIT\n");
612 }
613 
614 /**
615  * ata_scsi_cmd_error_handler - error callback for a list of commands
616  * @host:	scsi host containing the port
617  * @ap:		ATA port within the host
618  * @eh_work_q:	list of commands to process
619  *
620  * process the given list of commands and return those finished to the
621  * ap->eh_done_q.  This function is the first part of the libata error
622  * handler which processes a given list of failed commands.
623  */
624 void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
625 				struct list_head *eh_work_q)
626 {
627 	int i;
628 	unsigned long flags;
629 
630 	/* make sure sff pio task is not running */
631 	ata_sff_flush_pio_task(ap);
632 
633 	/* synchronize with host lock and sort out timeouts */
634 
635 	/* For new EH, all qcs are finished in one of three ways -
636 	 * normal completion, error completion, and SCSI timeout.
637 	 * Both completions can race against SCSI timeout.  When normal
638 	 * completion wins, the qc never reaches EH.  When error
639 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
640 	 *
641 	 * When SCSI timeout wins, things are a bit more complex.
642 	 * Normal or error completion can occur after the timeout but
643 	 * before this point.  In such cases, both types of
644 	 * completions are honored.  A scmd is determined to have
645 	 * timed out iff its associated qc is active and not failed.
646 	 */
647 	if (ap->ops->error_handler) {
648 		struct scsi_cmnd *scmd, *tmp;
649 		int nr_timedout = 0;
650 
651 		spin_lock_irqsave(ap->lock, flags);
652 
653 		/* This must occur under the ap->lock as we don't want
654 		   a polled recovery to race the real interrupt handler
655 
656 		   The lost_interrupt handler checks for any completed but
657 		   non-notified command and completes much like an IRQ handler.
658 
659 		   We then fall into the error recovery code which will treat
660 		   this as if normal completion won the race */
661 
662 		if (ap->ops->lost_interrupt)
663 			ap->ops->lost_interrupt(ap);
664 
665 		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
666 			struct ata_queued_cmd *qc;
667 
668 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
669 				qc = __ata_qc_from_tag(ap, i);
670 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
671 				    qc->scsicmd == scmd)
672 					break;
673 			}
674 
675 			if (i < ATA_MAX_QUEUE) {
676 				/* the scmd has an associated qc */
677 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
678 					/* which hasn't failed yet, timeout */
679 					qc->err_mask |= AC_ERR_TIMEOUT;
680 					qc->flags |= ATA_QCFLAG_FAILED;
681 					nr_timedout++;
682 				}
683 			} else {
684 				/* Normal completion occurred after
685 				 * SCSI timeout but before this point.
686 				 * Successfully complete it.
687 				 */
688 				scmd->retries = scmd->allowed;
689 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
690 			}
691 		}
692 
693 		/* If we have timed out qcs.  They belong to EH from
694 		 * this point but the state of the controller is
695 		 * unknown.  Freeze the port to make sure the IRQ
696 		 * handler doesn't diddle with those qcs.  This must
697 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
698 		 */
699 		if (nr_timedout)
700 			__ata_port_freeze(ap);
701 
702 		spin_unlock_irqrestore(ap->lock, flags);
703 
704 		/* initialize eh_tries */
705 		ap->eh_tries = ATA_EH_MAX_TRIES;
706 	} else
707 		spin_unlock_wait(ap->lock);
708 
709 }
710 EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
711 
712 /**
713  * ata_scsi_port_error_handler - recover the port after the commands
714  * @host:	SCSI host containing the port
715  * @ap:		the ATA port
716  *
717  * Handle the recovery of the port @ap after all the commands
718  * have been recovered.
719  */
720 void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
721 {
722 	unsigned long flags;
723 
724 	/* invoke error handler */
725 	if (ap->ops->error_handler) {
726 		struct ata_link *link;
727 
728 		/* acquire EH ownership */
729 		ata_eh_acquire(ap);
730  repeat:
731 		/* kill fast drain timer */
732 		del_timer_sync(&ap->fastdrain_timer);
733 
734 		/* process port resume request */
735 		ata_eh_handle_port_resume(ap);
736 
737 		/* fetch & clear EH info */
738 		spin_lock_irqsave(ap->lock, flags);
739 
740 		ata_for_each_link(link, ap, HOST_FIRST) {
741 			struct ata_eh_context *ehc = &link->eh_context;
742 			struct ata_device *dev;
743 
744 			memset(&link->eh_context, 0, sizeof(link->eh_context));
745 			link->eh_context.i = link->eh_info;
746 			memset(&link->eh_info, 0, sizeof(link->eh_info));
747 
748 			ata_for_each_dev(dev, link, ENABLED) {
749 				int devno = dev->devno;
750 
751 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
752 				if (ata_ncq_enabled(dev))
753 					ehc->saved_ncq_enabled |= 1 << devno;
754 			}
755 		}
756 
757 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
758 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
759 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
760 
761 		spin_unlock_irqrestore(ap->lock, flags);
762 
763 		/* invoke EH, skip if unloading or suspended */
764 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
765 			ap->ops->error_handler(ap);
766 		else {
767 			/* if unloading, commence suicide */
768 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
769 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
770 				ata_eh_unload(ap);
771 			ata_eh_finish(ap);
772 		}
773 
774 		/* process port suspend request */
775 		ata_eh_handle_port_suspend(ap);
776 
777 		/* Exception might have happened after ->error_handler
778 		 * recovered the port but before this point.  Repeat
779 		 * EH in such case.
780 		 */
781 		spin_lock_irqsave(ap->lock, flags);
782 
783 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
784 			if (--ap->eh_tries) {
785 				spin_unlock_irqrestore(ap->lock, flags);
786 				goto repeat;
787 			}
788 			ata_port_err(ap,
789 				     "EH pending after %d tries, giving up\n",
790 				     ATA_EH_MAX_TRIES);
791 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
792 		}
793 
794 		/* this run is complete, make sure EH info is clear */
795 		ata_for_each_link(link, ap, HOST_FIRST)
796 			memset(&link->eh_info, 0, sizeof(link->eh_info));
797 
798 		/* end eh (clear host_eh_scheduled) while holding
799 		 * ap->lock such that if exception occurs after this
800 		 * point but before EH completion, SCSI midlayer will
801 		 * re-initiate EH.
802 		 */
803 		ap->ops->end_eh(ap);
804 
805 		spin_unlock_irqrestore(ap->lock, flags);
806 		ata_eh_release(ap);
807 	} else {
808 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
809 		ap->ops->eng_timeout(ap);
810 	}
811 
812 	scsi_eh_flush_done_q(&ap->eh_done_q);
813 
814 	/* clean up */
815 	spin_lock_irqsave(ap->lock, flags);
816 
817 	if (ap->pflags & ATA_PFLAG_LOADING)
818 		ap->pflags &= ~ATA_PFLAG_LOADING;
819 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
820 		schedule_delayed_work(&ap->hotplug_task, 0);
821 
822 	if (ap->pflags & ATA_PFLAG_RECOVERED)
823 		ata_port_info(ap, "EH complete\n");
824 
825 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
826 
827 	/* tell wait_eh that we're done */
828 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
829 	wake_up_all(&ap->eh_wait_q);
830 
831 	spin_unlock_irqrestore(ap->lock, flags);
832 }
833 EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
834 
835 /**
836  *	ata_port_wait_eh - Wait for the currently pending EH to complete
837  *	@ap: Port to wait EH for
838  *
839  *	Wait until the currently pending EH is complete.
840  *
841  *	LOCKING:
842  *	Kernel thread context (may sleep).
843  */
844 void ata_port_wait_eh(struct ata_port *ap)
845 {
846 	unsigned long flags;
847 	DEFINE_WAIT(wait);
848 
849  retry:
850 	spin_lock_irqsave(ap->lock, flags);
851 
852 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
853 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
854 		spin_unlock_irqrestore(ap->lock, flags);
855 		schedule();
856 		spin_lock_irqsave(ap->lock, flags);
857 	}
858 	finish_wait(&ap->eh_wait_q, &wait);
859 
860 	spin_unlock_irqrestore(ap->lock, flags);
861 
862 	/* make sure SCSI EH is complete */
863 	if (scsi_host_in_recovery(ap->scsi_host)) {
864 		ata_msleep(ap, 10);
865 		goto retry;
866 	}
867 }
868 EXPORT_SYMBOL_GPL(ata_port_wait_eh);
869 
870 static int ata_eh_nr_in_flight(struct ata_port *ap)
871 {
872 	unsigned int tag;
873 	int nr = 0;
874 
875 	/* count only non-internal commands */
876 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
877 		if (ata_qc_from_tag(ap, tag))
878 			nr++;
879 
880 	return nr;
881 }
882 
883 void ata_eh_fastdrain_timerfn(unsigned long arg)
884 {
885 	struct ata_port *ap = (void *)arg;
886 	unsigned long flags;
887 	int cnt;
888 
889 	spin_lock_irqsave(ap->lock, flags);
890 
891 	cnt = ata_eh_nr_in_flight(ap);
892 
893 	/* are we done? */
894 	if (!cnt)
895 		goto out_unlock;
896 
897 	if (cnt == ap->fastdrain_cnt) {
898 		unsigned int tag;
899 
900 		/* No progress during the last interval, tag all
901 		 * in-flight qcs as timed out and freeze the port.
902 		 */
903 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
904 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
905 			if (qc)
906 				qc->err_mask |= AC_ERR_TIMEOUT;
907 		}
908 
909 		ata_port_freeze(ap);
910 	} else {
911 		/* some qcs have finished, give it another chance */
912 		ap->fastdrain_cnt = cnt;
913 		ap->fastdrain_timer.expires =
914 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
915 		add_timer(&ap->fastdrain_timer);
916 	}
917 
918  out_unlock:
919 	spin_unlock_irqrestore(ap->lock, flags);
920 }
921 
922 /**
923  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
924  *	@ap: target ATA port
925  *	@fastdrain: activate fast drain
926  *
927  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
928  *	is non-zero and EH wasn't pending before.  Fast drain ensures
929  *	that EH kicks in in timely manner.
930  *
931  *	LOCKING:
932  *	spin_lock_irqsave(host lock)
933  */
934 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
935 {
936 	int cnt;
937 
938 	/* already scheduled? */
939 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
940 		return;
941 
942 	ap->pflags |= ATA_PFLAG_EH_PENDING;
943 
944 	if (!fastdrain)
945 		return;
946 
947 	/* do we have in-flight qcs? */
948 	cnt = ata_eh_nr_in_flight(ap);
949 	if (!cnt)
950 		return;
951 
952 	/* activate fast drain */
953 	ap->fastdrain_cnt = cnt;
954 	ap->fastdrain_timer.expires =
955 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
956 	add_timer(&ap->fastdrain_timer);
957 }
958 
959 /**
960  *	ata_qc_schedule_eh - schedule qc for error handling
961  *	@qc: command to schedule error handling for
962  *
963  *	Schedule error handling for @qc.  EH will kick in as soon as
964  *	other commands are drained.
965  *
966  *	LOCKING:
967  *	spin_lock_irqsave(host lock)
968  */
969 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
970 {
971 	struct ata_port *ap = qc->ap;
972 	struct request_queue *q = qc->scsicmd->device->request_queue;
973 	unsigned long flags;
974 
975 	WARN_ON(!ap->ops->error_handler);
976 
977 	qc->flags |= ATA_QCFLAG_FAILED;
978 	ata_eh_set_pending(ap, 1);
979 
980 	/* The following will fail if timeout has already expired.
981 	 * ata_scsi_error() takes care of such scmds on EH entry.
982 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
983 	 * this function completes.
984 	 */
985 	spin_lock_irqsave(q->queue_lock, flags);
986 	blk_abort_request(qc->scsicmd->request);
987 	spin_unlock_irqrestore(q->queue_lock, flags);
988 }
989 
990 /**
991  * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
992  * @ap: ATA port to schedule EH for
993  *
994  *	LOCKING: inherited from ata_port_schedule_eh
995  *	spin_lock_irqsave(host lock)
996  */
997 void ata_std_sched_eh(struct ata_port *ap)
998 {
999 	WARN_ON(!ap->ops->error_handler);
1000 
1001 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
1002 		return;
1003 
1004 	ata_eh_set_pending(ap, 1);
1005 	scsi_schedule_eh(ap->scsi_host);
1006 
1007 	DPRINTK("port EH scheduled\n");
1008 }
1009 EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1010 
1011 /**
1012  * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
1013  * @ap: ATA port to end EH for
1014  *
1015  * In the libata object model there is a 1:1 mapping of ata_port to
1016  * shost, so host fields can be directly manipulated under ap->lock, in
1017  * the libsas case we need to hold a lock at the ha->level to coordinate
1018  * these events.
1019  *
1020  *	LOCKING:
1021  *	spin_lock_irqsave(host lock)
1022  */
1023 void ata_std_end_eh(struct ata_port *ap)
1024 {
1025 	struct Scsi_Host *host = ap->scsi_host;
1026 
1027 	host->host_eh_scheduled = 0;
1028 }
1029 EXPORT_SYMBOL(ata_std_end_eh);
1030 
1031 
1032 /**
1033  *	ata_port_schedule_eh - schedule error handling without a qc
1034  *	@ap: ATA port to schedule EH for
1035  *
1036  *	Schedule error handling for @ap.  EH will kick in as soon as
1037  *	all commands are drained.
1038  *
1039  *	LOCKING:
1040  *	spin_lock_irqsave(host lock)
1041  */
1042 void ata_port_schedule_eh(struct ata_port *ap)
1043 {
1044 	/* see: ata_std_sched_eh, unless you know better */
1045 	ap->ops->sched_eh(ap);
1046 }
1047 
1048 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1049 {
1050 	int tag, nr_aborted = 0;
1051 
1052 	WARN_ON(!ap->ops->error_handler);
1053 
1054 	/* we're gonna abort all commands, no need for fast drain */
1055 	ata_eh_set_pending(ap, 0);
1056 
1057 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1058 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1059 
1060 		if (qc && (!link || qc->dev->link == link)) {
1061 			qc->flags |= ATA_QCFLAG_FAILED;
1062 			ata_qc_complete(qc);
1063 			nr_aborted++;
1064 		}
1065 	}
1066 
1067 	if (!nr_aborted)
1068 		ata_port_schedule_eh(ap);
1069 
1070 	return nr_aborted;
1071 }
1072 
1073 /**
1074  *	ata_link_abort - abort all qc's on the link
1075  *	@link: ATA link to abort qc's for
1076  *
1077  *	Abort all active qc's active on @link and schedule EH.
1078  *
1079  *	LOCKING:
1080  *	spin_lock_irqsave(host lock)
1081  *
1082  *	RETURNS:
1083  *	Number of aborted qc's.
1084  */
1085 int ata_link_abort(struct ata_link *link)
1086 {
1087 	return ata_do_link_abort(link->ap, link);
1088 }
1089 
1090 /**
1091  *	ata_port_abort - abort all qc's on the port
1092  *	@ap: ATA port to abort qc's for
1093  *
1094  *	Abort all active qc's of @ap and schedule EH.
1095  *
1096  *	LOCKING:
1097  *	spin_lock_irqsave(host_set lock)
1098  *
1099  *	RETURNS:
1100  *	Number of aborted qc's.
1101  */
1102 int ata_port_abort(struct ata_port *ap)
1103 {
1104 	return ata_do_link_abort(ap, NULL);
1105 }
1106 
1107 /**
1108  *	__ata_port_freeze - freeze port
1109  *	@ap: ATA port to freeze
1110  *
1111  *	This function is called when HSM violation or some other
1112  *	condition disrupts normal operation of the port.  Frozen port
1113  *	is not allowed to perform any operation until the port is
1114  *	thawed, which usually follows a successful reset.
1115  *
1116  *	ap->ops->freeze() callback can be used for freezing the port
1117  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
1118  *	port cannot be frozen hardware-wise, the interrupt handler
1119  *	must ack and clear interrupts unconditionally while the port
1120  *	is frozen.
1121  *
1122  *	LOCKING:
1123  *	spin_lock_irqsave(host lock)
1124  */
1125 static void __ata_port_freeze(struct ata_port *ap)
1126 {
1127 	WARN_ON(!ap->ops->error_handler);
1128 
1129 	if (ap->ops->freeze)
1130 		ap->ops->freeze(ap);
1131 
1132 	ap->pflags |= ATA_PFLAG_FROZEN;
1133 
1134 	DPRINTK("ata%u port frozen\n", ap->print_id);
1135 }
1136 
1137 /**
1138  *	ata_port_freeze - abort & freeze port
1139  *	@ap: ATA port to freeze
1140  *
1141  *	Abort and freeze @ap.  The freeze operation must be called
1142  *	first, because some hardware requires special operations
1143  *	before the taskfile registers are accessible.
1144  *
1145  *	LOCKING:
1146  *	spin_lock_irqsave(host lock)
1147  *
1148  *	RETURNS:
1149  *	Number of aborted commands.
1150  */
1151 int ata_port_freeze(struct ata_port *ap)
1152 {
1153 	int nr_aborted;
1154 
1155 	WARN_ON(!ap->ops->error_handler);
1156 
1157 	__ata_port_freeze(ap);
1158 	nr_aborted = ata_port_abort(ap);
1159 
1160 	return nr_aborted;
1161 }
1162 
1163 /**
1164  *	sata_async_notification - SATA async notification handler
1165  *	@ap: ATA port where async notification is received
1166  *
1167  *	Handler to be called when async notification via SDB FIS is
1168  *	received.  This function schedules EH if necessary.
1169  *
1170  *	LOCKING:
1171  *	spin_lock_irqsave(host lock)
1172  *
1173  *	RETURNS:
1174  *	1 if EH is scheduled, 0 otherwise.
1175  */
1176 int sata_async_notification(struct ata_port *ap)
1177 {
1178 	u32 sntf;
1179 	int rc;
1180 
1181 	if (!(ap->flags & ATA_FLAG_AN))
1182 		return 0;
1183 
1184 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1185 	if (rc == 0)
1186 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1187 
1188 	if (!sata_pmp_attached(ap) || rc) {
1189 		/* PMP is not attached or SNTF is not available */
1190 		if (!sata_pmp_attached(ap)) {
1191 			/* PMP is not attached.  Check whether ATAPI
1192 			 * AN is configured.  If so, notify media
1193 			 * change.
1194 			 */
1195 			struct ata_device *dev = ap->link.device;
1196 
1197 			if ((dev->class == ATA_DEV_ATAPI) &&
1198 			    (dev->flags & ATA_DFLAG_AN))
1199 				ata_scsi_media_change_notify(dev);
1200 			return 0;
1201 		} else {
1202 			/* PMP is attached but SNTF is not available.
1203 			 * ATAPI async media change notification is
1204 			 * not used.  The PMP must be reporting PHY
1205 			 * status change, schedule EH.
1206 			 */
1207 			ata_port_schedule_eh(ap);
1208 			return 1;
1209 		}
1210 	} else {
1211 		/* PMP is attached and SNTF is available */
1212 		struct ata_link *link;
1213 
1214 		/* check and notify ATAPI AN */
1215 		ata_for_each_link(link, ap, EDGE) {
1216 			if (!(sntf & (1 << link->pmp)))
1217 				continue;
1218 
1219 			if ((link->device->class == ATA_DEV_ATAPI) &&
1220 			    (link->device->flags & ATA_DFLAG_AN))
1221 				ata_scsi_media_change_notify(link->device);
1222 		}
1223 
1224 		/* If PMP is reporting that PHY status of some
1225 		 * downstream ports has changed, schedule EH.
1226 		 */
1227 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1228 			ata_port_schedule_eh(ap);
1229 			return 1;
1230 		}
1231 
1232 		return 0;
1233 	}
1234 }
1235 
1236 /**
1237  *	ata_eh_freeze_port - EH helper to freeze port
1238  *	@ap: ATA port to freeze
1239  *
1240  *	Freeze @ap.
1241  *
1242  *	LOCKING:
1243  *	None.
1244  */
1245 void ata_eh_freeze_port(struct ata_port *ap)
1246 {
1247 	unsigned long flags;
1248 
1249 	if (!ap->ops->error_handler)
1250 		return;
1251 
1252 	spin_lock_irqsave(ap->lock, flags);
1253 	__ata_port_freeze(ap);
1254 	spin_unlock_irqrestore(ap->lock, flags);
1255 }
1256 
1257 /**
1258  *	ata_port_thaw_port - EH helper to thaw port
1259  *	@ap: ATA port to thaw
1260  *
1261  *	Thaw frozen port @ap.
1262  *
1263  *	LOCKING:
1264  *	None.
1265  */
1266 void ata_eh_thaw_port(struct ata_port *ap)
1267 {
1268 	unsigned long flags;
1269 
1270 	if (!ap->ops->error_handler)
1271 		return;
1272 
1273 	spin_lock_irqsave(ap->lock, flags);
1274 
1275 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1276 
1277 	if (ap->ops->thaw)
1278 		ap->ops->thaw(ap);
1279 
1280 	spin_unlock_irqrestore(ap->lock, flags);
1281 
1282 	DPRINTK("ata%u port thawed\n", ap->print_id);
1283 }
1284 
1285 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1286 {
1287 	/* nada */
1288 }
1289 
1290 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1291 {
1292 	struct ata_port *ap = qc->ap;
1293 	struct scsi_cmnd *scmd = qc->scsicmd;
1294 	unsigned long flags;
1295 
1296 	spin_lock_irqsave(ap->lock, flags);
1297 	qc->scsidone = ata_eh_scsidone;
1298 	__ata_qc_complete(qc);
1299 	WARN_ON(ata_tag_valid(qc->tag));
1300 	spin_unlock_irqrestore(ap->lock, flags);
1301 
1302 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1303 }
1304 
1305 /**
1306  *	ata_eh_qc_complete - Complete an active ATA command from EH
1307  *	@qc: Command to complete
1308  *
1309  *	Indicate to the mid and upper layers that an ATA command has
1310  *	completed.  To be used from EH.
1311  */
1312 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1313 {
1314 	struct scsi_cmnd *scmd = qc->scsicmd;
1315 	scmd->retries = scmd->allowed;
1316 	__ata_eh_qc_complete(qc);
1317 }
1318 
1319 /**
1320  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1321  *	@qc: Command to retry
1322  *
1323  *	Indicate to the mid and upper layers that an ATA command
1324  *	should be retried.  To be used from EH.
1325  *
1326  *	SCSI midlayer limits the number of retries to scmd->allowed.
1327  *	scmd->allowed is incremented for commands which get retried
1328  *	due to unrelated failures (qc->err_mask is zero).
1329  */
1330 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1331 {
1332 	struct scsi_cmnd *scmd = qc->scsicmd;
1333 	if (!qc->err_mask)
1334 		scmd->allowed++;
1335 	__ata_eh_qc_complete(qc);
1336 }
1337 
1338 /**
1339  *	ata_dev_disable - disable ATA device
1340  *	@dev: ATA device to disable
1341  *
1342  *	Disable @dev.
1343  *
1344  *	Locking:
1345  *	EH context.
1346  */
1347 void ata_dev_disable(struct ata_device *dev)
1348 {
1349 	if (!ata_dev_enabled(dev))
1350 		return;
1351 
1352 	if (ata_msg_drv(dev->link->ap))
1353 		ata_dev_warn(dev, "disabled\n");
1354 	ata_acpi_on_disable(dev);
1355 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1356 	dev->class++;
1357 
1358 	/* From now till the next successful probe, ering is used to
1359 	 * track probe failures.  Clear accumulated device error info.
1360 	 */
1361 	ata_ering_clear(&dev->ering);
1362 }
1363 
1364 /**
1365  *	ata_eh_detach_dev - detach ATA device
1366  *	@dev: ATA device to detach
1367  *
1368  *	Detach @dev.
1369  *
1370  *	LOCKING:
1371  *	None.
1372  */
1373 void ata_eh_detach_dev(struct ata_device *dev)
1374 {
1375 	struct ata_link *link = dev->link;
1376 	struct ata_port *ap = link->ap;
1377 	struct ata_eh_context *ehc = &link->eh_context;
1378 	unsigned long flags;
1379 
1380 	ata_dev_disable(dev);
1381 
1382 	spin_lock_irqsave(ap->lock, flags);
1383 
1384 	dev->flags &= ~ATA_DFLAG_DETACH;
1385 
1386 	if (ata_scsi_offline_dev(dev)) {
1387 		dev->flags |= ATA_DFLAG_DETACHED;
1388 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1389 	}
1390 
1391 	/* clear per-dev EH info */
1392 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1393 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1394 	ehc->saved_xfer_mode[dev->devno] = 0;
1395 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1396 
1397 	spin_unlock_irqrestore(ap->lock, flags);
1398 }
1399 
1400 /**
1401  *	ata_eh_about_to_do - about to perform eh_action
1402  *	@link: target ATA link
1403  *	@dev: target ATA dev for per-dev action (can be NULL)
1404  *	@action: action about to be performed
1405  *
1406  *	Called just before performing EH actions to clear related bits
1407  *	in @link->eh_info such that eh actions are not unnecessarily
1408  *	repeated.
1409  *
1410  *	LOCKING:
1411  *	None.
1412  */
1413 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1414 			unsigned int action)
1415 {
1416 	struct ata_port *ap = link->ap;
1417 	struct ata_eh_info *ehi = &link->eh_info;
1418 	struct ata_eh_context *ehc = &link->eh_context;
1419 	unsigned long flags;
1420 
1421 	spin_lock_irqsave(ap->lock, flags);
1422 
1423 	ata_eh_clear_action(link, dev, ehi, action);
1424 
1425 	/* About to take EH action, set RECOVERED.  Ignore actions on
1426 	 * slave links as master will do them again.
1427 	 */
1428 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1429 		ap->pflags |= ATA_PFLAG_RECOVERED;
1430 
1431 	spin_unlock_irqrestore(ap->lock, flags);
1432 }
1433 
1434 /**
1435  *	ata_eh_done - EH action complete
1436 *	@ap: target ATA port
1437  *	@dev: target ATA dev for per-dev action (can be NULL)
1438  *	@action: action just completed
1439  *
1440  *	Called right after performing EH actions to clear related bits
1441  *	in @link->eh_context.
1442  *
1443  *	LOCKING:
1444  *	None.
1445  */
1446 void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1447 		 unsigned int action)
1448 {
1449 	struct ata_eh_context *ehc = &link->eh_context;
1450 
1451 	ata_eh_clear_action(link, dev, &ehc->i, action);
1452 }
1453 
1454 /**
1455  *	ata_err_string - convert err_mask to descriptive string
1456  *	@err_mask: error mask to convert to string
1457  *
1458  *	Convert @err_mask to descriptive string.  Errors are
1459  *	prioritized according to severity and only the most severe
1460  *	error is reported.
1461  *
1462  *	LOCKING:
1463  *	None.
1464  *
1465  *	RETURNS:
1466  *	Descriptive string for @err_mask
1467  */
1468 static const char *ata_err_string(unsigned int err_mask)
1469 {
1470 	if (err_mask & AC_ERR_HOST_BUS)
1471 		return "host bus error";
1472 	if (err_mask & AC_ERR_ATA_BUS)
1473 		return "ATA bus error";
1474 	if (err_mask & AC_ERR_TIMEOUT)
1475 		return "timeout";
1476 	if (err_mask & AC_ERR_HSM)
1477 		return "HSM violation";
1478 	if (err_mask & AC_ERR_SYSTEM)
1479 		return "internal error";
1480 	if (err_mask & AC_ERR_MEDIA)
1481 		return "media error";
1482 	if (err_mask & AC_ERR_INVALID)
1483 		return "invalid argument";
1484 	if (err_mask & AC_ERR_DEV)
1485 		return "device error";
1486 	return "unknown error";
1487 }
1488 
1489 /**
1490  *	ata_read_log_page - read a specific log page
1491  *	@dev: target device
1492  *	@log: log to read
1493  *	@page: page to read
1494  *	@buf: buffer to store read page
1495  *	@sectors: number of sectors to read
1496  *
1497  *	Read log page using READ_LOG_EXT command.
1498  *
1499  *	LOCKING:
1500  *	Kernel thread context (may sleep).
1501  *
1502  *	RETURNS:
1503  *	0 on success, AC_ERR_* mask otherwise.
1504  */
1505 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1506 			       u8 page, void *buf, unsigned int sectors)
1507 {
1508 	struct ata_taskfile tf;
1509 	unsigned int err_mask;
1510 
1511 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1512 
1513 	ata_tf_init(dev, &tf);
1514 	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id)) {
1515 		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
1516 		tf.protocol = ATA_PROT_DMA;
1517 	} else {
1518 		tf.command = ATA_CMD_READ_LOG_EXT;
1519 		tf.protocol = ATA_PROT_PIO;
1520 	}
1521 	tf.lbal = log;
1522 	tf.lbam = page;
1523 	tf.nsect = sectors;
1524 	tf.hob_nsect = sectors >> 8;
1525 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1526 
1527 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1528 				     buf, sectors * ATA_SECT_SIZE, 0);
1529 
1530 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
1531 	return err_mask;
1532 }
1533 
1534 /**
1535  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1536  *	@dev: Device to read log page 10h from
1537  *	@tag: Resulting tag of the failed command
1538  *	@tf: Resulting taskfile registers of the failed command
1539  *
1540  *	Read log page 10h to obtain NCQ error details and clear error
1541  *	condition.
1542  *
1543  *	LOCKING:
1544  *	Kernel thread context (may sleep).
1545  *
1546  *	RETURNS:
1547  *	0 on success, -errno otherwise.
1548  */
1549 static int ata_eh_read_log_10h(struct ata_device *dev,
1550 			       int *tag, struct ata_taskfile *tf)
1551 {
1552 	u8 *buf = dev->link->ap->sector_buf;
1553 	unsigned int err_mask;
1554 	u8 csum;
1555 	int i;
1556 
1557 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1558 	if (err_mask)
1559 		return -EIO;
1560 
1561 	csum = 0;
1562 	for (i = 0; i < ATA_SECT_SIZE; i++)
1563 		csum += buf[i];
1564 	if (csum)
1565 		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1566 			     csum);
1567 
1568 	if (buf[0] & 0x80)
1569 		return -ENOENT;
1570 
1571 	*tag = buf[0] & 0x1f;
1572 
1573 	tf->command = buf[2];
1574 	tf->feature = buf[3];
1575 	tf->lbal = buf[4];
1576 	tf->lbam = buf[5];
1577 	tf->lbah = buf[6];
1578 	tf->device = buf[7];
1579 	tf->hob_lbal = buf[8];
1580 	tf->hob_lbam = buf[9];
1581 	tf->hob_lbah = buf[10];
1582 	tf->nsect = buf[12];
1583 	tf->hob_nsect = buf[13];
1584 	if (ata_id_has_ncq_autosense(dev->id))
1585 		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
1586 
1587 	return 0;
1588 }
1589 
1590 /**
1591  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1592  *	@dev: target ATAPI device
1593  *	@r_sense_key: out parameter for sense_key
1594  *
1595  *	Perform ATAPI TEST_UNIT_READY.
1596  *
1597  *	LOCKING:
1598  *	EH context (may sleep).
1599  *
1600  *	RETURNS:
1601  *	0 on success, AC_ERR_* mask on failure.
1602  */
1603 unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1604 {
1605 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1606 	struct ata_taskfile tf;
1607 	unsigned int err_mask;
1608 
1609 	ata_tf_init(dev, &tf);
1610 
1611 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1612 	tf.command = ATA_CMD_PACKET;
1613 	tf.protocol = ATAPI_PROT_NODATA;
1614 
1615 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1616 	if (err_mask == AC_ERR_DEV)
1617 		*r_sense_key = tf.feature >> 4;
1618 	return err_mask;
1619 }
1620 
1621 /**
1622  *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1623  *	@dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
1624  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1625  *	@dfl_sense_key: default sense key to use
1626  *
1627  *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
1628  *	SENSE.  This function is EH helper.
1629  *
1630  *	LOCKING:
1631  *	Kernel thread context (may sleep).
1632  *
1633  *	RETURNS:
1634  *	encoded sense data on success, 0 on failure or if sense data
1635  *	is not available.
1636  */
1637 static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
1638 				struct scsi_cmnd *cmd)
1639 {
1640 	struct ata_device *dev = qc->dev;
1641 	struct ata_taskfile tf;
1642 	unsigned int err_mask;
1643 
1644 	if (!cmd)
1645 		return 0;
1646 
1647 	DPRINTK("ATA request sense\n");
1648 	ata_dev_warn(dev, "request sense\n");
1649 	if (!ata_id_sense_reporting_enabled(dev->id)) {
1650 		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
1651 		return 0;
1652 	}
1653 	ata_tf_init(dev, &tf);
1654 
1655 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1656 	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1657 	tf.command = ATA_CMD_REQ_SENSE_DATA;
1658 	tf.protocol = ATA_PROT_NODATA;
1659 
1660 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1661 	/*
1662 	 * ACS-4 states:
1663 	 * The device may set the SENSE DATA AVAILABLE bit to one in the
1664 	 * STATUS field and clear the ERROR bit to zero in the STATUS field
1665 	 * to indicate that the command returned completion without an error
1666 	 * and the sense data described in table 306 is available.
1667 	 *
1668 	 * IOW the 'ATA_SENSE' bit might not be set even though valid
1669 	 * sense data is available.
1670 	 * So check for both.
1671 	 */
1672 	if ((tf.command & ATA_SENSE) ||
1673 		tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
1674 		ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
1675 		qc->flags |= ATA_QCFLAG_SENSE_VALID;
1676 		ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
1677 			     tf.lbah, tf.lbam, tf.lbal);
1678 	} else {
1679 		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
1680 			     tf.command, err_mask);
1681 	}
1682 	return err_mask;
1683 }
1684 
1685 /**
1686  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1687  *	@dev: device to perform REQUEST_SENSE to
1688  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1689  *	@dfl_sense_key: default sense key to use
1690  *
1691  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1692  *	SENSE.  This function is EH helper.
1693  *
1694  *	LOCKING:
1695  *	Kernel thread context (may sleep).
1696  *
1697  *	RETURNS:
1698  *	0 on success, AC_ERR_* mask on failure
1699  */
1700 unsigned int atapi_eh_request_sense(struct ata_device *dev,
1701 					   u8 *sense_buf, u8 dfl_sense_key)
1702 {
1703 	u8 cdb[ATAPI_CDB_LEN] =
1704 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1705 	struct ata_port *ap = dev->link->ap;
1706 	struct ata_taskfile tf;
1707 
1708 	DPRINTK("ATAPI request sense\n");
1709 
1710 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1711 
1712 	/* initialize sense_buf with the error register,
1713 	 * for the case where they are -not- overwritten
1714 	 */
1715 	sense_buf[0] = 0x70;
1716 	sense_buf[2] = dfl_sense_key;
1717 
1718 	/* some devices time out if garbage left in tf */
1719 	ata_tf_init(dev, &tf);
1720 
1721 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1722 	tf.command = ATA_CMD_PACKET;
1723 
1724 	/* is it pointless to prefer PIO for "safety reasons"? */
1725 	if (ap->flags & ATA_FLAG_PIO_DMA) {
1726 		tf.protocol = ATAPI_PROT_DMA;
1727 		tf.feature |= ATAPI_PKT_DMA;
1728 	} else {
1729 		tf.protocol = ATAPI_PROT_PIO;
1730 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1731 		tf.lbah = 0;
1732 	}
1733 
1734 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1735 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1736 }
1737 
1738 /**
1739  *	ata_eh_analyze_serror - analyze SError for a failed port
1740  *	@link: ATA link to analyze SError for
1741  *
1742  *	Analyze SError if available and further determine cause of
1743  *	failure.
1744  *
1745  *	LOCKING:
1746  *	None.
1747  */
1748 static void ata_eh_analyze_serror(struct ata_link *link)
1749 {
1750 	struct ata_eh_context *ehc = &link->eh_context;
1751 	u32 serror = ehc->i.serror;
1752 	unsigned int err_mask = 0, action = 0;
1753 	u32 hotplug_mask;
1754 
1755 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1756 		err_mask |= AC_ERR_ATA_BUS;
1757 		action |= ATA_EH_RESET;
1758 	}
1759 	if (serror & SERR_PROTOCOL) {
1760 		err_mask |= AC_ERR_HSM;
1761 		action |= ATA_EH_RESET;
1762 	}
1763 	if (serror & SERR_INTERNAL) {
1764 		err_mask |= AC_ERR_SYSTEM;
1765 		action |= ATA_EH_RESET;
1766 	}
1767 
1768 	/* Determine whether a hotplug event has occurred.  Both
1769 	 * SError.N/X are considered hotplug events for enabled or
1770 	 * host links.  For disabled PMP links, only N bit is
1771 	 * considered as X bit is left at 1 for link plugging.
1772 	 */
1773 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
1774 		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
1775 	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1776 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1777 	else
1778 		hotplug_mask = SERR_PHYRDY_CHG;
1779 
1780 	if (serror & hotplug_mask)
1781 		ata_ehi_hotplugged(&ehc->i);
1782 
1783 	ehc->i.err_mask |= err_mask;
1784 	ehc->i.action |= action;
1785 }
1786 
1787 /**
1788  *	ata_eh_analyze_ncq_error - analyze NCQ error
1789  *	@link: ATA link to analyze NCQ error for
1790  *
1791  *	Read log page 10h, determine the offending qc and acquire
1792  *	error status TF.  For NCQ device errors, all LLDDs have to do
1793  *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
1794  *	care of the rest.
1795  *
1796  *	LOCKING:
1797  *	Kernel thread context (may sleep).
1798  */
1799 void ata_eh_analyze_ncq_error(struct ata_link *link)
1800 {
1801 	struct ata_port *ap = link->ap;
1802 	struct ata_eh_context *ehc = &link->eh_context;
1803 	struct ata_device *dev = link->device;
1804 	struct ata_queued_cmd *qc;
1805 	struct ata_taskfile tf;
1806 	int tag, rc;
1807 
1808 	/* if frozen, we can't do much */
1809 	if (ap->pflags & ATA_PFLAG_FROZEN)
1810 		return;
1811 
1812 	/* is it NCQ device error? */
1813 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1814 		return;
1815 
1816 	/* has LLDD analyzed already? */
1817 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1818 		qc = __ata_qc_from_tag(ap, tag);
1819 
1820 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1821 			continue;
1822 
1823 		if (qc->err_mask)
1824 			return;
1825 	}
1826 
1827 	/* okay, this error is ours */
1828 	memset(&tf, 0, sizeof(tf));
1829 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1830 	if (rc) {
1831 		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1832 			     rc);
1833 		return;
1834 	}
1835 
1836 	if (!(link->sactive & (1 << tag))) {
1837 		ata_link_err(link, "log page 10h reported inactive tag %d\n",
1838 			     tag);
1839 		return;
1840 	}
1841 
1842 	/* we've got the perpetrator, condemn it */
1843 	qc = __ata_qc_from_tag(ap, tag);
1844 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1845 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1846 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1847 	if (qc->result_tf.auxiliary) {
1848 		char sense_key, asc, ascq;
1849 
1850 		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
1851 		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
1852 		ascq = qc->result_tf.auxiliary & 0xff;
1853 		ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
1854 			    sense_key, asc, ascq);
1855 		ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
1856 		ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
1857 		qc->flags |= ATA_QCFLAG_SENSE_VALID;
1858 	}
1859 
1860 	ehc->i.err_mask &= ~AC_ERR_DEV;
1861 }
1862 
1863 /**
1864  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1865  *	@qc: qc to analyze
1866  *	@tf: Taskfile registers to analyze
1867  *
1868  *	Analyze taskfile of @qc and further determine cause of
1869  *	failure.  This function also requests ATAPI sense data if
1870  *	available.
1871  *
1872  *	LOCKING:
1873  *	Kernel thread context (may sleep).
1874  *
1875  *	RETURNS:
1876  *	Determined recovery action
1877  */
1878 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1879 				      const struct ata_taskfile *tf)
1880 {
1881 	unsigned int tmp, action = 0;
1882 	u8 stat = tf->command, err = tf->feature;
1883 
1884 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1885 		qc->err_mask |= AC_ERR_HSM;
1886 		return ATA_EH_RESET;
1887 	}
1888 
1889 	/*
1890 	 * Sense data reporting does not work if the
1891 	 * device fault bit is set.
1892 	 */
1893 	if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
1894 	    !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
1895 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1896 			tmp = ata_eh_request_sense(qc, qc->scsicmd);
1897 			if (tmp)
1898 				qc->err_mask |= tmp;
1899 			else
1900 				ata_scsi_set_sense_information(qc->scsicmd, tf);
1901 		} else {
1902 			ata_dev_warn(qc->dev, "sense data available but port frozen\n");
1903 		}
1904 	}
1905 
1906 	/* Set by NCQ autosense or request sense above */
1907 	if (qc->flags & ATA_QCFLAG_SENSE_VALID)
1908 		return 0;
1909 
1910 	if (stat & (ATA_ERR | ATA_DF))
1911 		qc->err_mask |= AC_ERR_DEV;
1912 	else
1913 		return 0;
1914 
1915 	switch (qc->dev->class) {
1916 	case ATA_DEV_ATA:
1917 	case ATA_DEV_ZAC:
1918 		if (err & ATA_ICRC)
1919 			qc->err_mask |= AC_ERR_ATA_BUS;
1920 		if (err & (ATA_UNC | ATA_AMNF))
1921 			qc->err_mask |= AC_ERR_MEDIA;
1922 		if (err & ATA_IDNF)
1923 			qc->err_mask |= AC_ERR_INVALID;
1924 		break;
1925 
1926 	case ATA_DEV_ATAPI:
1927 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1928 			tmp = atapi_eh_request_sense(qc->dev,
1929 						qc->scsicmd->sense_buffer,
1930 						qc->result_tf.feature >> 4);
1931 			if (!tmp) {
1932 				/* ATA_QCFLAG_SENSE_VALID is used to
1933 				 * tell atapi_qc_complete() that sense
1934 				 * data is already valid.
1935 				 *
1936 				 * TODO: interpret sense data and set
1937 				 * appropriate err_mask.
1938 				 */
1939 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1940 			} else
1941 				qc->err_mask |= tmp;
1942 		}
1943 	}
1944 
1945 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1946 		action |= ATA_EH_RESET;
1947 
1948 	return action;
1949 }
1950 
1951 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1952 				   int *xfer_ok)
1953 {
1954 	int base = 0;
1955 
1956 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1957 		*xfer_ok = 1;
1958 
1959 	if (!*xfer_ok)
1960 		base = ATA_ECAT_DUBIOUS_NONE;
1961 
1962 	if (err_mask & AC_ERR_ATA_BUS)
1963 		return base + ATA_ECAT_ATA_BUS;
1964 
1965 	if (err_mask & AC_ERR_TIMEOUT)
1966 		return base + ATA_ECAT_TOUT_HSM;
1967 
1968 	if (eflags & ATA_EFLAG_IS_IO) {
1969 		if (err_mask & AC_ERR_HSM)
1970 			return base + ATA_ECAT_TOUT_HSM;
1971 		if ((err_mask &
1972 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1973 			return base + ATA_ECAT_UNK_DEV;
1974 	}
1975 
1976 	return 0;
1977 }
1978 
1979 struct speed_down_verdict_arg {
1980 	u64 since;
1981 	int xfer_ok;
1982 	int nr_errors[ATA_ECAT_NR];
1983 };
1984 
1985 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1986 {
1987 	struct speed_down_verdict_arg *arg = void_arg;
1988 	int cat;
1989 
1990 	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1991 		return -1;
1992 
1993 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1994 				      &arg->xfer_ok);
1995 	arg->nr_errors[cat]++;
1996 
1997 	return 0;
1998 }
1999 
2000 /**
2001  *	ata_eh_speed_down_verdict - Determine speed down verdict
2002  *	@dev: Device of interest
2003  *
2004  *	This function examines error ring of @dev and determines
2005  *	whether NCQ needs to be turned off, transfer speed should be
2006  *	stepped down, or falling back to PIO is necessary.
2007  *
2008  *	ECAT_ATA_BUS	: ATA_BUS error for any command
2009  *
2010  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
2011  *			  IO commands
2012  *
2013  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
2014  *
2015  *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
2016  *			  data transfer hasn't been verified.
2017  *
2018  *	Verdicts are
2019  *
2020  *	NCQ_OFF		: Turn off NCQ.
2021  *
2022  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
2023  *			  to PIO.
2024  *
2025  *	FALLBACK_TO_PIO	: Fall back to PIO.
2026  *
2027  *	Even if multiple verdicts are returned, only one action is
2028  *	taken per error.  An action triggered by non-DUBIOUS errors
2029  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
2030  *	This is to expedite speed down decisions right after device is
2031  *	initially configured.
2032  *
2033  *	The followings are speed down rules.  #1 and #2 deal with
2034  *	DUBIOUS errors.
2035  *
2036  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
2037  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
2038  *
2039  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
2040  *	   occurred during last 5 mins, NCQ_OFF.
2041  *
2042  *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
2043  *	   occurred during last 5 mins, FALLBACK_TO_PIO
2044  *
2045  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
2046  *	   during last 10 mins, NCQ_OFF.
2047  *
2048  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
2049  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
2050  *
2051  *	LOCKING:
2052  *	Inherited from caller.
2053  *
2054  *	RETURNS:
2055  *	OR of ATA_EH_SPDN_* flags.
2056  */
2057 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
2058 {
2059 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
2060 	u64 j64 = get_jiffies_64();
2061 	struct speed_down_verdict_arg arg;
2062 	unsigned int verdict = 0;
2063 
2064 	/* scan past 5 mins of error history */
2065 	memset(&arg, 0, sizeof(arg));
2066 	arg.since = j64 - min(j64, j5mins);
2067 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
2068 
2069 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
2070 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
2071 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
2072 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
2073 
2074 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
2075 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
2076 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
2077 
2078 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
2079 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
2080 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2081 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
2082 
2083 	/* scan past 10 mins of error history */
2084 	memset(&arg, 0, sizeof(arg));
2085 	arg.since = j64 - min(j64, j10mins);
2086 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
2087 
2088 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
2089 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
2090 		verdict |= ATA_EH_SPDN_NCQ_OFF;
2091 
2092 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
2093 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
2094 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2095 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
2096 
2097 	return verdict;
2098 }
2099 
2100 /**
2101  *	ata_eh_speed_down - record error and speed down if necessary
2102  *	@dev: Failed device
2103  *	@eflags: mask of ATA_EFLAG_* flags
2104  *	@err_mask: err_mask of the error
2105  *
2106  *	Record error and examine error history to determine whether
2107  *	adjusting transmission speed is necessary.  It also sets
2108  *	transmission limits appropriately if such adjustment is
2109  *	necessary.
2110  *
2111  *	LOCKING:
2112  *	Kernel thread context (may sleep).
2113  *
2114  *	RETURNS:
2115  *	Determined recovery action.
2116  */
2117 static unsigned int ata_eh_speed_down(struct ata_device *dev,
2118 				unsigned int eflags, unsigned int err_mask)
2119 {
2120 	struct ata_link *link = ata_dev_phys_link(dev);
2121 	int xfer_ok = 0;
2122 	unsigned int verdict;
2123 	unsigned int action = 0;
2124 
2125 	/* don't bother if Cat-0 error */
2126 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
2127 		return 0;
2128 
2129 	/* record error and determine whether speed down is necessary */
2130 	ata_ering_record(&dev->ering, eflags, err_mask);
2131 	verdict = ata_eh_speed_down_verdict(dev);
2132 
2133 	/* turn off NCQ? */
2134 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
2135 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
2136 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
2137 		dev->flags |= ATA_DFLAG_NCQ_OFF;
2138 		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
2139 		goto done;
2140 	}
2141 
2142 	/* speed down? */
2143 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
2144 		/* speed down SATA link speed if possible */
2145 		if (sata_down_spd_limit(link, 0) == 0) {
2146 			action |= ATA_EH_RESET;
2147 			goto done;
2148 		}
2149 
2150 		/* lower transfer mode */
2151 		if (dev->spdn_cnt < 2) {
2152 			static const int dma_dnxfer_sel[] =
2153 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
2154 			static const int pio_dnxfer_sel[] =
2155 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
2156 			int sel;
2157 
2158 			if (dev->xfer_shift != ATA_SHIFT_PIO)
2159 				sel = dma_dnxfer_sel[dev->spdn_cnt];
2160 			else
2161 				sel = pio_dnxfer_sel[dev->spdn_cnt];
2162 
2163 			dev->spdn_cnt++;
2164 
2165 			if (ata_down_xfermask_limit(dev, sel) == 0) {
2166 				action |= ATA_EH_RESET;
2167 				goto done;
2168 			}
2169 		}
2170 	}
2171 
2172 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
2173 	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
2174 	 */
2175 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2176 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
2177 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
2178 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
2179 			dev->spdn_cnt = 0;
2180 			action |= ATA_EH_RESET;
2181 			goto done;
2182 		}
2183 	}
2184 
2185 	return 0;
2186  done:
2187 	/* device has been slowed down, blow error history */
2188 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
2189 		ata_ering_clear(&dev->ering);
2190 	return action;
2191 }
2192 
2193 /**
2194  *	ata_eh_worth_retry - analyze error and decide whether to retry
2195  *	@qc: qc to possibly retry
2196  *
2197  *	Look at the cause of the error and decide if a retry
2198  * 	might be useful or not.  We don't want to retry media errors
2199  *	because the drive itself has probably already taken 10-30 seconds
2200  *	doing its own internal retries before reporting the failure.
2201  */
2202 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
2203 {
2204 	if (qc->err_mask & AC_ERR_MEDIA)
2205 		return 0;	/* don't retry media errors */
2206 	if (qc->flags & ATA_QCFLAG_IO)
2207 		return 1;	/* otherwise retry anything from fs stack */
2208 	if (qc->err_mask & AC_ERR_INVALID)
2209 		return 0;	/* don't retry these */
2210 	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
2211 }
2212 
2213 /**
2214  *	ata_eh_link_autopsy - analyze error and determine recovery action
2215  *	@link: host link to perform autopsy on
2216  *
2217  *	Analyze why @link failed and determine which recovery actions
2218  *	are needed.  This function also sets more detailed AC_ERR_*
2219  *	values and fills sense data for ATAPI CHECK SENSE.
2220  *
2221  *	LOCKING:
2222  *	Kernel thread context (may sleep).
2223  */
2224 static void ata_eh_link_autopsy(struct ata_link *link)
2225 {
2226 	struct ata_port *ap = link->ap;
2227 	struct ata_eh_context *ehc = &link->eh_context;
2228 	struct ata_device *dev;
2229 	unsigned int all_err_mask = 0, eflags = 0;
2230 	int tag;
2231 	u32 serror;
2232 	int rc;
2233 
2234 	DPRINTK("ENTER\n");
2235 
2236 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2237 		return;
2238 
2239 	/* obtain and analyze SError */
2240 	rc = sata_scr_read(link, SCR_ERROR, &serror);
2241 	if (rc == 0) {
2242 		ehc->i.serror |= serror;
2243 		ata_eh_analyze_serror(link);
2244 	} else if (rc != -EOPNOTSUPP) {
2245 		/* SError read failed, force reset and probing */
2246 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
2247 		ehc->i.action |= ATA_EH_RESET;
2248 		ehc->i.err_mask |= AC_ERR_OTHER;
2249 	}
2250 
2251 	/* analyze NCQ failure */
2252 	ata_eh_analyze_ncq_error(link);
2253 
2254 	/* any real error trumps AC_ERR_OTHER */
2255 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2256 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2257 
2258 	all_err_mask |= ehc->i.err_mask;
2259 
2260 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2261 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2262 
2263 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2264 		    ata_dev_phys_link(qc->dev) != link)
2265 			continue;
2266 
2267 		/* inherit upper level err_mask */
2268 		qc->err_mask |= ehc->i.err_mask;
2269 
2270 		/* analyze TF */
2271 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2272 
2273 		/* DEV errors are probably spurious in case of ATA_BUS error */
2274 		if (qc->err_mask & AC_ERR_ATA_BUS)
2275 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2276 					  AC_ERR_INVALID);
2277 
2278 		/* any real error trumps unknown error */
2279 		if (qc->err_mask & ~AC_ERR_OTHER)
2280 			qc->err_mask &= ~AC_ERR_OTHER;
2281 
2282 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2283 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2284 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2285 
2286 		/* determine whether the command is worth retrying */
2287 		if (ata_eh_worth_retry(qc))
2288 			qc->flags |= ATA_QCFLAG_RETRY;
2289 
2290 		/* accumulate error info */
2291 		ehc->i.dev = qc->dev;
2292 		all_err_mask |= qc->err_mask;
2293 		if (qc->flags & ATA_QCFLAG_IO)
2294 			eflags |= ATA_EFLAG_IS_IO;
2295 		trace_ata_eh_link_autopsy_qc(qc);
2296 	}
2297 
2298 	/* enforce default EH actions */
2299 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2300 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2301 		ehc->i.action |= ATA_EH_RESET;
2302 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2303 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2304 		ehc->i.action |= ATA_EH_REVALIDATE;
2305 
2306 	/* If we have offending qcs and the associated failed device,
2307 	 * perform per-dev EH action only on the offending device.
2308 	 */
2309 	if (ehc->i.dev) {
2310 		ehc->i.dev_action[ehc->i.dev->devno] |=
2311 			ehc->i.action & ATA_EH_PERDEV_MASK;
2312 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2313 	}
2314 
2315 	/* propagate timeout to host link */
2316 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2317 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2318 
2319 	/* record error and consider speeding down */
2320 	dev = ehc->i.dev;
2321 	if (!dev && ((ata_link_max_devices(link) == 1 &&
2322 		      ata_dev_enabled(link->device))))
2323 	    dev = link->device;
2324 
2325 	if (dev) {
2326 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2327 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
2328 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2329 	}
2330 	trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
2331 	DPRINTK("EXIT\n");
2332 }
2333 
2334 /**
2335  *	ata_eh_autopsy - analyze error and determine recovery action
2336  *	@ap: host port to perform autopsy on
2337  *
2338  *	Analyze all links of @ap and determine why they failed and
2339  *	which recovery actions are needed.
2340  *
2341  *	LOCKING:
2342  *	Kernel thread context (may sleep).
2343  */
2344 void ata_eh_autopsy(struct ata_port *ap)
2345 {
2346 	struct ata_link *link;
2347 
2348 	ata_for_each_link(link, ap, EDGE)
2349 		ata_eh_link_autopsy(link);
2350 
2351 	/* Handle the frigging slave link.  Autopsy is done similarly
2352 	 * but actions and flags are transferred over to the master
2353 	 * link and handled from there.
2354 	 */
2355 	if (ap->slave_link) {
2356 		struct ata_eh_context *mehc = &ap->link.eh_context;
2357 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2358 
2359 		/* transfer control flags from master to slave */
2360 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2361 
2362 		/* perform autopsy on the slave link */
2363 		ata_eh_link_autopsy(ap->slave_link);
2364 
2365 		/* transfer actions from slave to master and clear slave */
2366 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2367 		mehc->i.action		|= sehc->i.action;
2368 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2369 		mehc->i.flags		|= sehc->i.flags;
2370 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2371 	}
2372 
2373 	/* Autopsy of fanout ports can affect host link autopsy.
2374 	 * Perform host link autopsy last.
2375 	 */
2376 	if (sata_pmp_attached(ap))
2377 		ata_eh_link_autopsy(&ap->link);
2378 }
2379 
2380 /**
2381  *	ata_get_cmd_descript - get description for ATA command
2382  *	@command: ATA command code to get description for
2383  *
2384  *	Return a textual description of the given command, or NULL if the
2385  *	command is not known.
2386  *
2387  *	LOCKING:
2388  *	None
2389  */
2390 const char *ata_get_cmd_descript(u8 command)
2391 {
2392 #ifdef CONFIG_ATA_VERBOSE_ERROR
2393 	static const struct
2394 	{
2395 		u8 command;
2396 		const char *text;
2397 	} cmd_descr[] = {
2398 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
2399 		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
2400 		{ ATA_CMD_STANDBY,		"STANDBY" },
2401 		{ ATA_CMD_IDLE,			"IDLE" },
2402 		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
2403 		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
2404 		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
2405 		{ ATA_CMD_NOP,			"NOP" },
2406 		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
2407 		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
2408 		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
2409 		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
2410 		{ ATA_CMD_SERVICE,		"SERVICE" },
2411 		{ ATA_CMD_READ,			"READ DMA" },
2412 		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
2413 		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
2414 		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
2415 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
2416 		{ ATA_CMD_WRITE,		"WRITE DMA" },
2417 		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
2418 		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
2419 		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
2420 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2421 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
2422 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2423 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
2424 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
2425 		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
2426 		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
2427 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
2428 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
2429 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
2430 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
2431 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
2432 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
2433 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
2434 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
2435 		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
2436 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
2437 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
2438 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
2439 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
2440 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
2441 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
2442 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
2443 		{ ATA_CMD_SLEEP,		"SLEEP" },
2444 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
2445 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
2446 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
2447 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
2448 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
2449 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
2450 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
2451 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
2452 		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
2453 		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
2454 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
2455 		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
2456 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
2457 		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
2458 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
2459 		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
2460 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
2461 		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
2462 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
2463 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
2464 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
2465 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
2466 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
2467 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
2468 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
2469 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
2470 		{ ATA_CMD_SMART,		"SMART" },
2471 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
2472 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2473 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
2474 		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
2475 		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
2476 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
2477 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
2478 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
2479 		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
2480 		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
2481 		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
2482 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
2483 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
2484 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
2485 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
2486 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
2487 		{ 0,				NULL } /* terminate list */
2488 	};
2489 
2490 	unsigned int i;
2491 	for (i = 0; cmd_descr[i].text; i++)
2492 		if (cmd_descr[i].command == command)
2493 			return cmd_descr[i].text;
2494 #endif
2495 
2496 	return NULL;
2497 }
2498 EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
2499 
2500 /**
2501  *	ata_eh_link_report - report error handling to user
2502  *	@link: ATA link EH is going on
2503  *
2504  *	Report EH to user.
2505  *
2506  *	LOCKING:
2507  *	None.
2508  */
2509 static void ata_eh_link_report(struct ata_link *link)
2510 {
2511 	struct ata_port *ap = link->ap;
2512 	struct ata_eh_context *ehc = &link->eh_context;
2513 	const char *frozen, *desc;
2514 	char tries_buf[6] = "";
2515 	int tag, nr_failed = 0;
2516 
2517 	if (ehc->i.flags & ATA_EHI_QUIET)
2518 		return;
2519 
2520 	desc = NULL;
2521 	if (ehc->i.desc[0] != '\0')
2522 		desc = ehc->i.desc;
2523 
2524 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2525 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2526 
2527 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2528 		    ata_dev_phys_link(qc->dev) != link ||
2529 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2530 		     qc->err_mask == AC_ERR_DEV))
2531 			continue;
2532 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2533 			continue;
2534 
2535 		nr_failed++;
2536 	}
2537 
2538 	if (!nr_failed && !ehc->i.err_mask)
2539 		return;
2540 
2541 	frozen = "";
2542 	if (ap->pflags & ATA_PFLAG_FROZEN)
2543 		frozen = " frozen";
2544 
2545 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2546 		snprintf(tries_buf, sizeof(tries_buf), " t%d",
2547 			 ap->eh_tries);
2548 
2549 	if (ehc->i.dev) {
2550 		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2551 			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2552 			    ehc->i.err_mask, link->sactive, ehc->i.serror,
2553 			    ehc->i.action, frozen, tries_buf);
2554 		if (desc)
2555 			ata_dev_err(ehc->i.dev, "%s\n", desc);
2556 	} else {
2557 		ata_link_err(link, "exception Emask 0x%x "
2558 			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2559 			     ehc->i.err_mask, link->sactive, ehc->i.serror,
2560 			     ehc->i.action, frozen, tries_buf);
2561 		if (desc)
2562 			ata_link_err(link, "%s\n", desc);
2563 	}
2564 
2565 #ifdef CONFIG_ATA_VERBOSE_ERROR
2566 	if (ehc->i.serror)
2567 		ata_link_err(link,
2568 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2569 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2570 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2571 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2572 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2573 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2574 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2575 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2576 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2577 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2578 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2579 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2580 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2581 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2582 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2583 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2584 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2585 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2586 #endif
2587 
2588 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2589 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2590 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2591 		char data_buf[20] = "";
2592 		char cdb_buf[70] = "";
2593 
2594 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2595 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2596 			continue;
2597 
2598 		if (qc->dma_dir != DMA_NONE) {
2599 			static const char *dma_str[] = {
2600 				[DMA_BIDIRECTIONAL]	= "bidi",
2601 				[DMA_TO_DEVICE]		= "out",
2602 				[DMA_FROM_DEVICE]	= "in",
2603 			};
2604 			static const char *prot_str[] = {
2605 				[ATA_PROT_PIO]		= "pio",
2606 				[ATA_PROT_DMA]		= "dma",
2607 				[ATA_PROT_NCQ]		= "ncq",
2608 				[ATAPI_PROT_PIO]	= "pio",
2609 				[ATAPI_PROT_DMA]	= "dma",
2610 			};
2611 
2612 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2613 				 prot_str[qc->tf.protocol], qc->nbytes,
2614 				 dma_str[qc->dma_dir]);
2615 		}
2616 
2617 		if (ata_is_atapi(qc->tf.protocol)) {
2618 			const u8 *cdb = qc->cdb;
2619 			size_t cdb_len = qc->dev->cdb_len;
2620 
2621 			if (qc->scsicmd) {
2622 				cdb = qc->scsicmd->cmnd;
2623 				cdb_len = qc->scsicmd->cmd_len;
2624 			}
2625 			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
2626 					      cdb, cdb_len);
2627 		} else {
2628 			const char *descr = ata_get_cmd_descript(cmd->command);
2629 			if (descr)
2630 				ata_dev_err(qc->dev, "failed command: %s\n",
2631 					    descr);
2632 		}
2633 
2634 		ata_dev_err(qc->dev,
2635 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2636 			"tag %d%s\n         %s"
2637 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2638 			"Emask 0x%x (%s)%s\n",
2639 			cmd->command, cmd->feature, cmd->nsect,
2640 			cmd->lbal, cmd->lbam, cmd->lbah,
2641 			cmd->hob_feature, cmd->hob_nsect,
2642 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2643 			cmd->device, qc->tag, data_buf, cdb_buf,
2644 			res->command, res->feature, res->nsect,
2645 			res->lbal, res->lbam, res->lbah,
2646 			res->hob_feature, res->hob_nsect,
2647 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
2648 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
2649 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2650 
2651 #ifdef CONFIG_ATA_VERBOSE_ERROR
2652 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2653 				    ATA_SENSE | ATA_ERR)) {
2654 			if (res->command & ATA_BUSY)
2655 				ata_dev_err(qc->dev, "status: { Busy }\n");
2656 			else
2657 				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
2658 				  res->command & ATA_DRDY ? "DRDY " : "",
2659 				  res->command & ATA_DF ? "DF " : "",
2660 				  res->command & ATA_DRQ ? "DRQ " : "",
2661 				  res->command & ATA_SENSE ? "SENSE " : "",
2662 				  res->command & ATA_ERR ? "ERR " : "");
2663 		}
2664 
2665 		if (cmd->command != ATA_CMD_PACKET &&
2666 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2667 				     ATA_IDNF | ATA_ABORTED)))
2668 			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2669 			  res->feature & ATA_ICRC ? "ICRC " : "",
2670 			  res->feature & ATA_UNC ? "UNC " : "",
2671 			  res->feature & ATA_AMNF ? "AMNF " : "",
2672 			  res->feature & ATA_IDNF ? "IDNF " : "",
2673 			  res->feature & ATA_ABORTED ? "ABRT " : "");
2674 #endif
2675 	}
2676 }
2677 
2678 /**
2679  *	ata_eh_report - report error handling to user
2680  *	@ap: ATA port to report EH about
2681  *
2682  *	Report EH to user.
2683  *
2684  *	LOCKING:
2685  *	None.
2686  */
2687 void ata_eh_report(struct ata_port *ap)
2688 {
2689 	struct ata_link *link;
2690 
2691 	ata_for_each_link(link, ap, HOST_FIRST)
2692 		ata_eh_link_report(link);
2693 }
2694 
2695 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2696 			unsigned int *classes, unsigned long deadline,
2697 			bool clear_classes)
2698 {
2699 	struct ata_device *dev;
2700 
2701 	if (clear_classes)
2702 		ata_for_each_dev(dev, link, ALL)
2703 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2704 
2705 	return reset(link, classes, deadline);
2706 }
2707 
2708 static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2709 {
2710 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2711 		return 0;
2712 	if (rc == -EAGAIN)
2713 		return 1;
2714 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2715 		return 1;
2716 	return 0;
2717 }
2718 
2719 int ata_eh_reset(struct ata_link *link, int classify,
2720 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2721 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2722 {
2723 	struct ata_port *ap = link->ap;
2724 	struct ata_link *slave = ap->slave_link;
2725 	struct ata_eh_context *ehc = &link->eh_context;
2726 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2727 	unsigned int *classes = ehc->classes;
2728 	unsigned int lflags = link->flags;
2729 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2730 	int max_tries = 0, try = 0;
2731 	struct ata_link *failed_link;
2732 	struct ata_device *dev;
2733 	unsigned long deadline, now;
2734 	ata_reset_fn_t reset;
2735 	unsigned long flags;
2736 	u32 sstatus;
2737 	int nr_unknown, rc;
2738 
2739 	/*
2740 	 * Prepare to reset
2741 	 */
2742 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2743 		max_tries++;
2744 	if (link->flags & ATA_LFLAG_RST_ONCE)
2745 		max_tries = 1;
2746 	if (link->flags & ATA_LFLAG_NO_HRST)
2747 		hardreset = NULL;
2748 	if (link->flags & ATA_LFLAG_NO_SRST)
2749 		softreset = NULL;
2750 
2751 	/* make sure each reset attempt is at least COOL_DOWN apart */
2752 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
2753 		now = jiffies;
2754 		WARN_ON(time_after(ehc->last_reset, now));
2755 		deadline = ata_deadline(ehc->last_reset,
2756 					ATA_EH_RESET_COOL_DOWN);
2757 		if (time_before(now, deadline))
2758 			schedule_timeout_uninterruptible(deadline - now);
2759 	}
2760 
2761 	spin_lock_irqsave(ap->lock, flags);
2762 	ap->pflags |= ATA_PFLAG_RESETTING;
2763 	spin_unlock_irqrestore(ap->lock, flags);
2764 
2765 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2766 
2767 	ata_for_each_dev(dev, link, ALL) {
2768 		/* If we issue an SRST then an ATA drive (not ATAPI)
2769 		 * may change configuration and be in PIO0 timing. If
2770 		 * we do a hard reset (or are coming from power on)
2771 		 * this is true for ATA or ATAPI. Until we've set a
2772 		 * suitable controller mode we should not touch the
2773 		 * bus as we may be talking too fast.
2774 		 */
2775 		dev->pio_mode = XFER_PIO_0;
2776 		dev->dma_mode = 0xff;
2777 
2778 		/* If the controller has a pio mode setup function
2779 		 * then use it to set the chipset to rights. Don't
2780 		 * touch the DMA setup as that will be dealt with when
2781 		 * configuring devices.
2782 		 */
2783 		if (ap->ops->set_piomode)
2784 			ap->ops->set_piomode(ap, dev);
2785 	}
2786 
2787 	/* prefer hardreset */
2788 	reset = NULL;
2789 	ehc->i.action &= ~ATA_EH_RESET;
2790 	if (hardreset) {
2791 		reset = hardreset;
2792 		ehc->i.action |= ATA_EH_HARDRESET;
2793 	} else if (softreset) {
2794 		reset = softreset;
2795 		ehc->i.action |= ATA_EH_SOFTRESET;
2796 	}
2797 
2798 	if (prereset) {
2799 		unsigned long deadline = ata_deadline(jiffies,
2800 						      ATA_EH_PRERESET_TIMEOUT);
2801 
2802 		if (slave) {
2803 			sehc->i.action &= ~ATA_EH_RESET;
2804 			sehc->i.action |= ehc->i.action;
2805 		}
2806 
2807 		rc = prereset(link, deadline);
2808 
2809 		/* If present, do prereset on slave link too.  Reset
2810 		 * is skipped iff both master and slave links report
2811 		 * -ENOENT or clear ATA_EH_RESET.
2812 		 */
2813 		if (slave && (rc == 0 || rc == -ENOENT)) {
2814 			int tmp;
2815 
2816 			tmp = prereset(slave, deadline);
2817 			if (tmp != -ENOENT)
2818 				rc = tmp;
2819 
2820 			ehc->i.action |= sehc->i.action;
2821 		}
2822 
2823 		if (rc) {
2824 			if (rc == -ENOENT) {
2825 				ata_link_dbg(link, "port disabled--ignoring\n");
2826 				ehc->i.action &= ~ATA_EH_RESET;
2827 
2828 				ata_for_each_dev(dev, link, ALL)
2829 					classes[dev->devno] = ATA_DEV_NONE;
2830 
2831 				rc = 0;
2832 			} else
2833 				ata_link_err(link,
2834 					     "prereset failed (errno=%d)\n",
2835 					     rc);
2836 			goto out;
2837 		}
2838 
2839 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2840 		 * bang classes, thaw and return.
2841 		 */
2842 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2843 			ata_for_each_dev(dev, link, ALL)
2844 				classes[dev->devno] = ATA_DEV_NONE;
2845 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2846 			    ata_is_host_link(link))
2847 				ata_eh_thaw_port(ap);
2848 			rc = 0;
2849 			goto out;
2850 		}
2851 	}
2852 
2853  retry:
2854 	/*
2855 	 * Perform reset
2856 	 */
2857 	if (ata_is_host_link(link))
2858 		ata_eh_freeze_port(ap);
2859 
2860 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2861 
2862 	if (reset) {
2863 		if (verbose)
2864 			ata_link_info(link, "%s resetting link\n",
2865 				      reset == softreset ? "soft" : "hard");
2866 
2867 		/* mark that this EH session started with reset */
2868 		ehc->last_reset = jiffies;
2869 		if (reset == hardreset)
2870 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2871 		else
2872 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2873 
2874 		rc = ata_do_reset(link, reset, classes, deadline, true);
2875 		if (rc && rc != -EAGAIN) {
2876 			failed_link = link;
2877 			goto fail;
2878 		}
2879 
2880 		/* hardreset slave link if existent */
2881 		if (slave && reset == hardreset) {
2882 			int tmp;
2883 
2884 			if (verbose)
2885 				ata_link_info(slave, "hard resetting link\n");
2886 
2887 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2888 			tmp = ata_do_reset(slave, reset, classes, deadline,
2889 					   false);
2890 			switch (tmp) {
2891 			case -EAGAIN:
2892 				rc = -EAGAIN;
2893 			case 0:
2894 				break;
2895 			default:
2896 				failed_link = slave;
2897 				rc = tmp;
2898 				goto fail;
2899 			}
2900 		}
2901 
2902 		/* perform follow-up SRST if necessary */
2903 		if (reset == hardreset &&
2904 		    ata_eh_followup_srst_needed(link, rc)) {
2905 			reset = softreset;
2906 
2907 			if (!reset) {
2908 				ata_link_err(link,
2909 	     "follow-up softreset required but no softreset available\n");
2910 				failed_link = link;
2911 				rc = -EINVAL;
2912 				goto fail;
2913 			}
2914 
2915 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2916 			rc = ata_do_reset(link, reset, classes, deadline, true);
2917 			if (rc) {
2918 				failed_link = link;
2919 				goto fail;
2920 			}
2921 		}
2922 	} else {
2923 		if (verbose)
2924 			ata_link_info(link,
2925 	"no reset method available, skipping reset\n");
2926 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2927 			lflags |= ATA_LFLAG_ASSUME_ATA;
2928 	}
2929 
2930 	/*
2931 	 * Post-reset processing
2932 	 */
2933 	ata_for_each_dev(dev, link, ALL) {
2934 		/* After the reset, the device state is PIO 0 and the
2935 		 * controller state is undefined.  Reset also wakes up
2936 		 * drives from sleeping mode.
2937 		 */
2938 		dev->pio_mode = XFER_PIO_0;
2939 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2940 
2941 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2942 			continue;
2943 
2944 		/* apply class override */
2945 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2946 			classes[dev->devno] = ATA_DEV_ATA;
2947 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2948 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2949 	}
2950 
2951 	/* record current link speed */
2952 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2953 		link->sata_spd = (sstatus >> 4) & 0xf;
2954 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2955 		slave->sata_spd = (sstatus >> 4) & 0xf;
2956 
2957 	/* thaw the port */
2958 	if (ata_is_host_link(link))
2959 		ata_eh_thaw_port(ap);
2960 
2961 	/* postreset() should clear hardware SError.  Although SError
2962 	 * is cleared during link resume, clearing SError here is
2963 	 * necessary as some PHYs raise hotplug events after SRST.
2964 	 * This introduces race condition where hotplug occurs between
2965 	 * reset and here.  This race is mediated by cross checking
2966 	 * link onlineness and classification result later.
2967 	 */
2968 	if (postreset) {
2969 		postreset(link, classes);
2970 		if (slave)
2971 			postreset(slave, classes);
2972 	}
2973 
2974 	/*
2975 	 * Some controllers can't be frozen very well and may set spurious
2976 	 * error conditions during reset.  Clear accumulated error
2977 	 * information and re-thaw the port if frozen.  As reset is the
2978 	 * final recovery action and we cross check link onlineness against
2979 	 * device classification later, no hotplug event is lost by this.
2980 	 */
2981 	spin_lock_irqsave(link->ap->lock, flags);
2982 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2983 	if (slave)
2984 		memset(&slave->eh_info, 0, sizeof(link->eh_info));
2985 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2986 	spin_unlock_irqrestore(link->ap->lock, flags);
2987 
2988 	if (ap->pflags & ATA_PFLAG_FROZEN)
2989 		ata_eh_thaw_port(ap);
2990 
2991 	/*
2992 	 * Make sure onlineness and classification result correspond.
2993 	 * Hotplug could have happened during reset and some
2994 	 * controllers fail to wait while a drive is spinning up after
2995 	 * being hotplugged causing misdetection.  By cross checking
2996 	 * link on/offlineness and classification result, those
2997 	 * conditions can be reliably detected and retried.
2998 	 */
2999 	nr_unknown = 0;
3000 	ata_for_each_dev(dev, link, ALL) {
3001 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
3002 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
3003 				ata_dev_dbg(dev, "link online but device misclassified\n");
3004 				classes[dev->devno] = ATA_DEV_NONE;
3005 				nr_unknown++;
3006 			}
3007 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3008 			if (ata_class_enabled(classes[dev->devno]))
3009 				ata_dev_dbg(dev,
3010 					    "link offline, clearing class %d to NONE\n",
3011 					    classes[dev->devno]);
3012 			classes[dev->devno] = ATA_DEV_NONE;
3013 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
3014 			ata_dev_dbg(dev,
3015 				    "link status unknown, clearing UNKNOWN to NONE\n");
3016 			classes[dev->devno] = ATA_DEV_NONE;
3017 		}
3018 	}
3019 
3020 	if (classify && nr_unknown) {
3021 		if (try < max_tries) {
3022 			ata_link_warn(link,
3023 				      "link online but %d devices misclassified, retrying\n",
3024 				      nr_unknown);
3025 			failed_link = link;
3026 			rc = -EAGAIN;
3027 			goto fail;
3028 		}
3029 		ata_link_warn(link,
3030 			      "link online but %d devices misclassified, "
3031 			      "device detection might fail\n", nr_unknown);
3032 	}
3033 
3034 	/* reset successful, schedule revalidation */
3035 	ata_eh_done(link, NULL, ATA_EH_RESET);
3036 	if (slave)
3037 		ata_eh_done(slave, NULL, ATA_EH_RESET);
3038 	ehc->last_reset = jiffies;		/* update to completion time */
3039 	ehc->i.action |= ATA_EH_REVALIDATE;
3040 	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
3041 
3042 	rc = 0;
3043  out:
3044 	/* clear hotplug flag */
3045 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3046 	if (slave)
3047 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3048 
3049 	spin_lock_irqsave(ap->lock, flags);
3050 	ap->pflags &= ~ATA_PFLAG_RESETTING;
3051 	spin_unlock_irqrestore(ap->lock, flags);
3052 
3053 	return rc;
3054 
3055  fail:
3056 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
3057 	if (!ata_is_host_link(link) &&
3058 	    sata_scr_read(link, SCR_STATUS, &sstatus))
3059 		rc = -ERESTART;
3060 
3061 	if (try >= max_tries) {
3062 		/*
3063 		 * Thaw host port even if reset failed, so that the port
3064 		 * can be retried on the next phy event.  This risks
3065 		 * repeated EH runs but seems to be a better tradeoff than
3066 		 * shutting down a port after a botched hotplug attempt.
3067 		 */
3068 		if (ata_is_host_link(link))
3069 			ata_eh_thaw_port(ap);
3070 		goto out;
3071 	}
3072 
3073 	now = jiffies;
3074 	if (time_before(now, deadline)) {
3075 		unsigned long delta = deadline - now;
3076 
3077 		ata_link_warn(failed_link,
3078 			"reset failed (errno=%d), retrying in %u secs\n",
3079 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
3080 
3081 		ata_eh_release(ap);
3082 		while (delta)
3083 			delta = schedule_timeout_uninterruptible(delta);
3084 		ata_eh_acquire(ap);
3085 	}
3086 
3087 	/*
3088 	 * While disks spinup behind PMP, some controllers fail sending SRST.
3089 	 * They need to be reset - as well as the PMP - before retrying.
3090 	 */
3091 	if (rc == -ERESTART) {
3092 		if (ata_is_host_link(link))
3093 			ata_eh_thaw_port(ap);
3094 		goto out;
3095 	}
3096 
3097 	if (try == max_tries - 1) {
3098 		sata_down_spd_limit(link, 0);
3099 		if (slave)
3100 			sata_down_spd_limit(slave, 0);
3101 	} else if (rc == -EPIPE)
3102 		sata_down_spd_limit(failed_link, 0);
3103 
3104 	if (hardreset)
3105 		reset = hardreset;
3106 	goto retry;
3107 }
3108 
3109 static inline void ata_eh_pull_park_action(struct ata_port *ap)
3110 {
3111 	struct ata_link *link;
3112 	struct ata_device *dev;
3113 	unsigned long flags;
3114 
3115 	/*
3116 	 * This function can be thought of as an extended version of
3117 	 * ata_eh_about_to_do() specially crafted to accommodate the
3118 	 * requirements of ATA_EH_PARK handling. Since the EH thread
3119 	 * does not leave the do {} while () loop in ata_eh_recover as
3120 	 * long as the timeout for a park request to *one* device on
3121 	 * the port has not expired, and since we still want to pick
3122 	 * up park requests to other devices on the same port or
3123 	 * timeout updates for the same device, we have to pull
3124 	 * ATA_EH_PARK actions from eh_info into eh_context.i
3125 	 * ourselves at the beginning of each pass over the loop.
3126 	 *
3127 	 * Additionally, all write accesses to &ap->park_req_pending
3128 	 * through reinit_completion() (see below) or complete_all()
3129 	 * (see ata_scsi_park_store()) are protected by the host lock.
3130 	 * As a result we have that park_req_pending.done is zero on
3131 	 * exit from this function, i.e. when ATA_EH_PARK actions for
3132 	 * *all* devices on port ap have been pulled into the
3133 	 * respective eh_context structs. If, and only if,
3134 	 * park_req_pending.done is non-zero by the time we reach
3135 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
3136 	 * has been scheduled for at least one of the devices on port
3137 	 * ap and we have to cycle over the do {} while () loop in
3138 	 * ata_eh_recover() again.
3139 	 */
3140 
3141 	spin_lock_irqsave(ap->lock, flags);
3142 	reinit_completion(&ap->park_req_pending);
3143 	ata_for_each_link(link, ap, EDGE) {
3144 		ata_for_each_dev(dev, link, ALL) {
3145 			struct ata_eh_info *ehi = &link->eh_info;
3146 
3147 			link->eh_context.i.dev_action[dev->devno] |=
3148 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
3149 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
3150 		}
3151 	}
3152 	spin_unlock_irqrestore(ap->lock, flags);
3153 }
3154 
3155 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
3156 {
3157 	struct ata_eh_context *ehc = &dev->link->eh_context;
3158 	struct ata_taskfile tf;
3159 	unsigned int err_mask;
3160 
3161 	ata_tf_init(dev, &tf);
3162 	if (park) {
3163 		ehc->unloaded_mask |= 1 << dev->devno;
3164 		tf.command = ATA_CMD_IDLEIMMEDIATE;
3165 		tf.feature = 0x44;
3166 		tf.lbal = 0x4c;
3167 		tf.lbam = 0x4e;
3168 		tf.lbah = 0x55;
3169 	} else {
3170 		ehc->unloaded_mask &= ~(1 << dev->devno);
3171 		tf.command = ATA_CMD_CHK_POWER;
3172 	}
3173 
3174 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3175 	tf.protocol |= ATA_PROT_NODATA;
3176 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3177 	if (park && (err_mask || tf.lbal != 0xc4)) {
3178 		ata_dev_err(dev, "head unload failed!\n");
3179 		ehc->unloaded_mask &= ~(1 << dev->devno);
3180 	}
3181 }
3182 
3183 static int ata_eh_revalidate_and_attach(struct ata_link *link,
3184 					struct ata_device **r_failed_dev)
3185 {
3186 	struct ata_port *ap = link->ap;
3187 	struct ata_eh_context *ehc = &link->eh_context;
3188 	struct ata_device *dev;
3189 	unsigned int new_mask = 0;
3190 	unsigned long flags;
3191 	int rc = 0;
3192 
3193 	DPRINTK("ENTER\n");
3194 
3195 	/* For PATA drive side cable detection to work, IDENTIFY must
3196 	 * be done backwards such that PDIAG- is released by the slave
3197 	 * device before the master device is identified.
3198 	 */
3199 	ata_for_each_dev(dev, link, ALL_REVERSE) {
3200 		unsigned int action = ata_eh_dev_action(dev);
3201 		unsigned int readid_flags = 0;
3202 
3203 		if (ehc->i.flags & ATA_EHI_DID_RESET)
3204 			readid_flags |= ATA_READID_POSTRESET;
3205 
3206 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3207 			WARN_ON(dev->class == ATA_DEV_PMP);
3208 
3209 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3210 				rc = -EIO;
3211 				goto err;
3212 			}
3213 
3214 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3215 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3216 						readid_flags);
3217 			if (rc)
3218 				goto err;
3219 
3220 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3221 
3222 			/* Configuration may have changed, reconfigure
3223 			 * transfer mode.
3224 			 */
3225 			ehc->i.flags |= ATA_EHI_SETMODE;
3226 
3227 			/* schedule the scsi_rescan_device() here */
3228 			schedule_work(&(ap->scsi_rescan_task));
3229 		} else if (dev->class == ATA_DEV_UNKNOWN &&
3230 			   ehc->tries[dev->devno] &&
3231 			   ata_class_enabled(ehc->classes[dev->devno])) {
3232 			/* Temporarily set dev->class, it will be
3233 			 * permanently set once all configurations are
3234 			 * complete.  This is necessary because new
3235 			 * device configuration is done in two
3236 			 * separate loops.
3237 			 */
3238 			dev->class = ehc->classes[dev->devno];
3239 
3240 			if (dev->class == ATA_DEV_PMP)
3241 				rc = sata_pmp_attach(dev);
3242 			else
3243 				rc = ata_dev_read_id(dev, &dev->class,
3244 						     readid_flags, dev->id);
3245 
3246 			/* read_id might have changed class, store and reset */
3247 			ehc->classes[dev->devno] = dev->class;
3248 			dev->class = ATA_DEV_UNKNOWN;
3249 
3250 			switch (rc) {
3251 			case 0:
3252 				/* clear error info accumulated during probe */
3253 				ata_ering_clear(&dev->ering);
3254 				new_mask |= 1 << dev->devno;
3255 				break;
3256 			case -ENOENT:
3257 				/* IDENTIFY was issued to non-existent
3258 				 * device.  No need to reset.  Just
3259 				 * thaw and ignore the device.
3260 				 */
3261 				ata_eh_thaw_port(ap);
3262 				break;
3263 			default:
3264 				goto err;
3265 			}
3266 		}
3267 	}
3268 
3269 	/* PDIAG- should have been released, ask cable type if post-reset */
3270 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
3271 		if (ap->ops->cable_detect)
3272 			ap->cbl = ap->ops->cable_detect(ap);
3273 		ata_force_cbl(ap);
3274 	}
3275 
3276 	/* Configure new devices forward such that user doesn't see
3277 	 * device detection messages backwards.
3278 	 */
3279 	ata_for_each_dev(dev, link, ALL) {
3280 		if (!(new_mask & (1 << dev->devno)))
3281 			continue;
3282 
3283 		dev->class = ehc->classes[dev->devno];
3284 
3285 		if (dev->class == ATA_DEV_PMP)
3286 			continue;
3287 
3288 		ehc->i.flags |= ATA_EHI_PRINTINFO;
3289 		rc = ata_dev_configure(dev);
3290 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3291 		if (rc) {
3292 			dev->class = ATA_DEV_UNKNOWN;
3293 			goto err;
3294 		}
3295 
3296 		spin_lock_irqsave(ap->lock, flags);
3297 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3298 		spin_unlock_irqrestore(ap->lock, flags);
3299 
3300 		/* new device discovered, configure xfermode */
3301 		ehc->i.flags |= ATA_EHI_SETMODE;
3302 	}
3303 
3304 	return 0;
3305 
3306  err:
3307 	*r_failed_dev = dev;
3308 	DPRINTK("EXIT rc=%d\n", rc);
3309 	return rc;
3310 }
3311 
3312 /**
3313  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
3314  *	@link: link on which timings will be programmed
3315  *	@r_failed_dev: out parameter for failed device
3316  *
3317  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3318  *	ata_set_mode() fails, pointer to the failing device is
3319  *	returned in @r_failed_dev.
3320  *
3321  *	LOCKING:
3322  *	PCI/etc. bus probe sem.
3323  *
3324  *	RETURNS:
3325  *	0 on success, negative errno otherwise
3326  */
3327 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3328 {
3329 	struct ata_port *ap = link->ap;
3330 	struct ata_device *dev;
3331 	int rc;
3332 
3333 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3334 	ata_for_each_dev(dev, link, ENABLED) {
3335 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3336 			struct ata_ering_entry *ent;
3337 
3338 			ent = ata_ering_top(&dev->ering);
3339 			if (ent)
3340 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3341 		}
3342 	}
3343 
3344 	/* has private set_mode? */
3345 	if (ap->ops->set_mode)
3346 		rc = ap->ops->set_mode(link, r_failed_dev);
3347 	else
3348 		rc = ata_do_set_mode(link, r_failed_dev);
3349 
3350 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
3351 	ata_for_each_dev(dev, link, ENABLED) {
3352 		struct ata_eh_context *ehc = &link->eh_context;
3353 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3354 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3355 
3356 		if (dev->xfer_mode != saved_xfer_mode ||
3357 		    ata_ncq_enabled(dev) != saved_ncq)
3358 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3359 	}
3360 
3361 	return rc;
3362 }
3363 
3364 /**
3365  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3366  *	@dev: ATAPI device to clear UA for
3367  *
3368  *	Resets and other operations can make an ATAPI device raise
3369  *	UNIT ATTENTION which causes the next operation to fail.  This
3370  *	function clears UA.
3371  *
3372  *	LOCKING:
3373  *	EH context (may sleep).
3374  *
3375  *	RETURNS:
3376  *	0 on success, -errno on failure.
3377  */
3378 static int atapi_eh_clear_ua(struct ata_device *dev)
3379 {
3380 	int i;
3381 
3382 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3383 		u8 *sense_buffer = dev->link->ap->sector_buf;
3384 		u8 sense_key = 0;
3385 		unsigned int err_mask;
3386 
3387 		err_mask = atapi_eh_tur(dev, &sense_key);
3388 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3389 			ata_dev_warn(dev,
3390 				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3391 				     err_mask);
3392 			return -EIO;
3393 		}
3394 
3395 		if (!err_mask || sense_key != UNIT_ATTENTION)
3396 			return 0;
3397 
3398 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3399 		if (err_mask) {
3400 			ata_dev_warn(dev, "failed to clear "
3401 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3402 			return -EIO;
3403 		}
3404 	}
3405 
3406 	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3407 		     ATA_EH_UA_TRIES);
3408 
3409 	return 0;
3410 }
3411 
3412 /**
3413  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3414  *	@dev: ATA device which may need FLUSH retry
3415  *
3416  *	If @dev failed FLUSH, it needs to be reported upper layer
3417  *	immediately as it means that @dev failed to remap and already
3418  *	lost at least a sector and further FLUSH retrials won't make
3419  *	any difference to the lost sector.  However, if FLUSH failed
3420  *	for other reasons, for example transmission error, FLUSH needs
3421  *	to be retried.
3422  *
3423  *	This function determines whether FLUSH failure retry is
3424  *	necessary and performs it if so.
3425  *
3426  *	RETURNS:
3427  *	0 if EH can continue, -errno if EH needs to be repeated.
3428  */
3429 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3430 {
3431 	struct ata_link *link = dev->link;
3432 	struct ata_port *ap = link->ap;
3433 	struct ata_queued_cmd *qc;
3434 	struct ata_taskfile tf;
3435 	unsigned int err_mask;
3436 	int rc = 0;
3437 
3438 	/* did flush fail for this device? */
3439 	if (!ata_tag_valid(link->active_tag))
3440 		return 0;
3441 
3442 	qc = __ata_qc_from_tag(ap, link->active_tag);
3443 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3444 			       qc->tf.command != ATA_CMD_FLUSH))
3445 		return 0;
3446 
3447 	/* if the device failed it, it should be reported to upper layers */
3448 	if (qc->err_mask & AC_ERR_DEV)
3449 		return 0;
3450 
3451 	/* flush failed for some other reason, give it another shot */
3452 	ata_tf_init(dev, &tf);
3453 
3454 	tf.command = qc->tf.command;
3455 	tf.flags |= ATA_TFLAG_DEVICE;
3456 	tf.protocol = ATA_PROT_NODATA;
3457 
3458 	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
3459 		       tf.command, qc->err_mask);
3460 
3461 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3462 	if (!err_mask) {
3463 		/*
3464 		 * FLUSH is complete but there's no way to
3465 		 * successfully complete a failed command from EH.
3466 		 * Making sure retry is allowed at least once and
3467 		 * retrying it should do the trick - whatever was in
3468 		 * the cache is already on the platter and this won't
3469 		 * cause infinite loop.
3470 		 */
3471 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3472 	} else {
3473 		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
3474 			       err_mask);
3475 		rc = -EIO;
3476 
3477 		/* if device failed it, report it to upper layers */
3478 		if (err_mask & AC_ERR_DEV) {
3479 			qc->err_mask |= AC_ERR_DEV;
3480 			qc->result_tf = tf;
3481 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
3482 				rc = 0;
3483 		}
3484 	}
3485 	return rc;
3486 }
3487 
3488 /**
3489  *	ata_eh_set_lpm - configure SATA interface power management
3490  *	@link: link to configure power management
3491  *	@policy: the link power management policy
3492  *	@r_failed_dev: out parameter for failed device
3493  *
3494  *	Enable SATA Interface power management.  This will enable
3495  *	Device Interface Power Management (DIPM) for min_power
3496  * 	policy, and then call driver specific callbacks for
3497  *	enabling Host Initiated Power management.
3498  *
3499  *	LOCKING:
3500  *	EH context.
3501  *
3502  *	RETURNS:
3503  *	0 on success, -errno on failure.
3504  */
3505 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3506 			  struct ata_device **r_failed_dev)
3507 {
3508 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3509 	struct ata_eh_context *ehc = &link->eh_context;
3510 	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3511 	enum ata_lpm_policy old_policy = link->lpm_policy;
3512 	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3513 	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3514 	unsigned int err_mask;
3515 	int rc;
3516 
3517 	/* if the link or host doesn't do LPM, noop */
3518 	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3519 		return 0;
3520 
3521 	/*
3522 	 * DIPM is enabled only for MIN_POWER as some devices
3523 	 * misbehave when the host NACKs transition to SLUMBER.  Order
3524 	 * device and link configurations such that the host always
3525 	 * allows DIPM requests.
3526 	 */
3527 	ata_for_each_dev(dev, link, ENABLED) {
3528 		bool hipm = ata_id_has_hipm(dev->id);
3529 		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3530 
3531 		/* find the first enabled and LPM enabled devices */
3532 		if (!link_dev)
3533 			link_dev = dev;
3534 
3535 		if (!lpm_dev && (hipm || dipm))
3536 			lpm_dev = dev;
3537 
3538 		hints &= ~ATA_LPM_EMPTY;
3539 		if (!hipm)
3540 			hints &= ~ATA_LPM_HIPM;
3541 
3542 		/* disable DIPM before changing link config */
3543 		if (policy != ATA_LPM_MIN_POWER && dipm) {
3544 			err_mask = ata_dev_set_feature(dev,
3545 					SETFEATURES_SATA_DISABLE, SATA_DIPM);
3546 			if (err_mask && err_mask != AC_ERR_DEV) {
3547 				ata_dev_warn(dev,
3548 					     "failed to disable DIPM, Emask 0x%x\n",
3549 					     err_mask);
3550 				rc = -EIO;
3551 				goto fail;
3552 			}
3553 		}
3554 	}
3555 
3556 	if (ap) {
3557 		rc = ap->ops->set_lpm(link, policy, hints);
3558 		if (!rc && ap->slave_link)
3559 			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3560 	} else
3561 		rc = sata_pmp_set_lpm(link, policy, hints);
3562 
3563 	/*
3564 	 * Attribute link config failure to the first (LPM) enabled
3565 	 * device on the link.
3566 	 */
3567 	if (rc) {
3568 		if (rc == -EOPNOTSUPP) {
3569 			link->flags |= ATA_LFLAG_NO_LPM;
3570 			return 0;
3571 		}
3572 		dev = lpm_dev ? lpm_dev : link_dev;
3573 		goto fail;
3574 	}
3575 
3576 	/*
3577 	 * Low level driver acked the transition.  Issue DIPM command
3578 	 * with the new policy set.
3579 	 */
3580 	link->lpm_policy = policy;
3581 	if (ap && ap->slave_link)
3582 		ap->slave_link->lpm_policy = policy;
3583 
3584 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
3585 	ata_for_each_dev(dev, link, ENABLED) {
3586 		if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3587 		    ata_id_has_dipm(dev->id)) {
3588 			err_mask = ata_dev_set_feature(dev,
3589 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
3590 			if (err_mask && err_mask != AC_ERR_DEV) {
3591 				ata_dev_warn(dev,
3592 					"failed to enable DIPM, Emask 0x%x\n",
3593 					err_mask);
3594 				rc = -EIO;
3595 				goto fail;
3596 			}
3597 		}
3598 	}
3599 
3600 	return 0;
3601 
3602 fail:
3603 	/* restore the old policy */
3604 	link->lpm_policy = old_policy;
3605 	if (ap && ap->slave_link)
3606 		ap->slave_link->lpm_policy = old_policy;
3607 
3608 	/* if no device or only one more chance is left, disable LPM */
3609 	if (!dev || ehc->tries[dev->devno] <= 2) {
3610 		ata_link_warn(link, "disabling LPM on the link\n");
3611 		link->flags |= ATA_LFLAG_NO_LPM;
3612 	}
3613 	if (r_failed_dev)
3614 		*r_failed_dev = dev;
3615 	return rc;
3616 }
3617 
3618 int ata_link_nr_enabled(struct ata_link *link)
3619 {
3620 	struct ata_device *dev;
3621 	int cnt = 0;
3622 
3623 	ata_for_each_dev(dev, link, ENABLED)
3624 		cnt++;
3625 	return cnt;
3626 }
3627 
3628 static int ata_link_nr_vacant(struct ata_link *link)
3629 {
3630 	struct ata_device *dev;
3631 	int cnt = 0;
3632 
3633 	ata_for_each_dev(dev, link, ALL)
3634 		if (dev->class == ATA_DEV_UNKNOWN)
3635 			cnt++;
3636 	return cnt;
3637 }
3638 
3639 static int ata_eh_skip_recovery(struct ata_link *link)
3640 {
3641 	struct ata_port *ap = link->ap;
3642 	struct ata_eh_context *ehc = &link->eh_context;
3643 	struct ata_device *dev;
3644 
3645 	/* skip disabled links */
3646 	if (link->flags & ATA_LFLAG_DISABLED)
3647 		return 1;
3648 
3649 	/* skip if explicitly requested */
3650 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3651 		return 1;
3652 
3653 	/* thaw frozen port and recover failed devices */
3654 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3655 		return 0;
3656 
3657 	/* reset at least once if reset is requested */
3658 	if ((ehc->i.action & ATA_EH_RESET) &&
3659 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3660 		return 0;
3661 
3662 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
3663 	ata_for_each_dev(dev, link, ALL) {
3664 		if (dev->class == ATA_DEV_UNKNOWN &&
3665 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3666 			return 0;
3667 	}
3668 
3669 	return 1;
3670 }
3671 
3672 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3673 {
3674 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3675 	u64 now = get_jiffies_64();
3676 	int *trials = void_arg;
3677 
3678 	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3679 	    (ent->timestamp < now - min(now, interval)))
3680 		return -1;
3681 
3682 	(*trials)++;
3683 	return 0;
3684 }
3685 
3686 static int ata_eh_schedule_probe(struct ata_device *dev)
3687 {
3688 	struct ata_eh_context *ehc = &dev->link->eh_context;
3689 	struct ata_link *link = ata_dev_phys_link(dev);
3690 	int trials = 0;
3691 
3692 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3693 	    (ehc->did_probe_mask & (1 << dev->devno)))
3694 		return 0;
3695 
3696 	ata_eh_detach_dev(dev);
3697 	ata_dev_init(dev);
3698 	ehc->did_probe_mask |= (1 << dev->devno);
3699 	ehc->i.action |= ATA_EH_RESET;
3700 	ehc->saved_xfer_mode[dev->devno] = 0;
3701 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3702 
3703 	/* the link maybe in a deep sleep, wake it up */
3704 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3705 		if (ata_is_host_link(link))
3706 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3707 					       ATA_LPM_EMPTY);
3708 		else
3709 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3710 					 ATA_LPM_EMPTY);
3711 	}
3712 
3713 	/* Record and count probe trials on the ering.  The specific
3714 	 * error mask used is irrelevant.  Because a successful device
3715 	 * detection clears the ering, this count accumulates only if
3716 	 * there are consecutive failed probes.
3717 	 *
3718 	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3719 	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3720 	 * forced to 1.5Gbps.
3721 	 *
3722 	 * This is to work around cases where failed link speed
3723 	 * negotiation results in device misdetection leading to
3724 	 * infinite DEVXCHG or PHRDY CHG events.
3725 	 */
3726 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3727 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3728 
3729 	if (trials > ATA_EH_PROBE_TRIALS)
3730 		sata_down_spd_limit(link, 1);
3731 
3732 	return 1;
3733 }
3734 
3735 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3736 {
3737 	struct ata_eh_context *ehc = &dev->link->eh_context;
3738 
3739 	/* -EAGAIN from EH routine indicates retry without prejudice.
3740 	 * The requester is responsible for ensuring forward progress.
3741 	 */
3742 	if (err != -EAGAIN)
3743 		ehc->tries[dev->devno]--;
3744 
3745 	switch (err) {
3746 	case -ENODEV:
3747 		/* device missing or wrong IDENTIFY data, schedule probing */
3748 		ehc->i.probe_mask |= (1 << dev->devno);
3749 	case -EINVAL:
3750 		/* give it just one more chance */
3751 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3752 	case -EIO:
3753 		if (ehc->tries[dev->devno] == 1) {
3754 			/* This is the last chance, better to slow
3755 			 * down than lose it.
3756 			 */
3757 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3758 			if (dev->pio_mode > XFER_PIO_0)
3759 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3760 		}
3761 	}
3762 
3763 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3764 		/* disable device if it has used up all its chances */
3765 		ata_dev_disable(dev);
3766 
3767 		/* detach if offline */
3768 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3769 			ata_eh_detach_dev(dev);
3770 
3771 		/* schedule probe if necessary */
3772 		if (ata_eh_schedule_probe(dev)) {
3773 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3774 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
3775 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
3776 		}
3777 
3778 		return 1;
3779 	} else {
3780 		ehc->i.action |= ATA_EH_RESET;
3781 		return 0;
3782 	}
3783 }
3784 
3785 /**
3786  *	ata_eh_recover - recover host port after error
3787  *	@ap: host port to recover
3788  *	@prereset: prereset method (can be NULL)
3789  *	@softreset: softreset method (can be NULL)
3790  *	@hardreset: hardreset method (can be NULL)
3791  *	@postreset: postreset method (can be NULL)
3792  *	@r_failed_link: out parameter for failed link
3793  *
3794  *	This is the alpha and omega, eum and yang, heart and soul of
3795  *	libata exception handling.  On entry, actions required to
3796  *	recover each link and hotplug requests are recorded in the
3797  *	link's eh_context.  This function executes all the operations
3798  *	with appropriate retrials and fallbacks to resurrect failed
3799  *	devices, detach goners and greet newcomers.
3800  *
3801  *	LOCKING:
3802  *	Kernel thread context (may sleep).
3803  *
3804  *	RETURNS:
3805  *	0 on success, -errno on failure.
3806  */
3807 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3808 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3809 		   ata_postreset_fn_t postreset,
3810 		   struct ata_link **r_failed_link)
3811 {
3812 	struct ata_link *link;
3813 	struct ata_device *dev;
3814 	int rc, nr_fails;
3815 	unsigned long flags, deadline;
3816 
3817 	DPRINTK("ENTER\n");
3818 
3819 	/* prep for recovery */
3820 	ata_for_each_link(link, ap, EDGE) {
3821 		struct ata_eh_context *ehc = &link->eh_context;
3822 
3823 		/* re-enable link? */
3824 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3825 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3826 			spin_lock_irqsave(ap->lock, flags);
3827 			link->flags &= ~ATA_LFLAG_DISABLED;
3828 			spin_unlock_irqrestore(ap->lock, flags);
3829 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3830 		}
3831 
3832 		ata_for_each_dev(dev, link, ALL) {
3833 			if (link->flags & ATA_LFLAG_NO_RETRY)
3834 				ehc->tries[dev->devno] = 1;
3835 			else
3836 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3837 
3838 			/* collect port action mask recorded in dev actions */
3839 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
3840 					 ~ATA_EH_PERDEV_MASK;
3841 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3842 
3843 			/* process hotplug request */
3844 			if (dev->flags & ATA_DFLAG_DETACH)
3845 				ata_eh_detach_dev(dev);
3846 
3847 			/* schedule probe if necessary */
3848 			if (!ata_dev_enabled(dev))
3849 				ata_eh_schedule_probe(dev);
3850 		}
3851 	}
3852 
3853  retry:
3854 	rc = 0;
3855 
3856 	/* if UNLOADING, finish immediately */
3857 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3858 		goto out;
3859 
3860 	/* prep for EH */
3861 	ata_for_each_link(link, ap, EDGE) {
3862 		struct ata_eh_context *ehc = &link->eh_context;
3863 
3864 		/* skip EH if possible. */
3865 		if (ata_eh_skip_recovery(link))
3866 			ehc->i.action = 0;
3867 
3868 		ata_for_each_dev(dev, link, ALL)
3869 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3870 	}
3871 
3872 	/* reset */
3873 	ata_for_each_link(link, ap, EDGE) {
3874 		struct ata_eh_context *ehc = &link->eh_context;
3875 
3876 		if (!(ehc->i.action & ATA_EH_RESET))
3877 			continue;
3878 
3879 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3880 				  prereset, softreset, hardreset, postreset);
3881 		if (rc) {
3882 			ata_link_err(link, "reset failed, giving up\n");
3883 			goto out;
3884 		}
3885 	}
3886 
3887 	do {
3888 		unsigned long now;
3889 
3890 		/*
3891 		 * clears ATA_EH_PARK in eh_info and resets
3892 		 * ap->park_req_pending
3893 		 */
3894 		ata_eh_pull_park_action(ap);
3895 
3896 		deadline = jiffies;
3897 		ata_for_each_link(link, ap, EDGE) {
3898 			ata_for_each_dev(dev, link, ALL) {
3899 				struct ata_eh_context *ehc = &link->eh_context;
3900 				unsigned long tmp;
3901 
3902 				if (dev->class != ATA_DEV_ATA &&
3903 				    dev->class != ATA_DEV_ZAC)
3904 					continue;
3905 				if (!(ehc->i.dev_action[dev->devno] &
3906 				      ATA_EH_PARK))
3907 					continue;
3908 				tmp = dev->unpark_deadline;
3909 				if (time_before(deadline, tmp))
3910 					deadline = tmp;
3911 				else if (time_before_eq(tmp, jiffies))
3912 					continue;
3913 				if (ehc->unloaded_mask & (1 << dev->devno))
3914 					continue;
3915 
3916 				ata_eh_park_issue_cmd(dev, 1);
3917 			}
3918 		}
3919 
3920 		now = jiffies;
3921 		if (time_before_eq(deadline, now))
3922 			break;
3923 
3924 		ata_eh_release(ap);
3925 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
3926 						       deadline - now);
3927 		ata_eh_acquire(ap);
3928 	} while (deadline);
3929 	ata_for_each_link(link, ap, EDGE) {
3930 		ata_for_each_dev(dev, link, ALL) {
3931 			if (!(link->eh_context.unloaded_mask &
3932 			      (1 << dev->devno)))
3933 				continue;
3934 
3935 			ata_eh_park_issue_cmd(dev, 0);
3936 			ata_eh_done(link, dev, ATA_EH_PARK);
3937 		}
3938 	}
3939 
3940 	/* the rest */
3941 	nr_fails = 0;
3942 	ata_for_each_link(link, ap, PMP_FIRST) {
3943 		struct ata_eh_context *ehc = &link->eh_context;
3944 
3945 		if (sata_pmp_attached(ap) && ata_is_host_link(link))
3946 			goto config_lpm;
3947 
3948 		/* revalidate existing devices and attach new ones */
3949 		rc = ata_eh_revalidate_and_attach(link, &dev);
3950 		if (rc)
3951 			goto rest_fail;
3952 
3953 		/* if PMP got attached, return, pmp EH will take care of it */
3954 		if (link->device->class == ATA_DEV_PMP) {
3955 			ehc->i.action = 0;
3956 			return 0;
3957 		}
3958 
3959 		/* configure transfer mode if necessary */
3960 		if (ehc->i.flags & ATA_EHI_SETMODE) {
3961 			rc = ata_set_mode(link, &dev);
3962 			if (rc)
3963 				goto rest_fail;
3964 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3965 		}
3966 
3967 		/* If reset has been issued, clear UA to avoid
3968 		 * disrupting the current users of the device.
3969 		 */
3970 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
3971 			ata_for_each_dev(dev, link, ALL) {
3972 				if (dev->class != ATA_DEV_ATAPI)
3973 					continue;
3974 				rc = atapi_eh_clear_ua(dev);
3975 				if (rc)
3976 					goto rest_fail;
3977 				if (zpodd_dev_enabled(dev))
3978 					zpodd_post_poweron(dev);
3979 			}
3980 		}
3981 
3982 		/* retry flush if necessary */
3983 		ata_for_each_dev(dev, link, ALL) {
3984 			if (dev->class != ATA_DEV_ATA &&
3985 			    dev->class != ATA_DEV_ZAC)
3986 				continue;
3987 			rc = ata_eh_maybe_retry_flush(dev);
3988 			if (rc)
3989 				goto rest_fail;
3990 		}
3991 
3992 	config_lpm:
3993 		/* configure link power saving */
3994 		if (link->lpm_policy != ap->target_lpm_policy) {
3995 			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3996 			if (rc)
3997 				goto rest_fail;
3998 		}
3999 
4000 		/* this link is okay now */
4001 		ehc->i.flags = 0;
4002 		continue;
4003 
4004 	rest_fail:
4005 		nr_fails++;
4006 		if (dev)
4007 			ata_eh_handle_dev_fail(dev, rc);
4008 
4009 		if (ap->pflags & ATA_PFLAG_FROZEN) {
4010 			/* PMP reset requires working host port.
4011 			 * Can't retry if it's frozen.
4012 			 */
4013 			if (sata_pmp_attached(ap))
4014 				goto out;
4015 			break;
4016 		}
4017 	}
4018 
4019 	if (nr_fails)
4020 		goto retry;
4021 
4022  out:
4023 	if (rc && r_failed_link)
4024 		*r_failed_link = link;
4025 
4026 	DPRINTK("EXIT, rc=%d\n", rc);
4027 	return rc;
4028 }
4029 
4030 /**
4031  *	ata_eh_finish - finish up EH
4032  *	@ap: host port to finish EH for
4033  *
4034  *	Recovery is complete.  Clean up EH states and retry or finish
4035  *	failed qcs.
4036  *
4037  *	LOCKING:
4038  *	None.
4039  */
4040 void ata_eh_finish(struct ata_port *ap)
4041 {
4042 	int tag;
4043 
4044 	/* retry or finish qcs */
4045 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
4046 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
4047 
4048 		if (!(qc->flags & ATA_QCFLAG_FAILED))
4049 			continue;
4050 
4051 		if (qc->err_mask) {
4052 			/* FIXME: Once EH migration is complete,
4053 			 * generate sense data in this function,
4054 			 * considering both err_mask and tf.
4055 			 */
4056 			if (qc->flags & ATA_QCFLAG_RETRY)
4057 				ata_eh_qc_retry(qc);
4058 			else
4059 				ata_eh_qc_complete(qc);
4060 		} else {
4061 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
4062 				ata_eh_qc_complete(qc);
4063 			} else {
4064 				/* feed zero TF to sense generation */
4065 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
4066 				ata_eh_qc_retry(qc);
4067 			}
4068 		}
4069 	}
4070 
4071 	/* make sure nr_active_links is zero after EH */
4072 	WARN_ON(ap->nr_active_links);
4073 	ap->nr_active_links = 0;
4074 }
4075 
4076 /**
4077  *	ata_do_eh - do standard error handling
4078  *	@ap: host port to handle error for
4079  *
4080  *	@prereset: prereset method (can be NULL)
4081  *	@softreset: softreset method (can be NULL)
4082  *	@hardreset: hardreset method (can be NULL)
4083  *	@postreset: postreset method (can be NULL)
4084  *
4085  *	Perform standard error handling sequence.
4086  *
4087  *	LOCKING:
4088  *	Kernel thread context (may sleep).
4089  */
4090 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
4091 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
4092 	       ata_postreset_fn_t postreset)
4093 {
4094 	struct ata_device *dev;
4095 	int rc;
4096 
4097 	ata_eh_autopsy(ap);
4098 	ata_eh_report(ap);
4099 
4100 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
4101 			    NULL);
4102 	if (rc) {
4103 		ata_for_each_dev(dev, &ap->link, ALL)
4104 			ata_dev_disable(dev);
4105 	}
4106 
4107 	ata_eh_finish(ap);
4108 }
4109 
4110 /**
4111  *	ata_std_error_handler - standard error handler
4112  *	@ap: host port to handle error for
4113  *
4114  *	Standard error handler
4115  *
4116  *	LOCKING:
4117  *	Kernel thread context (may sleep).
4118  */
4119 void ata_std_error_handler(struct ata_port *ap)
4120 {
4121 	struct ata_port_operations *ops = ap->ops;
4122 	ata_reset_fn_t hardreset = ops->hardreset;
4123 
4124 	/* ignore built-in hardreset if SCR access is not available */
4125 	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4126 		hardreset = NULL;
4127 
4128 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4129 }
4130 
4131 #ifdef CONFIG_PM
4132 /**
4133  *	ata_eh_handle_port_suspend - perform port suspend operation
4134  *	@ap: port to suspend
4135  *
4136  *	Suspend @ap.
4137  *
4138  *	LOCKING:
4139  *	Kernel thread context (may sleep).
4140  */
4141 static void ata_eh_handle_port_suspend(struct ata_port *ap)
4142 {
4143 	unsigned long flags;
4144 	int rc = 0;
4145 	struct ata_device *dev;
4146 
4147 	/* are we suspending? */
4148 	spin_lock_irqsave(ap->lock, flags);
4149 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4150 	    ap->pm_mesg.event & PM_EVENT_RESUME) {
4151 		spin_unlock_irqrestore(ap->lock, flags);
4152 		return;
4153 	}
4154 	spin_unlock_irqrestore(ap->lock, flags);
4155 
4156 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4157 
4158 	/*
4159 	 * If we have a ZPODD attached, check its zero
4160 	 * power ready status before the port is frozen.
4161 	 * Only needed for runtime suspend.
4162 	 */
4163 	if (PMSG_IS_AUTO(ap->pm_mesg)) {
4164 		ata_for_each_dev(dev, &ap->link, ENABLED) {
4165 			if (zpodd_dev_enabled(dev))
4166 				zpodd_on_suspend(dev);
4167 		}
4168 	}
4169 
4170 	/* tell ACPI we're suspending */
4171 	rc = ata_acpi_on_suspend(ap);
4172 	if (rc)
4173 		goto out;
4174 
4175 	/* suspend */
4176 	ata_eh_freeze_port(ap);
4177 
4178 	if (ap->ops->port_suspend)
4179 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4180 
4181 	ata_acpi_set_state(ap, ap->pm_mesg);
4182  out:
4183 	/* update the flags */
4184 	spin_lock_irqsave(ap->lock, flags);
4185 
4186 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4187 	if (rc == 0)
4188 		ap->pflags |= ATA_PFLAG_SUSPENDED;
4189 	else if (ap->pflags & ATA_PFLAG_FROZEN)
4190 		ata_port_schedule_eh(ap);
4191 
4192 	spin_unlock_irqrestore(ap->lock, flags);
4193 
4194 	return;
4195 }
4196 
4197 /**
4198  *	ata_eh_handle_port_resume - perform port resume operation
4199  *	@ap: port to resume
4200  *
4201  *	Resume @ap.
4202  *
4203  *	LOCKING:
4204  *	Kernel thread context (may sleep).
4205  */
4206 static void ata_eh_handle_port_resume(struct ata_port *ap)
4207 {
4208 	struct ata_link *link;
4209 	struct ata_device *dev;
4210 	unsigned long flags;
4211 	int rc = 0;
4212 
4213 	/* are we resuming? */
4214 	spin_lock_irqsave(ap->lock, flags);
4215 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4216 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4217 		spin_unlock_irqrestore(ap->lock, flags);
4218 		return;
4219 	}
4220 	spin_unlock_irqrestore(ap->lock, flags);
4221 
4222 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4223 
4224 	/*
4225 	 * Error timestamps are in jiffies which doesn't run while
4226 	 * suspended and PHY events during resume isn't too uncommon.
4227 	 * When the two are combined, it can lead to unnecessary speed
4228 	 * downs if the machine is suspended and resumed repeatedly.
4229 	 * Clear error history.
4230 	 */
4231 	ata_for_each_link(link, ap, HOST_FIRST)
4232 		ata_for_each_dev(dev, link, ALL)
4233 			ata_ering_clear(&dev->ering);
4234 
4235 	ata_acpi_set_state(ap, ap->pm_mesg);
4236 
4237 	if (ap->ops->port_resume)
4238 		rc = ap->ops->port_resume(ap);
4239 
4240 	/* tell ACPI that we're resuming */
4241 	ata_acpi_on_resume(ap);
4242 
4243 	/* update the flags */
4244 	spin_lock_irqsave(ap->lock, flags);
4245 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4246 	spin_unlock_irqrestore(ap->lock, flags);
4247 }
4248 #endif /* CONFIG_PM */
4249