/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		=  5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		=  5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	=  3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for retarded devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,  /* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,  /* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				     ATA_EH_DESC_LEN - ehi->desc_len,
				     fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
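
/*
 * Illustrative sketch (not built): how an LLDD interrupt handler might
 * use the helpers above to build an error description before scheduling
 * EH.  The irq_stat value and its bit layout are made up for this
 * example.
 */
#if 0
static void example_report_irq_error(struct ata_port *ap, u32 irq_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
	if (irq_stat & (1 << 0))	/* hypothetical PHY event bit */
		ata_ehi_push_desc(ehi, "PHY event");	/* appends ", PHY event" */
}
#endif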

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only the name and offset address are appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
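
/*
 * Illustrative sketch (not built): a PCI LLDD describing its BARs at
 * init time with the helper above.  BAR 5 and the 0x100 per-port
 * offset are made-up, AHCI-like values chosen purely for illustration.
 */
#if 0
static void example_describe_port(struct ata_port *ap)
{
	/* negative offset: "abar m8192@0x..." (name, type, size, address) */
	ata_port_pbar_desc(ap, 5, -1, "abar");
	/* non-negative offset: "port 0x..." (name plus offset address) */
	ata_port_pbar_desc(ap, 5, 0x100, "port");
}
#endif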

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
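
/*
 * Illustrative sketch (not built): how the two helpers above cooperate
 * in a retry loop.  ata_exec_internal() already does the equivalent
 * internally when passed a zero timeout; this example only makes the
 * per-class timeout escalation visible.
 */
#if 0
static unsigned int example_identify_with_escalation(struct ata_device *dev,
						     struct ata_taskfile *tf,
						     u16 *id)
{
	unsigned int err_mask = AC_ERR_TIMEOUT;
	int tries = 3;

	while (tries-- && (err_mask & AC_ERR_TIMEOUT)) {
		unsigned long timeout =
			ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);

		err_mask = ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
					     id, ATA_ID_WORDS * 2, timeout);
		if (err_mask & AC_ERR_TIMEOUT)
			/* bump cmd_timeout_idx: the next try waits longer */
			ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
	}
	return err_mask;
}
#endif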

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point on, but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we raced normal completion and there is nothing to
	   recover (nr_timedout == 0), why exactly are we doing error recovery? */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* An exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such a case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(qc->scsicmd->request);
}
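
/*
 * Illustrative sketch (not built): the usual way a qc ends up here.  An
 * LLDD completes the command with a non-zero err_mask, and the core
 * routes it through ata_qc_schedule_eh() instead of normal completion.
 */
#if 0
static void example_complete_with_error(struct ata_queued_cmd *qc)
{
	qc->err_mask |= AC_ERR_DEV;
	ata_qc_complete(qc);	/* ends up in EH, not in normal ->scsidone */
}
#endif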

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}
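
/*
 * Illustrative sketch (not built): an LLDD interrupt handler using the
 * abort helpers on a fatal, non-command-specific error (host lock
 * held).  The irq_stat value and its interpretation are made up.
 */
#if 0
static void example_handle_fatal_irq(struct ata_port *ap, u32 irq_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "fatal irq_stat 0x%08x", irq_stat);
	ehi->err_mask |= AC_ERR_HSM;
	ehi->action |= ATA_EH_RESET;
	ata_port_abort(ap);	/* fail all active qcs and schedule EH */
}
#endif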

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}
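
/*
 * Illustrative sketch (not built): freezing from an LLDD's HSM path
 * when the controller state is no longer trustworthy, so no further
 * interrupts touch the qcs until EH resets and thaws the port.
 */
#if 0
static void example_handle_hsm_violation(struct ata_queued_cmd *qc)
{
	qc->err_mask |= AC_ERR_HSM;
	ata_port_freeze(qc->ap);	/* abort qcs and freeze atomically */
}
#endif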

/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
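
/*
 * Illustrative sketch (not built): an LLDD receive path forwarding an
 * SDB FIS to the handler above.  Treating bit 7 of byte 1 as the
 * notification bit is an assumption made for this example; real
 * drivers typically key off their controller's interrupt status
 * instead of parsing the FIS by hand.
 */
#if 0
static void example_on_sdb_fis(struct ata_port *ap, const u8 *fis)
{
	if (fis[1] & 0x80)	/* assumed: N bit of the SDB FIS */
		sata_async_notification(ap);
}
#endif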

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
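
/*
 * Illustrative sketch (not built): how EH might finish failed qcs with
 * the two helpers above, retrying only commands that failed for
 * unrelated reasons.  This roughly mirrors what ata_eh_finish() does.
 */
#if 0
static void example_finish_failed_qcs(struct ata_port *ap, int recovered)
{
	int tag;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;
		if (recovered && !qc->err_mask)
			ata_eh_qc_retry(qc);	/* unrelated failure */
		else
			ata_eh_qc_complete(qc);	/* report result as-is */
	}
}
#endif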

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_done - EH action complete
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}

/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}

/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	hotplug_mask = 0;

	if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all an LLDD has to do
 *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}

static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}

struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if (ent->timestamp < arg->since)
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}

/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The following are the speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO
1807  *
1808  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1809  *	   during last 10 mins, NCQ_OFF.
1810  *
1811  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1812  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1813  *
1814  *	LOCKING:
1815  *	Inherited from caller.
1816  *
1817  *	RETURNS:
1818  *	OR of ATA_EH_SPDN_* flags.
1819  */
1820 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1821 {
1822 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1823 	u64 j64 = get_jiffies_64();
1824 	struct speed_down_verdict_arg arg;
1825 	unsigned int verdict = 0;
1826 
1827 	/* scan past 5 mins of error history */
1828 	memset(&arg, 0, sizeof(arg));
1829 	arg.since = j64 - min(j64, j5mins);
1830 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1831 
1832 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
1833 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
1834 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
1835 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
1836 
1837 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
1838 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
1839 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
1840 
1841 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1842 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1843 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1844 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
1845 
1846 	/* scan past 10 mins of error history */
1847 	memset(&arg, 0, sizeof(arg));
1848 	arg.since = j64 - min(j64, j10mins);
1849 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1850 
1851 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1852 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
1853 		verdict |= ATA_EH_SPDN_NCQ_OFF;
1854 
1855 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1856 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1857 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1858 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
1859 
1860 	return verdict;
1861 }
1862 
1863 /**
1864  *	ata_eh_speed_down - record error and speed down if necessary
1865  *	@dev: Failed device
1866  *	@eflags: mask of ATA_EFLAG_* flags
1867  *	@err_mask: err_mask of the error
1868  *
1869  *	Record error and examine error history to determine whether
1870  *	adjusting transmission speed is necessary.  It also sets
1871  *	transmission limits appropriately if such adjustment is
1872  *	necessary.
1873  *
1874  *	LOCKING:
1875  *	Kernel thread context (may sleep).
1876  *
1877  *	RETURNS:
1878  *	Determined recovery action.
1879  */
1880 static unsigned int ata_eh_speed_down(struct ata_device *dev,
1881 				unsigned int eflags, unsigned int err_mask)
1882 {
1883 	struct ata_link *link = ata_dev_phys_link(dev);
1884 	int xfer_ok = 0;
1885 	unsigned int verdict;
1886 	unsigned int action = 0;
1887 
1888 	/* don't bother if Cat-0 error */
1889 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1890 		return 0;
1891 
1892 	/* record error and determine whether speed down is necessary */
1893 	ata_ering_record(&dev->ering, eflags, err_mask);
1894 	verdict = ata_eh_speed_down_verdict(dev);
1895 
1896 	/* turn off NCQ? */
1897 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
1898 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
1899 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
1900 		dev->flags |= ATA_DFLAG_NCQ_OFF;
1901 		ata_dev_printk(dev, KERN_WARNING,
1902 			       "NCQ disabled due to excessive errors\n");
1903 		goto done;
1904 	}
1905 
1906 	/* speed down? */
1907 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1908 		/* speed down SATA link speed if possible */
1909 		if (sata_down_spd_limit(link, 0) == 0) {
1910 			action |= ATA_EH_RESET;
1911 			goto done;
1912 		}
1913 
1914 		/* lower transfer mode */
1915 		if (dev->spdn_cnt < 2) {
1916 			static const int dma_dnxfer_sel[] =
1917 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
1918 			static const int pio_dnxfer_sel[] =
1919 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
1920 			int sel;
1921 
1922 			if (dev->xfer_shift != ATA_SHIFT_PIO)
1923 				sel = dma_dnxfer_sel[dev->spdn_cnt];
1924 			else
1925 				sel = pio_dnxfer_sel[dev->spdn_cnt];
1926 
1927 			dev->spdn_cnt++;
1928 
1929 			if (ata_down_xfermask_limit(dev, sel) == 0) {
1930 				action |= ATA_EH_RESET;
1931 				goto done;
1932 			}
1933 		}
1934 	}
1935 
1936 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
1937 	 * SATA ATA devices.  Consider it only for PATA and SATA ATAPI.
1938 	 */
1939 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1940 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
1941 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
1942 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
1943 			dev->spdn_cnt = 0;
1944 			action |= ATA_EH_RESET;
1945 			goto done;
1946 		}
1947 	}
1948 
1949 	return 0;
1950  done:
1951 	/* device has been slowed down, blow error history */
1952 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
1953 		ata_ering_clear(&dev->ering);
1954 	return action;
1955 }
1956 
1957 /**
1958  *	ata_eh_link_autopsy - analyze error and determine recovery action
1959  *	@link: host link to perform autopsy on
1960  *
1961  *	Analyze why @link failed and determine which recovery actions
1962  *	are needed.  This function also sets more detailed AC_ERR_*
1963  *	values and fills sense data for ATAPI CHECK CONDITION.
1964  *
1965  *	LOCKING:
1966  *	Kernel thread context (may sleep).
1967  */
1968 static void ata_eh_link_autopsy(struct ata_link *link)
1969 {
1970 	struct ata_port *ap = link->ap;
1971 	struct ata_eh_context *ehc = &link->eh_context;
1972 	struct ata_device *dev;
1973 	unsigned int all_err_mask = 0, eflags = 0;
1974 	int tag;
1975 	u32 serror;
1976 	int rc;
1977 
1978 	DPRINTK("ENTER\n");
1979 
1980 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1981 		return;
1982 
1983 	/* obtain and analyze SError */
1984 	rc = sata_scr_read(link, SCR_ERROR, &serror);
1985 	if (rc == 0) {
1986 		ehc->i.serror |= serror;
1987 		ata_eh_analyze_serror(link);
1988 	} else if (rc != -EOPNOTSUPP) {
1989 		/* SError read failed, force reset and probing */
1990 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
1991 		ehc->i.action |= ATA_EH_RESET;
1992 		ehc->i.err_mask |= AC_ERR_OTHER;
1993 	}
1994 
1995 	/* analyze NCQ failure */
1996 	ata_eh_analyze_ncq_error(link);
1997 
1998 	/* any real error trumps AC_ERR_OTHER */
1999 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2000 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2001 
2002 	all_err_mask |= ehc->i.err_mask;
2003 
2004 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2005 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2006 
2007 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2008 		    ata_dev_phys_link(qc->dev) != link)
2009 			continue;
2010 
2011 		/* inherit upper level err_mask */
2012 		qc->err_mask |= ehc->i.err_mask;
2013 
2014 		/* analyze TF */
2015 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2016 
2017 		/* DEV errors are probably spurious in case of ATA_BUS error */
2018 		if (qc->err_mask & AC_ERR_ATA_BUS)
2019 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2020 					  AC_ERR_INVALID);
2021 
2022 		/* any real error trumps unknown error */
2023 		if (qc->err_mask & ~AC_ERR_OTHER)
2024 			qc->err_mask &= ~AC_ERR_OTHER;
2025 
2026 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2027 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2028 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2029 
2030 		/* determine whether the command is worth retrying */
2031 		if (qc->flags & ATA_QCFLAG_IO ||
2032 		    (!(qc->err_mask & AC_ERR_INVALID) &&
2033 		     qc->err_mask != AC_ERR_DEV))
2034 			qc->flags |= ATA_QCFLAG_RETRY;
2035 
2036 		/* accumulate error info */
2037 		ehc->i.dev = qc->dev;
2038 		all_err_mask |= qc->err_mask;
2039 		if (qc->flags & ATA_QCFLAG_IO)
2040 			eflags |= ATA_EFLAG_IS_IO;
2041 	}
2042 
2043 	/* enforce default EH actions */
2044 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2045 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2046 		ehc->i.action |= ATA_EH_RESET;
2047 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2048 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2049 		ehc->i.action |= ATA_EH_REVALIDATE;
2050 
2051 	/* If we have offending qcs and the associated failed device,
2052 	 * perform per-dev EH action only on the offending device.
2053 	 */
2054 	if (ehc->i.dev) {
2055 		ehc->i.dev_action[ehc->i.dev->devno] |=
2056 			ehc->i.action & ATA_EH_PERDEV_MASK;
2057 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2058 	}
2059 
2060 	/* propagate timeout to host link */
2061 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2062 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2063 
2064 	/* record error and consider speeding down */
2065 	dev = ehc->i.dev;
2066 	if (!dev && ata_link_max_devices(link) == 1 &&
2067 	    ata_dev_enabled(link->device))
2068 		dev = link->device;
2069 
2070 	if (dev) {
2071 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2072 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
2073 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2074 	}
2075 
2076 	DPRINTK("EXIT\n");
2077 }
2078 
2079 /**
2080  *	ata_eh_autopsy - analyze error and determine recovery action
2081  *	@ap: host port to perform autopsy on
2082  *
2083  *	Analyze all links of @ap and determine why they failed and
2084  *	which recovery actions are needed.
2085  *
2086  *	LOCKING:
2087  *	Kernel thread context (may sleep).
2088  */
2089 void ata_eh_autopsy(struct ata_port *ap)
2090 {
2091 	struct ata_link *link;
2092 
2093 	ata_for_each_link(link, ap, EDGE)
2094 		ata_eh_link_autopsy(link);
2095 
2096 	/* Handle the frigging slave link.  Autopsy is done similarly
2097 	 * but actions and flags are transferred over to the master
2098 	 * link and handled from there.
2099 	 */
2100 	if (ap->slave_link) {
2101 		struct ata_eh_context *mehc = &ap->link.eh_context;
2102 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2103 
2104 		/* transfer control flags from master to slave */
2105 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2106 
2107 		/* perform autopsy on the slave link */
2108 		ata_eh_link_autopsy(ap->slave_link);
2109 
2110 		/* transfer actions from slave to master and clear slave */
2111 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2112 		mehc->i.action		|= sehc->i.action;
2113 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2114 		mehc->i.flags		|= sehc->i.flags;
2115 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2116 	}
2117 
2118 	/* Autopsy of fanout ports can affect host link autopsy.
2119 	 * Perform host link autopsy last.
2120 	 */
2121 	if (sata_pmp_attached(ap))
2122 		ata_eh_link_autopsy(&ap->link);
2123 }
2124 
2125 /**
2126  *	ata_get_cmd_descript - get description for ATA command
2127  *	@command: ATA command code to get description for
2128  *
2129  *	Return a textual description of the given command, or NULL if the
2130  *	command is not known.
2131  *
2132  *	LOCKING:
2133  *	None
2134  */
2135 const char *ata_get_cmd_descript(u8 command)
2136 {
2137 #ifdef CONFIG_ATA_VERBOSE_ERROR
2138 	static const struct
2139 	{
2140 		u8 command;
2141 		const char *text;
2142 	} cmd_descr[] = {
2143 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
2144 		{ ATA_CMD_CHK_POWER, 		"CHECK POWER MODE" },
2145 		{ ATA_CMD_STANDBY, 		"STANDBY" },
2146 		{ ATA_CMD_IDLE, 		"IDLE" },
2147 		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
2148 		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
2149 		{ ATA_CMD_NOP,			"NOP" },
2150 		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
2151 		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
2152 		{ ATA_CMD_ID_ATA,  		"IDENTIFY DEVICE" },
2153 		{ ATA_CMD_ID_ATAPI, 		"IDENTIFY PACKET DEVICE" },
2154 		{ ATA_CMD_SERVICE, 		"SERVICE" },
2155 		{ ATA_CMD_READ, 		"READ DMA" },
2156 		{ ATA_CMD_READ_EXT, 		"READ DMA EXT" },
2157 		{ ATA_CMD_READ_QUEUED, 		"READ DMA QUEUED EXT" },
2158 		{ ATA_CMD_READ_STREAM_EXT, 	"READ STREAM EXT" },
2159 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
2160 		{ ATA_CMD_WRITE, 		"WRITE DMA" },
2161 		{ ATA_CMD_WRITE_EXT, 		"WRITE DMA EXT" },
2162 		{ ATA_CMD_WRITE_QUEUED, 	"WRITE DMA QUEUED EXT" },
2163 		{ ATA_CMD_WRITE_STREAM_EXT, 	"WRITE STREAM EXT" },
2164 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2165 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
2166 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2167 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
2168 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
2169 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
2170 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
2171 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
2172 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
2173 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
2174 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
2175 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
2176 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
2177 		{ ATA_CMD_WRITE_MULTI_FUA_EXT, 	"WRITE MULTIPLE FUA EXT" },
2178 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
2179 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
2180 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
2181 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
2182 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
2183 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
2184 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
2185 		{ ATA_CMD_SLEEP,		"SLEEP" },
2186 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
2187 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
2188 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
2189 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
2190 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
2191 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
2192 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
2193 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
2194 		{ ATA_CMD_WRITE_LOG_DMA_EXT, 	"WRITE LOG DMA EXT" },
2195 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
2196 		{ ATA_CMD_TRUSTED_RCV_DMA, 	"TRUSTED RECEIVE DMA" },
2197 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
2198 		{ ATA_CMD_TRUSTED_SND_DMA, 	"TRUSTED SEND DMA" },
2199 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
2200 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
2201 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
2202 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
2203 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
2204 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
2205 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
2206 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
2207 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
2208 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
2209 		{ ATA_CMD_SMART,		"SMART" },
2210 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
2211 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2212 		{ ATA_CMD_CHK_MED_CRD_TYP, 	"CHECK MEDIA CARD TYPE" },
2213 		{ ATA_CMD_CFA_REQ_EXT_ERR, 	"CFA REQUEST EXTENDED ERROR" },
2214 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
2215 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
2216 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
2217 		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
2218 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
2219 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
2220 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
2221 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
2222 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
2223 		{ 0,				NULL } /* terminate list */
2224 	};
2225 
2226 	unsigned int i;
2227 	for (i = 0; cmd_descr[i].text; i++)
2228 		if (cmd_descr[i].command == command)
2229 			return cmd_descr[i].text;
2230 #endif
2231 
2232 	return NULL;
2233 }
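
/*
 * Illustrative usage sketch: ata_eh_link_report() below uses this
 * helper to name a failed command, e.g.
 *
 *	const char *descr = ata_get_cmd_descript(ATA_CMD_FLUSH_EXT);
 *	==> descr points to "FLUSH CACHE EXT" when
 *	    CONFIG_ATA_VERBOSE_ERROR is set, NULL otherwise
 */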
2234 
2235 /**
2236  *	ata_eh_link_report - report error handling to user
2237  *	@link: ATA link EH is going on
2238  *
2239  *	Report EH to user.
2240  *
2241  *	LOCKING:
2242  *	None.
2243  */
2244 static void ata_eh_link_report(struct ata_link *link)
2245 {
2246 	struct ata_port *ap = link->ap;
2247 	struct ata_eh_context *ehc = &link->eh_context;
2248 	const char *frozen, *desc;
2249 	char tries_buf[6];
2250 	int tag, nr_failed = 0;
2251 
2252 	if (ehc->i.flags & ATA_EHI_QUIET)
2253 		return;
2254 
2255 	desc = NULL;
2256 	if (ehc->i.desc[0] != '\0')
2257 		desc = ehc->i.desc;
2258 
2259 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2260 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2261 
2262 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2263 		    ata_dev_phys_link(qc->dev) != link ||
2264 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2265 		     qc->err_mask == AC_ERR_DEV))
2266 			continue;
2267 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2268 			continue;
2269 
2270 		nr_failed++;
2271 	}
2272 
2273 	if (!nr_failed && !ehc->i.err_mask)
2274 		return;
2275 
2276 	frozen = "";
2277 	if (ap->pflags & ATA_PFLAG_FROZEN)
2278 		frozen = " frozen";
2279 
2280 	memset(tries_buf, 0, sizeof(tries_buf));
2281 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2282 		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2283 			 ap->eh_tries);
2284 
2285 	if (ehc->i.dev) {
2286 		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
2287 			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2288 			       ehc->i.err_mask, link->sactive, ehc->i.serror,
2289 			       ehc->i.action, frozen, tries_buf);
2290 		if (desc)
2291 			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
2292 	} else {
2293 		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
2294 				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2295 				ehc->i.err_mask, link->sactive, ehc->i.serror,
2296 				ehc->i.action, frozen, tries_buf);
2297 		if (desc)
2298 			ata_link_printk(link, KERN_ERR, "%s\n", desc);
2299 	}
2300 
2301 #ifdef CONFIG_ATA_VERBOSE_ERROR
2302 	if (ehc->i.serror)
2303 		ata_link_printk(link, KERN_ERR,
2304 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2305 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2306 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2307 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2308 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2309 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2310 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2311 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2312 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2313 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2314 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2315 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2316 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2317 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2318 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2319 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2320 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2321 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2322 #endif
2323 
2324 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2325 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2326 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2327 		const u8 *cdb = qc->cdb;
2328 		char data_buf[20] = "";
2329 		char cdb_buf[70] = "";
2330 
2331 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2332 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2333 			continue;
2334 
2335 		if (qc->dma_dir != DMA_NONE) {
2336 			static const char *dma_str[] = {
2337 				[DMA_BIDIRECTIONAL]	= "bidi",
2338 				[DMA_TO_DEVICE]		= "out",
2339 				[DMA_FROM_DEVICE]	= "in",
2340 			};
2341 			static const char *prot_str[] = {
2342 				[ATA_PROT_PIO]		= "pio",
2343 				[ATA_PROT_DMA]		= "dma",
2344 				[ATA_PROT_NCQ]		= "ncq",
2345 				[ATAPI_PROT_PIO]	= "pio",
2346 				[ATAPI_PROT_DMA]	= "dma",
2347 			};
2348 
2349 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2350 				 prot_str[qc->tf.protocol], qc->nbytes,
2351 				 dma_str[qc->dma_dir]);
2352 		}
2353 
2354 		if (ata_is_atapi(qc->tf.protocol)) {
2355 			if (qc->scsicmd)
2356 				scsi_print_command(qc->scsicmd);
2357 			else
2358 				snprintf(cdb_buf, sizeof(cdb_buf),
2359 				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
2360 				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
2361 				 cdb[0], cdb[1], cdb[2], cdb[3],
2362 				 cdb[4], cdb[5], cdb[6], cdb[7],
2363 				 cdb[8], cdb[9], cdb[10], cdb[11],
2364 				 cdb[12], cdb[13], cdb[14], cdb[15]);
2365 		} else {
2366 			const char *descr = ata_get_cmd_descript(cmd->command);
2367 			if (descr)
2368 				ata_dev_printk(qc->dev, KERN_ERR,
2369 					"failed command: %s\n", descr);
2370 		}
2371 
2372 		ata_dev_printk(qc->dev, KERN_ERR,
2373 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2374 			"tag %d%s\n         %s"
2375 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2376 			"Emask 0x%x (%s)%s\n",
2377 			cmd->command, cmd->feature, cmd->nsect,
2378 			cmd->lbal, cmd->lbam, cmd->lbah,
2379 			cmd->hob_feature, cmd->hob_nsect,
2380 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2381 			cmd->device, qc->tag, data_buf, cdb_buf,
2382 			res->command, res->feature, res->nsect,
2383 			res->lbal, res->lbam, res->lbah,
2384 			res->hob_feature, res->hob_nsect,
2385 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
2386 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
2387 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2388 
2389 #ifdef CONFIG_ATA_VERBOSE_ERROR
2390 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2391 				    ATA_ERR)) {
2392 			if (res->command & ATA_BUSY)
2393 				ata_dev_printk(qc->dev, KERN_ERR,
2394 				  "status: { Busy }\n");
2395 			else
2396 				ata_dev_printk(qc->dev, KERN_ERR,
2397 				  "status: { %s%s%s%s}\n",
2398 				  res->command & ATA_DRDY ? "DRDY " : "",
2399 				  res->command & ATA_DF ? "DF " : "",
2400 				  res->command & ATA_DRQ ? "DRQ " : "",
2401 				  res->command & ATA_ERR ? "ERR " : "");
2402 		}
2403 
2404 		if (cmd->command != ATA_CMD_PACKET &&
2405 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2406 				     ATA_ABORTED)))
2407 			ata_dev_printk(qc->dev, KERN_ERR,
2408 			  "error: { %s%s%s%s}\n",
2409 			  res->feature & ATA_ICRC ? "ICRC " : "",
2410 			  res->feature & ATA_UNC ? "UNC " : "",
2411 			  res->feature & ATA_IDNF ? "IDNF " : "",
2412 			  res->feature & ATA_ABORTED ? "ABRT " : "");
2413 #endif
2414 	}
2415 }
2416 
2417 /**
2418  *	ata_eh_report - report error handling to user
2419  *	@ap: ATA port to report EH about
2420  *
2421  *	Report EH to user.
2422  *
2423  *	LOCKING:
2424  *	None.
2425  */
2426 void ata_eh_report(struct ata_port *ap)
2427 {
2428 	struct ata_link *link;
2429 
2430 	ata_for_each_link(link, ap, HOST_FIRST)
2431 		ata_eh_link_report(link);
2432 }
2433 
2434 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2435 			unsigned int *classes, unsigned long deadline,
2436 			bool clear_classes)
2437 {
2438 	struct ata_device *dev;
2439 
2440 	if (clear_classes)
2441 		ata_for_each_dev(dev, link, ALL)
2442 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2443 
2444 	return reset(link, classes, deadline);
2445 }
2446 
2447 static int ata_eh_followup_srst_needed(struct ata_link *link,
2448 				       int rc, const unsigned int *classes)
2449 {
2450 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2451 		return 0;
2452 	if (rc == -EAGAIN)
2453 		return 1;
2454 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2455 		return 1;
2456 	return 0;
2457 }
2458 
2459 int ata_eh_reset(struct ata_link *link, int classify,
2460 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2461 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2462 {
2463 	struct ata_port *ap = link->ap;
2464 	struct ata_link *slave = ap->slave_link;
2465 	struct ata_eh_context *ehc = &link->eh_context;
2466 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2467 	unsigned int *classes = ehc->classes;
2468 	unsigned int lflags = link->flags;
2469 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2470 	int max_tries = 0, try = 0;
2471 	struct ata_link *failed_link;
2472 	struct ata_device *dev;
2473 	unsigned long deadline, now;
2474 	ata_reset_fn_t reset;
2475 	unsigned long flags;
2476 	u32 sstatus;
2477 	int nr_unknown, rc;
2478 
2479 	/*
2480 	 * Prepare to reset
2481 	 */
2482 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2483 		max_tries++;
2484 	if (link->flags & ATA_LFLAG_NO_HRST)
2485 		hardreset = NULL;
2486 	if (link->flags & ATA_LFLAG_NO_SRST)
2487 		softreset = NULL;
2488 
2489 	/* make sure each reset attempt is at least COOL_DOWN apart */
2490 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
2491 		now = jiffies;
2492 		WARN_ON(time_after(ehc->last_reset, now));
2493 		deadline = ata_deadline(ehc->last_reset,
2494 					ATA_EH_RESET_COOL_DOWN);
2495 		if (time_before(now, deadline))
2496 			schedule_timeout_uninterruptible(deadline - now);
2497 	}
2498 
2499 	spin_lock_irqsave(ap->lock, flags);
2500 	ap->pflags |= ATA_PFLAG_RESETTING;
2501 	spin_unlock_irqrestore(ap->lock, flags);
2502 
2503 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2504 
2505 	ata_for_each_dev(dev, link, ALL) {
2506 		/* If we issue an SRST then an ATA drive (not ATAPI)
2507 		 * may change configuration and be in PIO0 timing. If
2508 		 * we do a hard reset (or are coming from power on)
2509 		 * this is true for ATA or ATAPI. Until we've set a
2510 		 * suitable controller mode we should not touch the
2511 		 * bus as we may be talking too fast.
2512 		 */
2513 		dev->pio_mode = XFER_PIO_0;
2514 
2515 		/* If the controller has a pio mode setup function
2516 		 * then use it to set the chipset to rights. Don't
2517 		 * touch the DMA setup as that will be dealt with when
2518 		 * configuring devices.
2519 		 */
2520 		if (ap->ops->set_piomode)
2521 			ap->ops->set_piomode(ap, dev);
2522 	}
2523 
2524 	/* prefer hardreset */
2525 	reset = NULL;
2526 	ehc->i.action &= ~ATA_EH_RESET;
2527 	if (hardreset) {
2528 		reset = hardreset;
2529 		ehc->i.action |= ATA_EH_HARDRESET;
2530 	} else if (softreset) {
2531 		reset = softreset;
2532 		ehc->i.action |= ATA_EH_SOFTRESET;
2533 	}
2534 
2535 	if (prereset) {
2536 		unsigned long deadline = ata_deadline(jiffies,
2537 						      ATA_EH_PRERESET_TIMEOUT);
2538 
2539 		if (slave) {
2540 			sehc->i.action &= ~ATA_EH_RESET;
2541 			sehc->i.action |= ehc->i.action;
2542 		}
2543 
2544 		rc = prereset(link, deadline);
2545 
2546 		/* If present, do prereset on slave link too.  Reset
2547 		 * is skipped iff both master and slave links report
2548 		 * -ENOENT or clear ATA_EH_RESET.
2549 		 */
2550 		if (slave && (rc == 0 || rc == -ENOENT)) {
2551 			int tmp;
2552 
2553 			tmp = prereset(slave, deadline);
2554 			if (tmp != -ENOENT)
2555 				rc = tmp;
2556 
2557 			ehc->i.action |= sehc->i.action;
2558 		}
2559 
2560 		if (rc) {
2561 			if (rc == -ENOENT) {
2562 				ata_link_printk(link, KERN_DEBUG,
2563 						"port disabled. ignoring.\n");
2564 				ehc->i.action &= ~ATA_EH_RESET;
2565 
2566 				ata_for_each_dev(dev, link, ALL)
2567 					classes[dev->devno] = ATA_DEV_NONE;
2568 
2569 				rc = 0;
2570 			} else
2571 				ata_link_printk(link, KERN_ERR,
2572 					"prereset failed (errno=%d)\n", rc);
2573 			goto out;
2574 		}
2575 
2576 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2577 		 * bang classes, thaw and return.
2578 		 */
2579 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2580 			ata_for_each_dev(dev, link, ALL)
2581 				classes[dev->devno] = ATA_DEV_NONE;
2582 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2583 			    ata_is_host_link(link))
2584 				ata_eh_thaw_port(ap);
2585 			rc = 0;
2586 			goto out;
2587 		}
2588 	}
2589 
2590  retry:
2591 	/*
2592 	 * Perform reset
2593 	 */
2594 	if (ata_is_host_link(link))
2595 		ata_eh_freeze_port(ap);
2596 
2597 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2598 
2599 	if (reset) {
2600 		if (verbose)
2601 			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2602 					reset == softreset ? "soft" : "hard");
2603 
2604 		/* mark that this EH session started with reset */
2605 		ehc->last_reset = jiffies;
2606 		if (reset == hardreset)
2607 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2608 		else
2609 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2610 
2611 		rc = ata_do_reset(link, reset, classes, deadline, true);
2612 		if (rc && rc != -EAGAIN) {
2613 			failed_link = link;
2614 			goto fail;
2615 		}
2616 
2617 		/* hardreset slave link if existent */
2618 		if (slave && reset == hardreset) {
2619 			int tmp;
2620 
2621 			if (verbose)
2622 				ata_link_printk(slave, KERN_INFO,
2623 						"hard resetting link\n");
2624 
2625 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2626 			tmp = ata_do_reset(slave, reset, classes, deadline,
2627 					   false);
2628 			switch (tmp) {
2629 			case -EAGAIN:
2630 				rc = -EAGAIN;	/* fall through */
2631 			case 0:
2632 				break;
2633 			default:
2634 				failed_link = slave;
2635 				rc = tmp;
2636 				goto fail;
2637 			}
2638 		}
2639 
2640 		/* perform follow-up SRST if necessary */
2641 		if (reset == hardreset &&
2642 		    ata_eh_followup_srst_needed(link, rc, classes)) {
2643 			reset = softreset;
2644 
2645 			if (!reset) {
2646 				ata_link_printk(link, KERN_ERR,
2647 						"follow-up softreset required "
2648 						"but no softreset available\n");
2649 				failed_link = link;
2650 				rc = -EINVAL;
2651 				goto fail;
2652 			}
2653 
2654 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2655 			rc = ata_do_reset(link, reset, classes, deadline, true);
2656 			if (rc) {
2657 				failed_link = link;
2658 				goto fail;
2659 			}
2660 		}
2661 	} else {
2662 		if (verbose)
2663 			ata_link_printk(link, KERN_INFO, "no reset method "
2664 					"available, skipping reset\n");
2665 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2666 			lflags |= ATA_LFLAG_ASSUME_ATA;
2667 	}
2668 
2669 	/*
2670 	 * Post-reset processing
2671 	 */
2672 	ata_for_each_dev(dev, link, ALL) {
2673 		/* After the reset, the device state is PIO 0 and the
2674 		 * controller state is undefined.  Reset also wakes up
2675 		 * drives from sleeping mode.
2676 		 */
2677 		dev->pio_mode = XFER_PIO_0;
2678 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2679 
2680 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2681 			continue;
2682 
2683 		/* apply class override */
2684 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2685 			classes[dev->devno] = ATA_DEV_ATA;
2686 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2687 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2688 	}
2689 
2690 	/* record current link speed */
2691 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2692 		link->sata_spd = (sstatus >> 4) & 0xf;
2693 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2694 		slave->sata_spd = (sstatus >> 4) & 0xf;
2695 
2696 	/* thaw the port */
2697 	if (ata_is_host_link(link))
2698 		ata_eh_thaw_port(ap);
2699 
2700 	/* postreset() should clear hardware SError.  Although SError
2701 	 * is cleared during link resume, clearing SError here is
2702 	 * necessary as some PHYs raise hotplug events after SRST.
2703 	 * This introduces a race condition where hotplug occurs between
2704 	 * reset and here.  This race is mitigated by cross-checking
2705 	 * link onlineness and classification result later.
2706 	 */
2707 	if (postreset) {
2708 		postreset(link, classes);
2709 		if (slave)
2710 			postreset(slave, classes);
2711 	}
2712 
2713 	/*
2714 	 * Some controllers can't be frozen very well and may set
2715 	 * spurious error conditions during reset.  Clear accumulated
2716 	 * error information.  As reset is the final recovery action,
2717 	 * nothing is lost by doing this.
2718 	 */
2719 	spin_lock_irqsave(link->ap->lock, flags);
2720 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2721 	if (slave)
2722 		memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2723 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2724 	spin_unlock_irqrestore(link->ap->lock, flags);
2725 
2726 	/*
2727 	 * Make sure onlineness and classification result correspond.
2728 	 * Hotplug could have happened during reset and some
2729 	 * controllers fail to wait while a drive is spinning up after
2730 	 * being hotplugged causing misdetection.  By cross checking
2731 	 * being hotplugged, causing misdetection.  By cross-checking
2732 	 * conditions can be reliably detected and retried.
2733 	 */
2734 	nr_unknown = 0;
2735 	ata_for_each_dev(dev, link, ALL) {
2736 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2737 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2738 				ata_dev_printk(dev, KERN_DEBUG, "link online "
2739 					       "but device misclassified\n");
2740 				classes[dev->devno] = ATA_DEV_NONE;
2741 				nr_unknown++;
2742 			}
2743 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2744 			if (ata_class_enabled(classes[dev->devno]))
2745 				ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2746 					       "clearing class %d to NONE\n",
2747 					       classes[dev->devno]);
2748 			classes[dev->devno] = ATA_DEV_NONE;
2749 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2750 			ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2751 				       "clearing UNKNOWN to NONE\n");
2752 			classes[dev->devno] = ATA_DEV_NONE;
2753 		}
2754 	}
2755 
2756 	if (classify && nr_unknown) {
2757 		if (try < max_tries) {
2758 			ata_link_printk(link, KERN_WARNING, "link online but "
2759 					"%d devices misclassified, retrying\n",
2760 					nr_unknown);
2761 			failed_link = link;
2762 			rc = -EAGAIN;
2763 			goto fail;
2764 		}
2765 		ata_link_printk(link, KERN_WARNING,
2766 				"link online but %d devices misclassified, "
2767 				"device detection might fail\n", nr_unknown);
2768 	}
2769 
2770 	/* reset successful, schedule revalidation */
2771 	ata_eh_done(link, NULL, ATA_EH_RESET);
2772 	if (slave)
2773 		ata_eh_done(slave, NULL, ATA_EH_RESET);
2774 	ehc->last_reset = jiffies;	/* update to completion time */
2775 	ehc->i.action |= ATA_EH_REVALIDATE;
2776 
2777 	rc = 0;
2778  out:
2779 	/* clear hotplug flag */
2780 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2781 	if (slave)
2782 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2783 
2784 	spin_lock_irqsave(ap->lock, flags);
2785 	ap->pflags &= ~ATA_PFLAG_RESETTING;
2786 	spin_unlock_irqrestore(ap->lock, flags);
2787 
2788 	return rc;
2789 
2790  fail:
2791 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2792 	if (!ata_is_host_link(link) &&
2793 	    sata_scr_read(link, SCR_STATUS, &sstatus))
2794 		rc = -ERESTART;
2795 
2796 	if (rc == -ERESTART || try >= max_tries)
2797 		goto out;
2798 
2799 	now = jiffies;
2800 	if (time_before(now, deadline)) {
2801 		unsigned long delta = deadline - now;
2802 
2803 		ata_link_printk(failed_link, KERN_WARNING,
2804 			"reset failed (errno=%d), retrying in %u secs\n",
2805 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2806 
2807 		while (delta)
2808 			delta = schedule_timeout_uninterruptible(delta);
2809 	}
2810 
2811 	if (try == max_tries - 1) {
2812 		sata_down_spd_limit(link, 0);
2813 		if (slave)
2814 			sata_down_spd_limit(slave, 0);
2815 	} else if (rc == -EPIPE)
2816 		sata_down_spd_limit(failed_link, 0);
2817 
2818 	if (hardreset)
2819 		reset = hardreset;
2820 	goto retry;
2821 }
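
/*
 * Illustrative sketch: most LLDs reach ata_eh_reset() via
 * ata_eh_recover(), but a custom EH could call it directly with the
 * port's standard reset methods, mirroring the call made from
 * ata_eh_recover() below:
 *
 *	rc = ata_eh_reset(&ap->link, ata_link_nr_vacant(&ap->link),
 *			  ap->ops->prereset, ap->ops->softreset,
 *			  ap->ops->hardreset, ap->ops->postreset);
 */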
2822 
2823 static inline void ata_eh_pull_park_action(struct ata_port *ap)
2824 {
2825 	struct ata_link *link;
2826 	struct ata_device *dev;
2827 	unsigned long flags;
2828 
2829 	/*
2830 	 * This function can be thought of as an extended version of
2831 	 * ata_eh_about_to_do() specially crafted to accommodate the
2832 	 * requirements of ATA_EH_PARK handling. Since the EH thread
2833 	 * does not leave the do {} while () loop in ata_eh_recover as
2834 	 * long as the timeout for a park request to *one* device on
2835 	 * the port has not expired, and since we still want to pick
2836 	 * up park requests to other devices on the same port or
2837 	 * timeout updates for the same device, we have to pull
2838 	 * ATA_EH_PARK actions from eh_info into eh_context.i
2839 	 * ourselves at the beginning of each pass over the loop.
2840 	 *
2841 	 * Additionally, all write accesses to &ap->park_req_pending
2842 	 * through INIT_COMPLETION() (see below) or complete_all()
2843 	 * (see ata_scsi_park_store()) are protected by the host lock.
2844 	 * As a result we have that park_req_pending.done is zero on
2845 	 * exit from this function, i.e. when ATA_EH_PARK actions for
2846 	 * *all* devices on port ap have been pulled into the
2847 	 * respective eh_context structs. If, and only if,
2848 	 * park_req_pending.done is non-zero by the time we reach
2849 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
2850 	 * has been scheduled for at least one of the devices on port
2851 	 * ap and we have to cycle over the do {} while () loop in
2852 	 * ata_eh_recover() again.
2853 	 */
2854 
2855 	spin_lock_irqsave(ap->lock, flags);
2856 	INIT_COMPLETION(ap->park_req_pending);
2857 	ata_for_each_link(link, ap, EDGE) {
2858 		ata_for_each_dev(dev, link, ALL) {
2859 			struct ata_eh_info *ehi = &link->eh_info;
2860 
2861 			link->eh_context.i.dev_action[dev->devno] |=
2862 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
2863 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2864 		}
2865 	}
2866 	spin_unlock_irqrestore(ap->lock, flags);
2867 }
2868 
2869 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2870 {
2871 	struct ata_eh_context *ehc = &dev->link->eh_context;
2872 	struct ata_taskfile tf;
2873 	unsigned int err_mask;
2874 
2875 	ata_tf_init(dev, &tf);
2876 	if (park) {
2877 		ehc->unloaded_mask |= 1 << dev->devno;
2878 		tf.command = ATA_CMD_IDLEIMMEDIATE;
2879 		tf.feature = 0x44;	/* IDLE IMMEDIATE w/ UNLOAD FEATURE */
2880 		tf.lbal = 0x4c;		/* LBA 0x554e4c per ATA8-ACS; on */
2881 		tf.lbam = 0x4e;		/* success the device reports */
2882 		tf.lbah = 0x55;		/* lbal == 0xc4, checked below */
2883 	} else {
2884 		ehc->unloaded_mask &= ~(1 << dev->devno);
2885 		tf.command = ATA_CMD_CHK_POWER;
2886 	}
2887 
2888 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2889 	tf.protocol |= ATA_PROT_NODATA;
2890 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2891 	if (park && (err_mask || tf.lbal != 0xc4)) {
2892 		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2893 		ehc->unloaded_mask &= ~(1 << dev->devno);
2894 	}
2895 }
2896 
2897 static int ata_eh_revalidate_and_attach(struct ata_link *link,
2898 					struct ata_device **r_failed_dev)
2899 {
2900 	struct ata_port *ap = link->ap;
2901 	struct ata_eh_context *ehc = &link->eh_context;
2902 	struct ata_device *dev;
2903 	unsigned int new_mask = 0;
2904 	unsigned long flags;
2905 	int rc = 0;
2906 
2907 	DPRINTK("ENTER\n");
2908 
2909 	/* For PATA drive side cable detection to work, IDENTIFY must
2910 	 * be done backwards such that PDIAG- is released by the slave
2911 	 * device before the master device is identified.
2912 	 */
2913 	ata_for_each_dev(dev, link, ALL_REVERSE) {
2914 		unsigned int action = ata_eh_dev_action(dev);
2915 		unsigned int readid_flags = 0;
2916 
2917 		if (ehc->i.flags & ATA_EHI_DID_RESET)
2918 			readid_flags |= ATA_READID_POSTRESET;
2919 
2920 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2921 			WARN_ON(dev->class == ATA_DEV_PMP);
2922 
2923 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2924 				rc = -EIO;
2925 				goto err;
2926 			}
2927 
2928 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2929 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2930 						readid_flags);
2931 			if (rc)
2932 				goto err;
2933 
2934 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2935 
2936 			/* Configuration may have changed, reconfigure
2937 			 * transfer mode.
2938 			 */
2939 			ehc->i.flags |= ATA_EHI_SETMODE;
2940 
2941 			/* schedule the scsi_rescan_device() here */
2942 			queue_work(ata_aux_wq, &ap->scsi_rescan_task);
2943 		} else if (dev->class == ATA_DEV_UNKNOWN &&
2944 			   ehc->tries[dev->devno] &&
2945 			   ata_class_enabled(ehc->classes[dev->devno])) {
2946 			/* Temporarily set dev->class, it will be
2947 			 * permanently set once all configurations are
2948 			 * complete.  This is necessary because new
2949 			 * device configuration is done in two
2950 			 * separate loops.
2951 			 */
2952 			dev->class = ehc->classes[dev->devno];
2953 
2954 			if (dev->class == ATA_DEV_PMP)
2955 				rc = sata_pmp_attach(dev);
2956 			else
2957 				rc = ata_dev_read_id(dev, &dev->class,
2958 						     readid_flags, dev->id);
2959 
2960 			/* read_id might have changed class, store and reset */
2961 			ehc->classes[dev->devno] = dev->class;
2962 			dev->class = ATA_DEV_UNKNOWN;
2963 
2964 			switch (rc) {
2965 			case 0:
2966 				/* clear error info accumulated during probe */
2967 				ata_ering_clear(&dev->ering);
2968 				new_mask |= 1 << dev->devno;
2969 				break;
2970 			case -ENOENT:
2971 				/* IDENTIFY was issued to non-existent
2972 				 * device.  No need to reset.  Just
2973 				 * thaw and ignore the device.
2974 				 */
2975 				ata_eh_thaw_port(ap);
2976 				break;
2977 			default:
2978 				goto err;
2979 			}
2980 		}
2981 	}
2982 
2983 	/* PDIAG- should have been released, ask cable type if post-reset */
2984 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2985 		if (ap->ops->cable_detect)
2986 			ap->cbl = ap->ops->cable_detect(ap);
2987 		ata_force_cbl(ap);
2988 	}
2989 
2990 	/* Configure new devices forward such that user doesn't see
2991 	 * device detection messages backwards.
2992 	 */
2993 	ata_for_each_dev(dev, link, ALL) {
2994 		if (!(new_mask & (1 << dev->devno)))
2995 			continue;
2996 
2997 		dev->class = ehc->classes[dev->devno];
2998 
2999 		if (dev->class == ATA_DEV_PMP)
3000 			continue;
3001 
3002 		ehc->i.flags |= ATA_EHI_PRINTINFO;
3003 		rc = ata_dev_configure(dev);
3004 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3005 		if (rc) {
3006 			dev->class = ATA_DEV_UNKNOWN;
3007 			goto err;
3008 		}
3009 
3010 		spin_lock_irqsave(ap->lock, flags);
3011 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3012 		spin_unlock_irqrestore(ap->lock, flags);
3013 
3014 		/* new device discovered, configure xfermode */
3015 		ehc->i.flags |= ATA_EHI_SETMODE;
3016 	}
3017 
3018 	return 0;
3019 
3020  err:
3021 	*r_failed_dev = dev;
3022 	DPRINTK("EXIT rc=%d\n", rc);
3023 	return rc;
3024 }
3025 
3026 /**
3027  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
3028  *	@link: link on which timings will be programmed
3029  *	@r_failed_dev: out parameter for failed device
3030  *
3031  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3032  *	ata_set_mode() fails, pointer to the failing device is
3033  *	returned in @r_failed_dev.
3034  *
3035  *	LOCKING:
3036  *	PCI/etc. bus probe sem.
3037  *
3038  *	RETURNS:
3039  *	0 on success, negative errno otherwise
3040  */
3041 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3042 {
3043 	struct ata_port *ap = link->ap;
3044 	struct ata_device *dev;
3045 	int rc;
3046 
3047 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3048 	ata_for_each_dev(dev, link, ENABLED) {
3049 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3050 			struct ata_ering_entry *ent;
3051 
3052 			ent = ata_ering_top(&dev->ering);
3053 			if (ent)
3054 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3055 		}
3056 	}
3057 
3058 	/* has private set_mode? */
3059 	if (ap->ops->set_mode)
3060 		rc = ap->ops->set_mode(link, r_failed_dev);
3061 	else
3062 		rc = ata_do_set_mode(link, r_failed_dev);
3063 
3064 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
3065 	ata_for_each_dev(dev, link, ENABLED) {
3066 		struct ata_eh_context *ehc = &link->eh_context;
3067 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3068 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3069 
3070 		if (dev->xfer_mode != saved_xfer_mode ||
3071 		    ata_ncq_enabled(dev) != saved_ncq)
3072 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3073 	}
3074 
3075 	return rc;
3076 }
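
/*
 * Cross-reference note: the ATA_DFLAG_DUBIOUS_XFER flag set above is
 * what later makes ata_eh_link_autopsy() pass ATA_EFLAG_DUBIOUS_XFER
 * into ata_eh_speed_down(), so an error hit right after a mode change
 * lands in one of the DUBIOUS_* error categories.
 */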
3077 
3078 /**
3079  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3080  *	@dev: ATAPI device to clear UA for
3081  *
3082  *	Resets and other operations can make an ATAPI device raise
3083  *	UNIT ATTENTION which causes the next operation to fail.  This
3084  *	function clears UA.
3085  *
3086  *	LOCKING:
3087  *	EH context (may sleep).
3088  *
3089  *	RETURNS:
3090  *	0 on success, -errno on failure.
3091  */
3092 static int atapi_eh_clear_ua(struct ata_device *dev)
3093 {
3094 	int i;
3095 
3096 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3097 		u8 *sense_buffer = dev->link->ap->sector_buf;
3098 		u8 sense_key = 0;
3099 		unsigned int err_mask;
3100 
3101 		err_mask = atapi_eh_tur(dev, &sense_key);
3102 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3103 			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3104 				"failed (err_mask=0x%x)\n", err_mask);
3105 			return -EIO;
3106 		}
3107 
3108 		if (!err_mask || sense_key != UNIT_ATTENTION)
3109 			return 0;
3110 
3111 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3112 		if (err_mask) {
3113 			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3114 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3115 			return -EIO;
3116 		}
3117 	}
3118 
3119 	ata_dev_printk(dev, KERN_WARNING,
3120 		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3121 
3122 	return 0;
3123 }
3124 
3125 /**
3126  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3127  *	@dev: ATA device which may need FLUSH retry
3128  *
3129  *	If @dev failed FLUSH, it needs to be reported to the upper
3130  *	layer immediately as it means that @dev failed to remap and
3131  *	already lost at least a sector and further FLUSH retries won't
3132  *	make any difference to the lost sector.  However, if FLUSH
3133  *	failed for other reasons, for example a transmission error,
3134  *	FLUSH needs to be retried.
3135  *
3136  *	This function determines whether FLUSH failure retry is
3137  *	necessary and performs it if so.
3138  *
3139  *	RETURNS:
3140  *	0 if EH can continue, -errno if EH needs to be repeated.
3141  */
3142 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3143 {
3144 	struct ata_link *link = dev->link;
3145 	struct ata_port *ap = link->ap;
3146 	struct ata_queued_cmd *qc;
3147 	struct ata_taskfile tf;
3148 	unsigned int err_mask;
3149 	int rc = 0;
3150 
3151 	/* did flush fail for this device? */
3152 	if (!ata_tag_valid(link->active_tag))
3153 		return 0;
3154 
3155 	qc = __ata_qc_from_tag(ap, link->active_tag);
3156 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3157 			       qc->tf.command != ATA_CMD_FLUSH))
3158 		return 0;
3159 
3160 	/* if the device failed it, it should be reported to upper layers */
3161 	if (qc->err_mask & AC_ERR_DEV)
3162 		return 0;
3163 
3164 	/* flush failed for some other reason, give it another shot */
3165 	ata_tf_init(dev, &tf);
3166 
3167 	tf.command = qc->tf.command;
3168 	tf.flags |= ATA_TFLAG_DEVICE;
3169 	tf.protocol = ATA_PROT_NODATA;
3170 
3171 	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
3172 		       tf.command, qc->err_mask);
3173 
3174 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3175 	if (!err_mask) {
3176 		/*
3177 		 * FLUSH is complete but there's no way to
3178 		 * successfully complete a failed command from EH.
3179 		 * Making sure retry is allowed at least once and
3180 		 * retrying it should do the trick - whatever was in
3181 		 * the cache is already on the platter and this won't
3182 		 * cause an infinite loop.
3183 		 */
3184 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3185 	} else {
3186 		ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
3187 			       err_mask);
3188 		rc = -EIO;
3189 
3190 		/* if device failed it, report it to upper layers */
3191 		if (err_mask & AC_ERR_DEV) {
3192 			qc->err_mask |= AC_ERR_DEV;
3193 			qc->result_tf = tf;
3194 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
3195 				rc = 0;
3196 		}
3197 	}
3198 	return rc;
3199 }
3200 
3201 static int ata_link_nr_enabled(struct ata_link *link)
3202 {
3203 	struct ata_device *dev;
3204 	int cnt = 0;
3205 
3206 	ata_for_each_dev(dev, link, ENABLED)
3207 		cnt++;
3208 	return cnt;
3209 }
3210 
3211 static int ata_link_nr_vacant(struct ata_link *link)
3212 {
3213 	struct ata_device *dev;
3214 	int cnt = 0;
3215 
3216 	ata_for_each_dev(dev, link, ALL)
3217 		if (dev->class == ATA_DEV_UNKNOWN)
3218 			cnt++;
3219 	return cnt;
3220 }
3221 
3222 static int ata_eh_skip_recovery(struct ata_link *link)
3223 {
3224 	struct ata_port *ap = link->ap;
3225 	struct ata_eh_context *ehc = &link->eh_context;
3226 	struct ata_device *dev;
3227 
3228 	/* skip disabled links */
3229 	if (link->flags & ATA_LFLAG_DISABLED)
3230 		return 1;
3231 
3232 	/* thaw frozen port and recover failed devices */
3233 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3234 		return 0;
3235 
3236 	/* reset at least once if reset is requested */
3237 	if ((ehc->i.action & ATA_EH_RESET) &&
3238 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3239 		return 0;
3240 
3241 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
3242 	ata_for_each_dev(dev, link, ALL) {
3243 		if (dev->class == ATA_DEV_UNKNOWN &&
3244 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3245 			return 0;
3246 	}
3247 
3248 	return 1;
3249 }
3250 
3251 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3252 {
3253 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3254 	u64 now = get_jiffies_64();
3255 	int *trials = void_arg;
3256 
3257 	if (ent->timestamp < now - min(now, interval))
3258 		return -1;
3259 
3260 	(*trials)++;
3261 	return 0;
3262 }
3263 
3264 static int ata_eh_schedule_probe(struct ata_device *dev)
3265 {
3266 	struct ata_eh_context *ehc = &dev->link->eh_context;
3267 	struct ata_link *link = ata_dev_phys_link(dev);
3268 	int trials = 0;
3269 
3270 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3271 	    (ehc->did_probe_mask & (1 << dev->devno)))
3272 		return 0;
3273 
3274 	ata_eh_detach_dev(dev);
3275 	ata_dev_init(dev);
3276 	ehc->did_probe_mask |= (1 << dev->devno);
3277 	ehc->i.action |= ATA_EH_RESET;
3278 	ehc->saved_xfer_mode[dev->devno] = 0;
3279 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3280 
3281 	/* Record and count probe trials on the ering.  The specific
3282 	 * error mask used is irrelevant.  Because a successful device
3283 	 * detection clears the ering, this count accumulates only if
3284 	 * there are consecutive failed probes.
3285 	 *
3286 	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3287 	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3288 	 * forced to 1.5Gbps.
3289 	 *
3290 	 * This is to work around cases where failed link speed
3291 	 * negotiation results in device misdetection leading to
3292 	 * infinite DEVXCHG or PHYRDY CHG events.
3293 	 */
3294 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3295 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3296 
3297 	if (trials > ATA_EH_PROBE_TRIALS)
3298 		sata_down_spd_limit(link, 1);
3299 
3300 	return 1;
3301 }
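
/*
 * Worked example of the trial accounting above (with the current
 * ATA_EH_PROBE_TRIALS == 2 and a 60s ATA_EH_PROBE_TRIAL_INTERVAL):
 * every failed probe leaves one entry on the ering, so the third
 * consecutive failed probe within a minute makes trials == 3 > 2 and
 * clamps the link to 1.5Gbps via sata_down_spd_limit(link, 1).
 */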
3302 
3303 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3304 {
3305 	struct ata_eh_context *ehc = &dev->link->eh_context;
3306 
3307 	/* -EAGAIN from EH routine indicates retry without prejudice.
3308 	 * The requester is responsible for ensuring forward progress.
3309 	 */
3310 	if (err != -EAGAIN)
3311 		ehc->tries[dev->devno]--;
3312 
3313 	switch (err) {
3314 	case -ENODEV:
3315 		/* device missing or wrong IDENTIFY data, schedule probing */
3316 		ehc->i.probe_mask |= (1 << dev->devno);	/* fall through */
3317 	case -EINVAL:
3318 		/* give it just one more chance */
3319 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);	/* fall through */
3320 	case -EIO:
3321 		if (ehc->tries[dev->devno] == 1) {
3322 			/* This is the last chance, better to slow
3323 			 * down than lose it.
3324 			 */
3325 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3326 			if (dev->pio_mode > XFER_PIO_0)
3327 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3328 		}
3329 	}
3330 
3331 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3332 		/* disable device if it has used up all its chances */
3333 		ata_dev_disable(dev);
3334 
3335 		/* detach if offline */
3336 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3337 			ata_eh_detach_dev(dev);
3338 
3339 		/* schedule probe if necessary */
3340 		if (ata_eh_schedule_probe(dev)) {
3341 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3342 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
3343 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
3344 		}
3345 
3346 		return 1;
3347 	} else {
3348 		ehc->i.action |= ATA_EH_RESET;
3349 		return 0;
3350 	}
3351 }
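
/*
 * Worked example of the countdown above: tries start at
 * ATA_EH_DEV_TRIES (currently 3).  Each -EIO costs one try; once a
 * single try remains, the SATA link is speed-limited (and the device
 * clamped toward PIO) as a last resort, and when the tries are
 * exhausted the device is disabled or handed back to probing.
 * -EAGAIN never costs a try.
 */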
3352 
3353 /**
3354  *	ata_eh_recover - recover host port after error
3355  *	@ap: host port to recover
3356  *	@prereset: prereset method (can be NULL)
3357  *	@softreset: softreset method (can be NULL)
3358  *	@hardreset: hardreset method (can be NULL)
3359  *	@postreset: postreset method (can be NULL)
3360  *	@r_failed_link: out parameter for failed link
3361  *
3362  *	This is the alpha and omega, eum and yang, heart and soul of
3363  *	libata exception handling.  On entry, actions required to
3364  *	recover each link and hotplug requests are recorded in the
3365  *	link's eh_context.  This function executes all the operations
3366  *	with appropriate retries and fallbacks to resurrect failed
3367  *	devices, detach goners and greet newcomers.
3368  *
3369  *	LOCKING:
3370  *	Kernel thread context (may sleep).
3371  *
3372  *	RETURNS:
3373  *	0 on success, -errno on failure.
3374  */
3375 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3376 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3377 		   ata_postreset_fn_t postreset,
3378 		   struct ata_link **r_failed_link)
3379 {
3380 	struct ata_link *link;
3381 	struct ata_device *dev;
3382 	int nr_failed_devs;
3383 	int rc;
3384 	unsigned long flags, deadline;
3385 
3386 	DPRINTK("ENTER\n");
3387 
3388 	/* prep for recovery */
3389 	ata_for_each_link(link, ap, EDGE) {
3390 		struct ata_eh_context *ehc = &link->eh_context;
3391 
3392 		/* re-enable link? */
3393 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3394 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3395 			spin_lock_irqsave(ap->lock, flags);
3396 			link->flags &= ~ATA_LFLAG_DISABLED;
3397 			spin_unlock_irqrestore(ap->lock, flags);
3398 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3399 		}
3400 
3401 		ata_for_each_dev(dev, link, ALL) {
3402 			if (link->flags & ATA_LFLAG_NO_RETRY)
3403 				ehc->tries[dev->devno] = 1;
3404 			else
3405 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3406 
3407 			/* collect port action mask recorded in dev actions */
3408 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
3409 					 ~ATA_EH_PERDEV_MASK;
3410 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3411 
3412 			/* process hotplug request */
3413 			if (dev->flags & ATA_DFLAG_DETACH)
3414 				ata_eh_detach_dev(dev);
3415 
3416 			/* schedule probe if necessary */
3417 			if (!ata_dev_enabled(dev))
3418 				ata_eh_schedule_probe(dev);
3419 		}
3420 	}
3421 
3422  retry:
3423 	rc = 0;
3424 	nr_failed_devs = 0;
3425 
3426 	/* if UNLOADING, finish immediately */
3427 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3428 		goto out;
3429 
3430 	/* prep for EH */
3431 	ata_for_each_link(link, ap, EDGE) {
3432 		struct ata_eh_context *ehc = &link->eh_context;
3433 
3434 		/* skip EH if possible. */
3435 		if (ata_eh_skip_recovery(link))
3436 			ehc->i.action = 0;
3437 
3438 		ata_for_each_dev(dev, link, ALL)
3439 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3440 	}
3441 
3442 	/* reset */
3443 	ata_for_each_link(link, ap, EDGE) {
3444 		struct ata_eh_context *ehc = &link->eh_context;
3445 
3446 		if (!(ehc->i.action & ATA_EH_RESET))
3447 			continue;
3448 
3449 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3450 				  prereset, softreset, hardreset, postreset);
3451 		if (rc) {
3452 			ata_link_printk(link, KERN_ERR,
3453 					"reset failed, giving up\n");
3454 			goto out;
3455 		}
3456 	}
3457 
3458 	do {
3459 		unsigned long now;
3460 
3461 		/*
3462 		 * clears ATA_EH_PARK in eh_info and resets
3463 		 * ap->park_req_pending
3464 		 */
3465 		ata_eh_pull_park_action(ap);
3466 
3467 		deadline = jiffies;
3468 		ata_for_each_link(link, ap, EDGE) {
3469 			ata_for_each_dev(dev, link, ALL) {
3470 				struct ata_eh_context *ehc = &link->eh_context;
3471 				unsigned long tmp;
3472 
3473 				if (dev->class != ATA_DEV_ATA)
3474 					continue;
3475 				if (!(ehc->i.dev_action[dev->devno] &
3476 				      ATA_EH_PARK))
3477 					continue;
3478 				tmp = dev->unpark_deadline;
3479 				if (time_before(deadline, tmp))
3480 					deadline = tmp;
3481 				else if (time_before_eq(tmp, jiffies))
3482 					continue;
3483 				if (ehc->unloaded_mask & (1 << dev->devno))
3484 					continue;
3485 
3486 				ata_eh_park_issue_cmd(dev, 1);
3487 			}
3488 		}
3489 
3490 		now = jiffies;
3491 		if (time_before_eq(deadline, now))
3492 			break;
3493 
3494 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
3495 						       deadline - now);
3496 	} while (deadline);
3497 	ata_for_each_link(link, ap, EDGE) {
3498 		ata_for_each_dev(dev, link, ALL) {
3499 			if (!(link->eh_context.unloaded_mask &
3500 			      (1 << dev->devno)))
3501 				continue;
3502 
3503 			ata_eh_park_issue_cmd(dev, 0);
3504 			ata_eh_done(link, dev, ATA_EH_PARK);
3505 		}
3506 	}
3507 
3508 	/* the rest */
3509 	ata_for_each_link(link, ap, EDGE) {
3510 		struct ata_eh_context *ehc = &link->eh_context;
3511 
3512 		/* revalidate existing devices and attach new ones */
3513 		rc = ata_eh_revalidate_and_attach(link, &dev);
3514 		if (rc)
3515 			goto dev_fail;
3516 
3517 		/* if PMP got attached, return, pmp EH will take care of it */
3518 		if (link->device->class == ATA_DEV_PMP) {
3519 			ehc->i.action = 0;
3520 			return 0;
3521 		}
3522 
3523 		/* configure transfer mode if necessary */
3524 		if (ehc->i.flags & ATA_EHI_SETMODE) {
3525 			rc = ata_set_mode(link, &dev);
3526 			if (rc)
3527 				goto dev_fail;
3528 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3529 		}
3530 
3531 		/* If reset has been issued, clear UA to avoid
3532 		 * disrupting the current users of the device.
3533 		 */
3534 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
3535 			ata_for_each_dev(dev, link, ALL) {
3536 				if (dev->class != ATA_DEV_ATAPI)
3537 					continue;
3538 				rc = atapi_eh_clear_ua(dev);
3539 				if (rc)
3540 					goto dev_fail;
3541 			}
3542 		}
3543 
3544 		/* retry flush if necessary */
3545 		ata_for_each_dev(dev, link, ALL) {
3546 			if (dev->class != ATA_DEV_ATA)
3547 				continue;
3548 			rc = ata_eh_maybe_retry_flush(dev);
3549 			if (rc)
3550 				goto dev_fail;
3551 		}
3552 
3553 		/* configure link power saving */
3554 		if (ehc->i.action & ATA_EH_LPM)
3555 			ata_for_each_dev(dev, link, ALL)
3556 				ata_dev_enable_pm(dev, ap->pm_policy);
3557 
3558 		/* this link is okay now */
3559 		ehc->i.flags = 0;
3560 		continue;
3561 
3562 dev_fail:
3563 		nr_failed_devs++;
3564 		ata_eh_handle_dev_fail(dev, rc);
3565 
3566 		if (ap->pflags & ATA_PFLAG_FROZEN) {
3567 			/* PMP reset requires working host port.
3568 			 * Can't retry if it's frozen.
3569 			 */
3570 			if (sata_pmp_attached(ap))
3571 				goto out;
3572 			break;
3573 		}
3574 	}
3575 
3576 	if (nr_failed_devs)
3577 		goto retry;
3578 
3579  out:
3580 	if (rc && r_failed_link)
3581 		*r_failed_link = link;
3582 
3583 	DPRINTK("EXIT, rc=%d\n", rc);
3584 	return rc;
3585 }
3586 
3587 /**
3588  *	ata_eh_finish - finish up EH
3589  *	@ap: host port to finish EH for
3590  *
3591  *	Recovery is complete.  Clean up EH states and retry or finish
3592  *	failed qcs.
3593  *
3594  *	LOCKING:
3595  *	None.
3596  */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
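
/* For reference, ata_eh_finish() above disposes of each failed qc as
 * follows (summary of the code, not an additional contract):
 *
 *	err_mask set,   ATA_QCFLAG_RETRY set	-> retry the command
 *	err_mask set,   ATA_QCFLAG_RETRY clear	-> complete as failed
 *	err_mask clear, sense data valid	-> complete with that sense
 *	err_mask clear, no valid sense		-> zero result_tf and retry
 */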

/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}
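
/* A LLD needing its own reset methods typically wraps ata_do_eh() from
 * its ->error_handler.  Minimal sketch assuming a hypothetical driver
 * hardreset (my_hardreset is not a real symbol; ata_std_prereset and
 * ata_std_postreset are stock libata helpers):
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, NULL, my_hardreset,
 *			  ata_std_postreset);
 *	}
 */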

/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler.  Invokes ata_do_eh() with the reset
 *	methods from @ap's port operations, ignoring the built-in
 *	hardreset when SCR access is unavailable.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
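
/* Most drivers never call ata_std_error_handler() directly; they
 * inherit it through one of the stock port_ops, which already wire up
 * ->error_handler.  Illustrative sketch (my_port_ops and my_hardreset
 * are hypothetical; sata_port_ops is a real libata symbol):
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *	};
 */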

#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);
}
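
/* The PM requests handled here are queued from the driver model side;
 * roughly, a suspend request amounts to (simplified sketch, see the
 * host suspend/resume helpers in libata-core.c for the real handshake):
 *
 *	ap->pm_mesg = mesg;			// what to transition to
 *	ap->pm_result = &rc;			// where EH reports back
 *	ap->pflags |= ATA_PFLAG_PM_PENDING;
 *	ata_port_schedule_eh(ap);		// the EH thread lands here
 */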

/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies, which doesn't advance while
	 * the machine is suspended, and PHY events during resume aren't
	 * too uncommon.  Combined, the two can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear the error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */