xref: /linux/drivers/ata/libata-eh.c (revision 176000734ee2978121fde22a954eb1eabb204329)
1c82ee6d3SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2c6fd2807SJeff Garzik /*
3c6fd2807SJeff Garzik  *  libata-eh.c - libata error handling
4c6fd2807SJeff Garzik  *
5c6fd2807SJeff Garzik  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
6c6fd2807SJeff Garzik  *
7c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
89bb9a39cSMauro Carvalho Chehab  *  as Documentation/driver-api/libata.rst
9c6fd2807SJeff Garzik  *
10c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
11c6fd2807SJeff Garzik  *  http://www.sata-io.org/
12c6fd2807SJeff Garzik  */
13c6fd2807SJeff Garzik 
14c6fd2807SJeff Garzik #include <linux/kernel.h>
15242f9dcbSJens Axboe #include <linux/blkdev.h>
1638789fdaSPaul Gortmaker #include <linux/export.h>
172855568bSJeff Garzik #include <linux/pci.h>
18c6fd2807SJeff Garzik #include <scsi/scsi.h>
19c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
20c6fd2807SJeff Garzik #include <scsi/scsi_eh.h>
21c6fd2807SJeff Garzik #include <scsi/scsi_device.h>
22c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
236521148cSRobert Hancock #include <scsi/scsi_dbg.h>
24c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h"
25c6fd2807SJeff Garzik 
26c6fd2807SJeff Garzik #include <linux/libata.h>
27c6fd2807SJeff Garzik 
28255c03d1SHannes Reinecke #include <trace/events/libata.h>
29c6fd2807SJeff Garzik #include "libata.h"
30c6fd2807SJeff Garzik 
317d47e8d4STejun Heo enum {
323884f7b0STejun Heo 	/* speed down verdicts */
337d47e8d4STejun Heo 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
347d47e8d4STejun Heo 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
357d47e8d4STejun Heo 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
3676326ac1STejun Heo 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
373884f7b0STejun Heo 
383884f7b0STejun Heo 	/* error flags */
393884f7b0STejun Heo 	ATA_EFLAG_IS_IO			= (1 << 0),
4076326ac1STejun Heo 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
41d9027470SGwendal Grignou 	ATA_EFLAG_OLD_ER                = (1 << 31),
423884f7b0STejun Heo 
433884f7b0STejun Heo 	/* error categories */
443884f7b0STejun Heo 	ATA_ECAT_NONE			= 0,
453884f7b0STejun Heo 	ATA_ECAT_ATA_BUS		= 1,
463884f7b0STejun Heo 	ATA_ECAT_TOUT_HSM		= 2,
473884f7b0STejun Heo 	ATA_ECAT_UNK_DEV		= 3,
4875f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_NONE		= 4,
4975f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
5075f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
5175f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
5275f9cafcSTejun Heo 	ATA_ECAT_NR			= 8,
537d47e8d4STejun Heo 
5487fbc5a0STejun Heo 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
5587fbc5a0STejun Heo 
560a2c0f56STejun Heo 	/* always put at least this amount of time between resets */
570a2c0f56STejun Heo 	ATA_EH_RESET_COOL_DOWN		=  5000,
580a2c0f56STejun Heo 
59341c2c95STejun Heo 	/* Waiting in ->prereset can never be reliable.  It's
60341c2c95STejun Heo 	 * sometimes nice to wait there but it can't be depended upon;
61341c2c95STejun Heo 	 * otherwise, we wouldn't be resetting.  Just give it enough
62341c2c95STejun Heo 	 * time for most drives to spin up.
6331daabdaSTejun Heo 	 */
64341c2c95STejun Heo 	ATA_EH_PRERESET_TIMEOUT		= 10000,
65341c2c95STejun Heo 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
6611fc33daSTejun Heo 
6711fc33daSTejun Heo 	ATA_EH_UA_TRIES			= 5,
68c2c7a89cSTejun Heo 
69c2c7a89cSTejun Heo 	/* probe speed down parameters, see ata_eh_schedule_probe() */
70c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
71c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIALS		= 2,
7231daabdaSTejun Heo };
7331daabdaSTejun Heo 
7431daabdaSTejun Heo /* The following table determines how we sequence resets.  Each entry
7531daabdaSTejun Heo  * represents timeout for that try.  The first try can be soft or
7631daabdaSTejun Heo  * hardreset.  All others are hardreset if available.  In most cases
7731daabdaSTejun Heo  * the first reset w/ 10sec timeout should succeed.  Following entries
7835bf8821SDan Williams  * are mostly for error handling, hotplug and those outlier devices that
7935bf8821SDan Williams  * take an exceptionally long time to recover from reset.
8031daabdaSTejun Heo  */
81ca02f225SSergey Shtylyov static const unsigned int ata_eh_reset_timeouts[] = {
82341c2c95STejun Heo 	10000,	/* most drives spin up by 10sec */
83341c2c95STejun Heo 	10000,	/* > 99% working drives spin up before 20sec */
8435bf8821SDan Williams 	35000,	/* give > 30 secs of idleness for outlier devices */
85341c2c95STejun Heo 	 5000,	/* and sweet one last chance */
86ca02f225SSergey Shtylyov 	UINT_MAX, /* > 1 min has elapsed, give up */
8731daabdaSTejun Heo };
8831daabdaSTejun Heo 
89e06233f9SSergey Shtylyov static const unsigned int ata_eh_identify_timeouts[] = {
9087fbc5a0STejun Heo 	 5000,	/* covers > 99% of successes and not too boring on failures */
9187fbc5a0STejun Heo 	10000,  /* combined time till here is enough even for media access */
9287fbc5a0STejun Heo 	30000,	/* for true idiots */
93e06233f9SSergey Shtylyov 	UINT_MAX,
9487fbc5a0STejun Heo };
9587fbc5a0STejun Heo 
96e06233f9SSergey Shtylyov static const unsigned int ata_eh_revalidate_timeouts[] = {
9768dbbe7dSDamien Le Moal 	15000,	/* Some drives are slow to read log pages when waking-up */
9868dbbe7dSDamien Le Moal 	15000,  /* combined time till here is enough even for media access */
99e06233f9SSergey Shtylyov 	UINT_MAX,
10068dbbe7dSDamien Le Moal };
10168dbbe7dSDamien Le Moal 
102e06233f9SSergey Shtylyov static const unsigned int ata_eh_flush_timeouts[] = {
1036013efd8STejun Heo 	15000,	/* be generous with flush */
1046013efd8STejun Heo 	15000,  /* ditto */
1056013efd8STejun Heo 	30000,	/* and even more generous */
106e06233f9SSergey Shtylyov 	UINT_MAX,
1076013efd8STejun Heo };
1086013efd8STejun Heo 
109e06233f9SSergey Shtylyov static const unsigned int ata_eh_other_timeouts[] = {
11087fbc5a0STejun Heo 	 5000,	/* same rationale as identify timeout */
11187fbc5a0STejun Heo 	10000,	/* ditto */
11287fbc5a0STejun Heo 	/* but no merciful 30sec for other commands, it just isn't worth it */
113e06233f9SSergey Shtylyov 	UINT_MAX,
11487fbc5a0STejun Heo };
11587fbc5a0STejun Heo 
11687fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent {
11787fbc5a0STejun Heo 	const u8		*commands;
118e06233f9SSergey Shtylyov 	const unsigned int	*timeouts;
11987fbc5a0STejun Heo };
12087fbc5a0STejun Heo 
12187fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal
12287fbc5a0STejun Heo  * commands.  Each table entry is a command class and matches the
12387fbc5a0STejun Heo  * commands the entry applies to and the timeout table to use.
12487fbc5a0STejun Heo  *
12587fbc5a0STejun Heo  * On the retry after a command timed out, the next timeout value from
12687fbc5a0STejun Heo  * the table is used.  If the table doesn't contain further entries,
12787fbc5a0STejun Heo  * the last value is used.
12887fbc5a0STejun Heo  *
12987fbc5a0STejun Heo  * ehc->cmd_timeout_idx keeps track of which timeout to use per
13087fbc5a0STejun Heo  * command class, so if SET_FEATURES times out on the first try, the
13187fbc5a0STejun Heo  * next try will use the second timeout value only for that class.
13287fbc5a0STejun Heo  */
13387fbc5a0STejun Heo #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
13487fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent
13587fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
13687fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
13787fbc5a0STejun Heo 	  .timeouts = ata_eh_identify_timeouts, },
13868dbbe7dSDamien Le Moal 	{ .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
13968dbbe7dSDamien Le Moal 	  .timeouts = ata_eh_revalidate_timeouts, },
14087fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
14187fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
14287fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
14387fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
14487fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
14587fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
14687fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
14787fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
1486013efd8STejun Heo 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
1496013efd8STejun Heo 	  .timeouts = ata_eh_flush_timeouts },
150aa3998dbSDamien Le Moal 	{ .commands = CMDS(ATA_CMD_VERIFY),
151aa3998dbSDamien Le Moal 	  .timeouts = ata_eh_reset_timeouts },
15287fbc5a0STejun Heo };
15387fbc5a0STejun Heo #undef CMDS
15487fbc5a0STejun Heo 
155c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap);
15671d7b6e5SNiklas Cassel static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
15771d7b6e5SNiklas Cassel 			  struct ata_device **r_failed_dev);
1586ffa01d8STejun Heo #ifdef CONFIG_PM
159c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap);
160c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap);
1616ffa01d8STejun Heo #else /* CONFIG_PM */
1626ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap)
1636ffa01d8STejun Heo { }
1646ffa01d8STejun Heo 
1656ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap)
1666ffa01d8STejun Heo { }
1676ffa01d8STejun Heo #endif /* CONFIG_PM */
168c6fd2807SJeff Garzik 
1690d74d872SMathieu Malaterre static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
1700d74d872SMathieu Malaterre 				 const char *fmt, va_list args)
171b64bbc39STejun Heo {
172b64bbc39STejun Heo 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
173b64bbc39STejun Heo 				     ATA_EH_DESC_LEN - ehi->desc_len,
174b64bbc39STejun Heo 				     fmt, args);
175b64bbc39STejun Heo }
176b64bbc39STejun Heo 
177b64bbc39STejun Heo /**
178b64bbc39STejun Heo  *	__ata_ehi_push_desc - push error description without adding separator
179b64bbc39STejun Heo  *	@ehi: target EHI
180b64bbc39STejun Heo  *	@fmt: printf format string
181b64bbc39STejun Heo  *
182b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
183b64bbc39STejun Heo  *
184b64bbc39STejun Heo  *	LOCKING:
185b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
186b64bbc39STejun Heo  */
187b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
188b64bbc39STejun Heo {
189b64bbc39STejun Heo 	va_list args;
190b64bbc39STejun Heo 
191b64bbc39STejun Heo 	va_start(args, fmt);
192b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
193b64bbc39STejun Heo 	va_end(args);
194b64bbc39STejun Heo }
195a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
196b64bbc39STejun Heo 
197b64bbc39STejun Heo /**
198b64bbc39STejun Heo  *	ata_ehi_push_desc - push error description with separator
199b64bbc39STejun Heo  *	@ehi: target EHI
200b64bbc39STejun Heo  *	@fmt: printf format string
201b64bbc39STejun Heo  *
202b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
203b64bbc39STejun Heo  *	If @ehi->desc is not empty, ", " is added in-between.
204b64bbc39STejun Heo  *
205b64bbc39STejun Heo  *	LOCKING:
206b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
207b64bbc39STejun Heo  */
208b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
209b64bbc39STejun Heo {
210b64bbc39STejun Heo 	va_list args;
211b64bbc39STejun Heo 
212b64bbc39STejun Heo 	if (ehi->desc_len)
213b64bbc39STejun Heo 		__ata_ehi_push_desc(ehi, ", ");
214b64bbc39STejun Heo 
215b64bbc39STejun Heo 	va_start(args, fmt);
216b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
217b64bbc39STejun Heo 	va_end(args);
218b64bbc39STejun Heo }
219a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
220b64bbc39STejun Heo 
221b64bbc39STejun Heo /**
222b64bbc39STejun Heo  *	ata_ehi_clear_desc - clean error description
223b64bbc39STejun Heo  *	@ehi: target EHI
224b64bbc39STejun Heo  *
225b64bbc39STejun Heo  *	Clear @ehi->desc.
226b64bbc39STejun Heo  *
227b64bbc39STejun Heo  *	LOCKING:
228b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
229b64bbc39STejun Heo  */
230b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi)
231b64bbc39STejun Heo {
232b64bbc39STejun Heo 	ehi->desc[0] = '\0';
233b64bbc39STejun Heo 	ehi->desc_len = 0;
234b64bbc39STejun Heo }
235a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
236b64bbc39STejun Heo 
237cbcdd875STejun Heo /**
238cbcdd875STejun Heo  *	ata_port_desc - append port description
239cbcdd875STejun Heo  *	@ap: target ATA port
240cbcdd875STejun Heo  *	@fmt: printf format string
241cbcdd875STejun Heo  *
242cbcdd875STejun Heo  *	Format string according to @fmt and append it to port
243cbcdd875STejun Heo  *	description.  If port description is not empty, " " is added
244cbcdd875STejun Heo  *	in-between.  This function is to be used while initializing
245cbcdd875STejun Heo  *	ata_host.  The description is printed on host registration.
246cbcdd875STejun Heo  *
247cbcdd875STejun Heo  *	LOCKING:
248cbcdd875STejun Heo  *	None.
249cbcdd875STejun Heo  */
250cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
251cbcdd875STejun Heo {
252cbcdd875STejun Heo 	va_list args;
253cbcdd875STejun Heo 
254cbcdd875STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
255cbcdd875STejun Heo 
256cbcdd875STejun Heo 	if (ap->link.eh_info.desc_len)
257cbcdd875STejun Heo 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
258cbcdd875STejun Heo 
259cbcdd875STejun Heo 	va_start(args, fmt);
260cbcdd875STejun Heo 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
261cbcdd875STejun Heo 	va_end(args);
262cbcdd875STejun Heo }
263a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_desc);
264cbcdd875STejun Heo 
265cbcdd875STejun Heo #ifdef CONFIG_PCI
266cbcdd875STejun Heo /**
267cbcdd875STejun Heo  *	ata_port_pbar_desc - append PCI BAR description
268cbcdd875STejun Heo  *	@ap: target ATA port
269cbcdd875STejun Heo  *	@bar: target PCI BAR
270cbcdd875STejun Heo  *	@offset: offset into PCI BAR
271cbcdd875STejun Heo  *	@name: name of the area
272cbcdd875STejun Heo  *
273cbcdd875STejun Heo  *	If @offset is negative, this function formats a string which
274cbcdd875STejun Heo  *	contains the name, address, size and type of the BAR and
275cbcdd875STejun Heo  *	appends it to the port description.  If @offset is zero or
276cbcdd875STejun Heo  *	positive, only name and offsetted address is appended.
277cbcdd875STejun Heo  *
278cbcdd875STejun Heo  *	LOCKING:
279cbcdd875STejun Heo  *	None.
280cbcdd875STejun Heo  */
281cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
282cbcdd875STejun Heo 			const char *name)
283cbcdd875STejun Heo {
284cbcdd875STejun Heo 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
285cbcdd875STejun Heo 	char *type = "";
286cbcdd875STejun Heo 	unsigned long long start, len;
287cbcdd875STejun Heo 
288cbcdd875STejun Heo 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
289cbcdd875STejun Heo 		type = "m";
290cbcdd875STejun Heo 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
291cbcdd875STejun Heo 		type = "i";
292cbcdd875STejun Heo 
293cbcdd875STejun Heo 	start = (unsigned long long)pci_resource_start(pdev, bar);
294cbcdd875STejun Heo 	len = (unsigned long long)pci_resource_len(pdev, bar);
295cbcdd875STejun Heo 
296cbcdd875STejun Heo 	if (offset < 0)
297cbcdd875STejun Heo 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
298cbcdd875STejun Heo 	else
299e6a73ab1SAndrew Morton 		ata_port_desc(ap, "%s 0x%llx", name,
300e6a73ab1SAndrew Morton 				start + (unsigned long long)offset);
301cbcdd875STejun Heo }
302a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
303cbcdd875STejun Heo #endif /* CONFIG_PCI */
304cbcdd875STejun Heo 
30587fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd)
30687fbc5a0STejun Heo {
30787fbc5a0STejun Heo 	int i;
30887fbc5a0STejun Heo 
30987fbc5a0STejun Heo 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
31087fbc5a0STejun Heo 		const u8 *cur;
31187fbc5a0STejun Heo 
31287fbc5a0STejun Heo 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
31387fbc5a0STejun Heo 			if (*cur == cmd)
31487fbc5a0STejun Heo 				return i;
31587fbc5a0STejun Heo 	}
31687fbc5a0STejun Heo 
31787fbc5a0STejun Heo 	return -1;
31887fbc5a0STejun Heo }
31987fbc5a0STejun Heo 
32087fbc5a0STejun Heo /**
32187fbc5a0STejun Heo  *	ata_internal_cmd_timeout - determine timeout for an internal command
32287fbc5a0STejun Heo  *	@dev: target device
32387fbc5a0STejun Heo  *	@cmd: internal command to be issued
32487fbc5a0STejun Heo  *
32587fbc5a0STejun Heo  *	Determine timeout for internal command @cmd for @dev.
32687fbc5a0STejun Heo  *
32787fbc5a0STejun Heo  *	LOCKING:
32887fbc5a0STejun Heo  *	EH context.
32987fbc5a0STejun Heo  *
33087fbc5a0STejun Heo  *	RETURNS:
33187fbc5a0STejun Heo  *	Determined timeout.
33287fbc5a0STejun Heo  */
333e06233f9SSergey Shtylyov unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
33487fbc5a0STejun Heo {
33587fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
33687fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
33787fbc5a0STejun Heo 	int idx;
33887fbc5a0STejun Heo 
33987fbc5a0STejun Heo 	if (ent < 0)
34087fbc5a0STejun Heo 		return ATA_EH_CMD_DFL_TIMEOUT;
34187fbc5a0STejun Heo 
34287fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
34387fbc5a0STejun Heo 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
34487fbc5a0STejun Heo }
34587fbc5a0STejun Heo 
34687fbc5a0STejun Heo /**
34787fbc5a0STejun Heo  *	ata_internal_cmd_timed_out - notification for internal command timeout
34887fbc5a0STejun Heo  *	@dev: target device
34987fbc5a0STejun Heo  *	@cmd: internal command which timed out
35087fbc5a0STejun Heo  *
35187fbc5a0STejun Heo  *	Notify EH that internal command @cmd for @dev timed out.  This
35287fbc5a0STejun Heo  *	function should be called only for commands whose timeouts are
35387fbc5a0STejun Heo  *	determined using ata_internal_cmd_timeout().
35487fbc5a0STejun Heo  *
35587fbc5a0STejun Heo  *	LOCKING:
35687fbc5a0STejun Heo  *	EH context.
35787fbc5a0STejun Heo  */
35887fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
35987fbc5a0STejun Heo {
36087fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
36187fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
36287fbc5a0STejun Heo 	int idx;
36387fbc5a0STejun Heo 
36487fbc5a0STejun Heo 	if (ent < 0)
36587fbc5a0STejun Heo 		return;
36687fbc5a0STejun Heo 
36787fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
368e06233f9SSergey Shtylyov 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX)
36987fbc5a0STejun Heo 		ehc->cmd_timeout_idx[dev->devno][ent]++;
37087fbc5a0STejun Heo }
37187fbc5a0STejun Heo 
3723884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
373c6fd2807SJeff Garzik 			     unsigned int err_mask)
374c6fd2807SJeff Garzik {
375c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
376c6fd2807SJeff Garzik 
377c6fd2807SJeff Garzik 	WARN_ON(!err_mask);
378c6fd2807SJeff Garzik 
379c6fd2807SJeff Garzik 	ering->cursor++;
380c6fd2807SJeff Garzik 	ering->cursor %= ATA_ERING_SIZE;
381c6fd2807SJeff Garzik 
382c6fd2807SJeff Garzik 	ent = &ering->ring[ering->cursor];
3833884f7b0STejun Heo 	ent->eflags = eflags;
384c6fd2807SJeff Garzik 	ent->err_mask = err_mask;
385c6fd2807SJeff Garzik 	ent->timestamp = get_jiffies_64();
386c6fd2807SJeff Garzik }
387c6fd2807SJeff Garzik 
38876326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
38976326ac1STejun Heo {
39076326ac1STejun Heo 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
39176326ac1STejun Heo 
39276326ac1STejun Heo 	if (ent->err_mask)
39376326ac1STejun Heo 		return ent;
39476326ac1STejun Heo 	return NULL;
39576326ac1STejun Heo }
39676326ac1STejun Heo 
397d9027470SGwendal Grignou int ata_ering_map(struct ata_ering *ering,
398c6fd2807SJeff Garzik 		  int (*map_fn)(struct ata_ering_entry *, void *),
399c6fd2807SJeff Garzik 		  void *arg)
400c6fd2807SJeff Garzik {
401c6fd2807SJeff Garzik 	int idx, rc = 0;
402c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
403c6fd2807SJeff Garzik 
404c6fd2807SJeff Garzik 	idx = ering->cursor;
405c6fd2807SJeff Garzik 	do {
406c6fd2807SJeff Garzik 		ent = &ering->ring[idx];
407c6fd2807SJeff Garzik 		if (!ent->err_mask)
408c6fd2807SJeff Garzik 			break;
409c6fd2807SJeff Garzik 		rc = map_fn(ent, arg);
410c6fd2807SJeff Garzik 		if (rc)
411c6fd2807SJeff Garzik 			break;
412c6fd2807SJeff Garzik 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
413c6fd2807SJeff Garzik 	} while (idx != ering->cursor);
414c6fd2807SJeff Garzik 
415c6fd2807SJeff Garzik 	return rc;
416c6fd2807SJeff Garzik }
417c6fd2807SJeff Garzik 
41860428407SH Hartley Sweeten static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
419d9027470SGwendal Grignou {
420d9027470SGwendal Grignou 	ent->eflags |= ATA_EFLAG_OLD_ER;
421d9027470SGwendal Grignou 	return 0;
422d9027470SGwendal Grignou }
423d9027470SGwendal Grignou 
424d9027470SGwendal Grignou static void ata_ering_clear(struct ata_ering *ering)
425d9027470SGwendal Grignou {
426d9027470SGwendal Grignou 	ata_ering_map(ering, ata_ering_clear_cb, NULL);
427d9027470SGwendal Grignou }
428d9027470SGwendal Grignou 
429c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev)
430c6fd2807SJeff Garzik {
4319af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
432c6fd2807SJeff Garzik 
433c6fd2807SJeff Garzik 	return ehc->i.action | ehc->i.dev_action[dev->devno];
434c6fd2807SJeff Garzik }
435c6fd2807SJeff Garzik 
436f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
437c6fd2807SJeff Garzik 				struct ata_eh_info *ehi, unsigned int action)
438c6fd2807SJeff Garzik {
439f58229f8STejun Heo 	struct ata_device *tdev;
440c6fd2807SJeff Garzik 
441c6fd2807SJeff Garzik 	if (!dev) {
442c6fd2807SJeff Garzik 		ehi->action &= ~action;
4431eca4365STejun Heo 		ata_for_each_dev(tdev, link, ALL)
444f58229f8STejun Heo 			ehi->dev_action[tdev->devno] &= ~action;
445c6fd2807SJeff Garzik 	} else {
446c6fd2807SJeff Garzik 		/* doesn't make sense for port-wide EH actions */
447c6fd2807SJeff Garzik 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
448c6fd2807SJeff Garzik 
449c6fd2807SJeff Garzik 		/* break ehi->action into ehi->dev_action */
450c6fd2807SJeff Garzik 		if (ehi->action & action) {
4511eca4365STejun Heo 			ata_for_each_dev(tdev, link, ALL)
452f58229f8STejun Heo 				ehi->dev_action[tdev->devno] |=
453f58229f8STejun Heo 					ehi->action & action;
454c6fd2807SJeff Garzik 			ehi->action &= ~action;
455c6fd2807SJeff Garzik 		}
456c6fd2807SJeff Garzik 
457c6fd2807SJeff Garzik 		/* turn off the specified per-dev action */
458c6fd2807SJeff Garzik 		ehi->dev_action[dev->devno] &= ~action;
459c6fd2807SJeff Garzik 	}
460c6fd2807SJeff Garzik }
461c6fd2807SJeff Garzik 
462c6fd2807SJeff Garzik /**
463c0c362b6STejun Heo  *	ata_eh_acquire - acquire EH ownership
464c0c362b6STejun Heo  *	@ap: ATA port to acquire EH ownership for
465c0c362b6STejun Heo  *
466c0c362b6STejun Heo  *	Acquire EH ownership for @ap.  This is the basic exclusion
467c0c362b6STejun Heo  *	mechanism for ports sharing a host.  Only one port hanging off
468c0c362b6STejun Heo  *	the same host can claim the ownership of EH.
469c0c362b6STejun Heo  *
470c0c362b6STejun Heo  *	LOCKING:
471c0c362b6STejun Heo  *	EH context.
472c0c362b6STejun Heo  */
473c0c362b6STejun Heo void ata_eh_acquire(struct ata_port *ap)
474c0c362b6STejun Heo {
475c0c362b6STejun Heo 	mutex_lock(&ap->host->eh_mutex);
476c0c362b6STejun Heo 	WARN_ON_ONCE(ap->host->eh_owner);
477c0c362b6STejun Heo 	ap->host->eh_owner = current;
478c0c362b6STejun Heo }
479c0c362b6STejun Heo 
480c0c362b6STejun Heo /**
481c0c362b6STejun Heo  *	ata_eh_release - release EH ownership
482c0c362b6STejun Heo  *	@ap: ATA port to release EH ownership for
483c0c362b6STejun Heo  *
484c0c362b6STejun Heo  *	Release EH ownership for @ap if the caller.  The caller must
485c0c362b6STejun Heo  *	have acquired EH ownership using ata_eh_acquire() previously.
486c0c362b6STejun Heo  *
487c0c362b6STejun Heo  *	LOCKING:
488c0c362b6STejun Heo  *	EH context.
489c0c362b6STejun Heo  */
490c0c362b6STejun Heo void ata_eh_release(struct ata_port *ap)
491c0c362b6STejun Heo {
492c0c362b6STejun Heo 	WARN_ON_ONCE(ap->host->eh_owner != current);
493c0c362b6STejun Heo 	ap->host->eh_owner = NULL;
494c0c362b6STejun Heo 	mutex_unlock(&ap->host->eh_mutex);
495c0c362b6STejun Heo }
496c0c362b6STejun Heo 
4970fecb508SDamien Le Moal static void ata_eh_dev_disable(struct ata_device *dev)
4980fecb508SDamien Le Moal {
4990fecb508SDamien Le Moal 	ata_acpi_on_disable(dev);
5000fecb508SDamien Le Moal 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
5010fecb508SDamien Le Moal 	dev->class++;
5020fecb508SDamien Le Moal 
5035f8319c4SDamien Le Moal 	/*
5045f8319c4SDamien Le Moal 	 * From now till the next successful probe, ering is used to
5050fecb508SDamien Le Moal 	 * track probe failures.  Clear accumulated device error info.
5060fecb508SDamien Le Moal 	 */
5070fecb508SDamien Le Moal 	ata_ering_clear(&dev->ering);
5085f8319c4SDamien Le Moal 
5095f8319c4SDamien Le Moal 	ata_dev_free_resources(dev);
5100fecb508SDamien Le Moal }
5110fecb508SDamien Le Moal 
512ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap)
513ece180d1STejun Heo {
514ece180d1STejun Heo 	struct ata_link *link;
515ece180d1STejun Heo 	struct ata_device *dev;
516ece180d1STejun Heo 	unsigned long flags;
517ece180d1STejun Heo 
518aa3998dbSDamien Le Moal 	/*
519aa3998dbSDamien Le Moal 	 * Unless we are restarting, transition all enabled devices to
520aa3998dbSDamien Le Moal 	 * standby power mode.
521aa3998dbSDamien Le Moal 	 */
522aa3998dbSDamien Le Moal 	if (system_state != SYSTEM_RESTART) {
523aa3998dbSDamien Le Moal 		ata_for_each_link(link, ap, PMP_FIRST) {
524aa3998dbSDamien Le Moal 			ata_for_each_dev(dev, link, ENABLED)
525aa3998dbSDamien Le Moal 				ata_dev_power_set_standby(dev);
526aa3998dbSDamien Le Moal 		}
527aa3998dbSDamien Le Moal 	}
528aa3998dbSDamien Le Moal 
529aa3998dbSDamien Le Moal 	/*
530aa3998dbSDamien Le Moal 	 * Restore SControl IPM and SPD for the next driver and
531ece180d1STejun Heo 	 * disable attached devices.
532ece180d1STejun Heo 	 */
533ece180d1STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
534ece180d1STejun Heo 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
5350fecb508SDamien Le Moal 		ata_for_each_dev(dev, link, ENABLED)
5360fecb508SDamien Le Moal 			ata_eh_dev_disable(dev);
537ece180d1STejun Heo 	}
538ece180d1STejun Heo 
539ece180d1STejun Heo 	/* freeze and set UNLOADED */
540ece180d1STejun Heo 	spin_lock_irqsave(ap->lock, flags);
541ece180d1STejun Heo 
542ece180d1STejun Heo 	ata_port_freeze(ap);			/* won't be thawed */
543ece180d1STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
544ece180d1STejun Heo 	ap->pflags |= ATA_PFLAG_UNLOADED;
545ece180d1STejun Heo 
546ece180d1STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
547ece180d1STejun Heo }
548ece180d1STejun Heo 
549c6fd2807SJeff Garzik /**
550c6fd2807SJeff Garzik  *	ata_scsi_error - SCSI layer error handler callback
551c6fd2807SJeff Garzik  *	@host: SCSI host on which error occurred
552c6fd2807SJeff Garzik  *
553c6fd2807SJeff Garzik  *	Handles SCSI-layer-thrown error events.
554c6fd2807SJeff Garzik  *
555c6fd2807SJeff Garzik  *	LOCKING:
556c6fd2807SJeff Garzik  *	Inherited from SCSI layer (none, can sleep)
557c6fd2807SJeff Garzik  *
558c6fd2807SJeff Garzik  *	RETURNS:
559c6fd2807SJeff Garzik  *	Zero.
560c6fd2807SJeff Garzik  */
561c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host)
562c6fd2807SJeff Garzik {
563c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
564c6fd2807SJeff Garzik 	unsigned long flags;
565c34aeebcSJames Bottomley 	LIST_HEAD(eh_work_q);
566c6fd2807SJeff Garzik 
567c34aeebcSJames Bottomley 	spin_lock_irqsave(host->host_lock, flags);
568c34aeebcSJames Bottomley 	list_splice_init(&host->eh_cmd_q, &eh_work_q);
569c34aeebcSJames Bottomley 	spin_unlock_irqrestore(host->host_lock, flags);
570c34aeebcSJames Bottomley 
5710e0b494cSJames Bottomley 	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
5720e0b494cSJames Bottomley 
5730e0b494cSJames Bottomley 	/* If we timed raced normal completion and there is nothing to
5740e0b494cSJames Bottomley 	   recover nr_timedout == 0 why exactly are we doing error recovery ? */
5750e0b494cSJames Bottomley 	ata_scsi_port_error_handler(host, ap);
5760e0b494cSJames Bottomley 
5770e0b494cSJames Bottomley 	/* finish or retry handled scmd's and clean up */
57872d8c36eSWei Fang 	WARN_ON(!list_empty(&eh_work_q));
5790e0b494cSJames Bottomley 
5800e0b494cSJames Bottomley }
5810e0b494cSJames Bottomley 
5820e0b494cSJames Bottomley /**
5830e0b494cSJames Bottomley  * ata_scsi_cmd_error_handler - error callback for a list of commands
5840e0b494cSJames Bottomley  * @host:	scsi host containing the port
5850e0b494cSJames Bottomley  * @ap:		ATA port within the host
5860e0b494cSJames Bottomley  * @eh_work_q:	list of commands to process
5870e0b494cSJames Bottomley  *
5880e0b494cSJames Bottomley  * process the given list of commands and return those finished to the
5890e0b494cSJames Bottomley  * ap->eh_done_q.  This function is the first part of the libata error
5900e0b494cSJames Bottomley  * handler which processes a given list of failed commands.
5910e0b494cSJames Bottomley  */
5920e0b494cSJames Bottomley void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
5930e0b494cSJames Bottomley 				struct list_head *eh_work_q)
5940e0b494cSJames Bottomley {
5950e0b494cSJames Bottomley 	int i;
5960e0b494cSJames Bottomley 	unsigned long flags;
597b83ad9eeSWenchao Hao 	struct scsi_cmnd *scmd, *tmp;
598b83ad9eeSWenchao Hao 	int nr_timedout = 0;
5990e0b494cSJames Bottomley 
600c429137aSTejun Heo 	/* make sure sff pio task is not running */
601c429137aSTejun Heo 	ata_sff_flush_pio_task(ap);
602c6fd2807SJeff Garzik 
603cca3974eSJeff Garzik 	/* synchronize with host lock and sort out timeouts */
604c6fd2807SJeff Garzik 
605b83ad9eeSWenchao Hao 	/*
606ff8072d5SHannes Reinecke 	 * For EH, all qcs are finished in one of three ways -
607c6fd2807SJeff Garzik 	 * normal completion, error completion, and SCSI timeout.
608c96f1732SAlan Cox 	 * Both completions can race against SCSI timeout.  When normal
609c6fd2807SJeff Garzik 	 * completion wins, the qc never reaches EH.  When error
61087629312SNiklas Cassel 	 * completion wins, the qc has ATA_QCFLAG_EH set.
611c6fd2807SJeff Garzik 	 *
612c6fd2807SJeff Garzik 	 * When SCSI timeout wins, things are a bit more complex.
613c6fd2807SJeff Garzik 	 * Normal or error completion can occur after the timeout but
614c6fd2807SJeff Garzik 	 * before this point.  In such cases, both types of
615c6fd2807SJeff Garzik 	 * completions are honored.  A scmd is determined to have
616c6fd2807SJeff Garzik 	 * timed out iff its associated qc is active and not failed.
617c6fd2807SJeff Garzik 	 */
618a4f08141SPaul E. McKenney 	spin_lock_irqsave(ap->lock, flags);
619c6fd2807SJeff Garzik 
620b83ad9eeSWenchao Hao 	/*
621b83ad9eeSWenchao Hao 	 * This must occur under the ap->lock as we don't want
622b83ad9eeSWenchao Hao 	 * a polled recovery to race the real interrupt handler
623b83ad9eeSWenchao Hao 	 *
624b83ad9eeSWenchao Hao 	 * The lost_interrupt handler checks for any completed but
625b83ad9eeSWenchao Hao 	 * non-notified command and completes much like an IRQ handler.
626b83ad9eeSWenchao Hao 	 *
627b83ad9eeSWenchao Hao 	 * We then fall into the error recovery code which will treat
628b83ad9eeSWenchao Hao 	 * this as if normal completion won the race
629b83ad9eeSWenchao Hao 	 */
630c96f1732SAlan Cox 	if (ap->ops->lost_interrupt)
631c96f1732SAlan Cox 		ap->ops->lost_interrupt(ap);
632c96f1732SAlan Cox 
6330e0b494cSJames Bottomley 	list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
634c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
635c6fd2807SJeff Garzik 
636*e5dd410aSNiklas Cassel 		/*
637*e5dd410aSNiklas Cassel 		 * If the scmd was added to EH, via ata_qc_schedule_eh() ->
638*e5dd410aSNiklas Cassel 		 * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will
639*e5dd410aSNiklas Cassel 		 * have set DID_TIME_OUT (since libata does not have an abort
640*e5dd410aSNiklas Cassel 		 * handler). Thus, to clear DID_TIME_OUT, clear the host byte.
641*e5dd410aSNiklas Cassel 		 */
642*e5dd410aSNiklas Cassel 		set_host_byte(scmd, DID_OK);
643*e5dd410aSNiklas Cassel 
644258c4e5cSJens Axboe 		ata_qc_for_each_raw(ap, qc, i) {
645c6fd2807SJeff Garzik 			if (qc->flags & ATA_QCFLAG_ACTIVE &&
646c6fd2807SJeff Garzik 			    qc->scsicmd == scmd)
647c6fd2807SJeff Garzik 				break;
648c6fd2807SJeff Garzik 		}
649c6fd2807SJeff Garzik 
650c6fd2807SJeff Garzik 		if (i < ATA_MAX_QUEUE) {
651c6fd2807SJeff Garzik 			/* the scmd has an associated qc */
65287629312SNiklas Cassel 			if (!(qc->flags & ATA_QCFLAG_EH)) {
653c6fd2807SJeff Garzik 				/* which hasn't failed yet, timeout */
654c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_TIMEOUT;
65587629312SNiklas Cassel 				qc->flags |= ATA_QCFLAG_EH;
656c6fd2807SJeff Garzik 				nr_timedout++;
657c6fd2807SJeff Garzik 			}
658c6fd2807SJeff Garzik 		} else {
659c6fd2807SJeff Garzik 			/* Normal completion occurred after
660c6fd2807SJeff Garzik 			 * SCSI timeout but before this point.
661c6fd2807SJeff Garzik 			 * Successfully complete it.
662c6fd2807SJeff Garzik 			 */
663c6fd2807SJeff Garzik 			scmd->retries = scmd->allowed;
664c6fd2807SJeff Garzik 			scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
665c6fd2807SJeff Garzik 		}
666c6fd2807SJeff Garzik 	}
667c6fd2807SJeff Garzik 
668b83ad9eeSWenchao Hao 	/*
669b83ad9eeSWenchao Hao 	 * If we have timed out qcs.  They belong to EH from
670c6fd2807SJeff Garzik 	 * this point but the state of the controller is
671c6fd2807SJeff Garzik 	 * unknown.  Freeze the port to make sure the IRQ
672c6fd2807SJeff Garzik 	 * handler doesn't diddle with those qcs.  This must
67387629312SNiklas Cassel 	 * be done atomically w.r.t. setting ATA_QCFLAG_EH.
674c6fd2807SJeff Garzik 	 */
675c6fd2807SJeff Garzik 	if (nr_timedout)
676c6fd2807SJeff Garzik 		__ata_port_freeze(ap);
677c6fd2807SJeff Garzik 
678a1e10f7eSTejun Heo 	/* initialize eh_tries */
679a1e10f7eSTejun Heo 	ap->eh_tries = ATA_EH_MAX_TRIES;
680c6fd2807SJeff Garzik 
681b83ad9eeSWenchao Hao 	spin_unlock_irqrestore(ap->lock, flags);
6820e0b494cSJames Bottomley }
6830e0b494cSJames Bottomley EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
6840e0b494cSJames Bottomley 
6850e0b494cSJames Bottomley /**
6860e0b494cSJames Bottomley  * ata_scsi_port_error_handler - recover the port after the commands
6870e0b494cSJames Bottomley  * @host:	SCSI host containing the port
6880e0b494cSJames Bottomley  * @ap:		the ATA port
6890e0b494cSJames Bottomley  *
6900e0b494cSJames Bottomley  * Handle the recovery of the port @ap after all the commands
6910e0b494cSJames Bottomley  * have been recovered.
6920e0b494cSJames Bottomley  */
6930e0b494cSJames Bottomley void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
6940e0b494cSJames Bottomley {
6950e0b494cSJames Bottomley 	unsigned long flags;
696cf1b86c8STejun Heo 	struct ata_link *link;
697cf1b86c8STejun Heo 
698c0c362b6STejun Heo 	/* acquire EH ownership */
699c0c362b6STejun Heo 	ata_eh_acquire(ap);
700c0c362b6STejun Heo  repeat:
7015ddf24c5STejun Heo 	/* kill fast drain timer */
7025ddf24c5STejun Heo 	del_timer_sync(&ap->fastdrain_timer);
7035ddf24c5STejun Heo 
704c6fd2807SJeff Garzik 	/* process port resume request */
705c6fd2807SJeff Garzik 	ata_eh_handle_port_resume(ap);
706c6fd2807SJeff Garzik 
707c6fd2807SJeff Garzik 	/* fetch & clear EH info */
708c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
709c6fd2807SJeff Garzik 
7101eca4365STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST) {
71100115e0fSTejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
71200115e0fSTejun Heo 		struct ata_device *dev;
71300115e0fSTejun Heo 
714cf1b86c8STejun Heo 		memset(&link->eh_context, 0, sizeof(link->eh_context));
715cf1b86c8STejun Heo 		link->eh_context.i = link->eh_info;
716cf1b86c8STejun Heo 		memset(&link->eh_info, 0, sizeof(link->eh_info));
71700115e0fSTejun Heo 
7181eca4365STejun Heo 		ata_for_each_dev(dev, link, ENABLED) {
71900115e0fSTejun Heo 			int devno = dev->devno;
72000115e0fSTejun Heo 
72100115e0fSTejun Heo 			ehc->saved_xfer_mode[devno] = dev->xfer_mode;
72200115e0fSTejun Heo 			if (ata_ncq_enabled(dev))
72300115e0fSTejun Heo 				ehc->saved_ncq_enabled |= 1 << devno;
724aa3998dbSDamien Le Moal 
725aa3998dbSDamien Le Moal 			/* If we are resuming, wake up the device */
7260c76106cSDamien Le Moal 			if (ap->pflags & ATA_PFLAG_RESUMING) {
7270c76106cSDamien Le Moal 				dev->flags |= ATA_DFLAG_RESUMING;
728aa3998dbSDamien Le Moal 				ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
72900115e0fSTejun Heo 			}
730cf1b86c8STejun Heo 		}
7310c76106cSDamien Le Moal 	}
732c6fd2807SJeff Garzik 
733c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
734c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
735da917d69STejun Heo 	ap->excl_link = NULL;	/* don't maintain exclusion over EH */
736c6fd2807SJeff Garzik 
737c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
738c6fd2807SJeff Garzik 
739c6fd2807SJeff Garzik 	/* invoke EH, skip if unloading or suspended */
740c6fd2807SJeff Garzik 	if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
741c6fd2807SJeff Garzik 		ap->ops->error_handler(ap);
742ece180d1STejun Heo 	else {
743ece180d1STejun Heo 		/* if unloading, commence suicide */
744ece180d1STejun Heo 		if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
745ece180d1STejun Heo 		    !(ap->pflags & ATA_PFLAG_UNLOADED))
746ece180d1STejun Heo 			ata_eh_unload(ap);
747c6fd2807SJeff Garzik 		ata_eh_finish(ap);
748ece180d1STejun Heo 	}
749c6fd2807SJeff Garzik 
750c6fd2807SJeff Garzik 	/* process port suspend request */
751c6fd2807SJeff Garzik 	ata_eh_handle_port_suspend(ap);
752c6fd2807SJeff Garzik 
753ff8072d5SHannes Reinecke 	/*
754ff8072d5SHannes Reinecke 	 * Exception might have happened after ->error_handler recovered the
755ff8072d5SHannes Reinecke 	 * port but before this point.  Repeat EH in such case.
756c6fd2807SJeff Garzik 	 */
757c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
758c6fd2807SJeff Garzik 
759c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_EH_PENDING) {
760a1e10f7eSTejun Heo 		if (--ap->eh_tries) {
761c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
762c6fd2807SJeff Garzik 			goto repeat;
763c6fd2807SJeff Garzik 		}
764a9a79dfeSJoe Perches 		ata_port_err(ap,
765a9a79dfeSJoe Perches 			     "EH pending after %d tries, giving up\n",
766a9a79dfeSJoe Perches 			     ATA_EH_MAX_TRIES);
767914616a3STejun Heo 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
768c6fd2807SJeff Garzik 	}
769c6fd2807SJeff Garzik 
770c6fd2807SJeff Garzik 	/* this run is complete, make sure EH info is clear */
7711eca4365STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
772cf1b86c8STejun Heo 		memset(&link->eh_info, 0, sizeof(link->eh_info));
773c6fd2807SJeff Garzik 
774ff8072d5SHannes Reinecke 	/*
775ff8072d5SHannes Reinecke 	 * end eh (clear host_eh_scheduled) while holding ap->lock such that if
776ff8072d5SHannes Reinecke 	 * exception occurs after this point but before EH completion, SCSI
777ff8072d5SHannes Reinecke 	 * midlayer will re-initiate EH.
778c6fd2807SJeff Garzik 	 */
779e4a9c373SDan Williams 	ap->ops->end_eh(ap);
780c6fd2807SJeff Garzik 
781c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
782c0c362b6STejun Heo 	ata_eh_release(ap);
783c6fd2807SJeff Garzik 
784c6fd2807SJeff Garzik 	scsi_eh_flush_done_q(&ap->eh_done_q);
785c6fd2807SJeff Garzik 
786c6fd2807SJeff Garzik 	/* clean up */
787c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
788c6fd2807SJeff Garzik 
789aa3998dbSDamien Le Moal 	ap->pflags &= ~ATA_PFLAG_RESUMING;
790aa3998dbSDamien Le Moal 
791c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_LOADING)
792c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_LOADING;
7936f54120eSJason Yan 	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
7946f54120eSJason Yan 		!(ap->flags & ATA_FLAG_SAS_HOST))
795ad72cf98STejun Heo 		schedule_delayed_work(&ap->hotplug_task, 0);
796c6fd2807SJeff Garzik 
797c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_RECOVERED)
798a9a79dfeSJoe Perches 		ata_port_info(ap, "EH complete\n");
799c6fd2807SJeff Garzik 
800c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
801c6fd2807SJeff Garzik 
802c6fd2807SJeff Garzik 	/* tell wait_eh that we're done */
803c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
804c6fd2807SJeff Garzik 	wake_up_all(&ap->eh_wait_q);
805c6fd2807SJeff Garzik 
806c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
807c6fd2807SJeff Garzik }
8080e0b494cSJames Bottomley EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
809c6fd2807SJeff Garzik 
810c6fd2807SJeff Garzik /**
811c6fd2807SJeff Garzik  *	ata_port_wait_eh - Wait for the currently pending EH to complete
812c6fd2807SJeff Garzik  *	@ap: Port to wait EH for
813c6fd2807SJeff Garzik  *
814c6fd2807SJeff Garzik  *	Wait until the currently pending EH is complete.
815c6fd2807SJeff Garzik  *
816c6fd2807SJeff Garzik  *	LOCKING:
817c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
818c6fd2807SJeff Garzik  */
819c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap)
820c6fd2807SJeff Garzik {
821c6fd2807SJeff Garzik 	unsigned long flags;
822c6fd2807SJeff Garzik 	DEFINE_WAIT(wait);
823c6fd2807SJeff Garzik 
824c6fd2807SJeff Garzik  retry:
825c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
826c6fd2807SJeff Garzik 
827c6fd2807SJeff Garzik 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
828c6fd2807SJeff Garzik 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
829c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
830c6fd2807SJeff Garzik 		schedule();
831c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
832c6fd2807SJeff Garzik 	}
833c6fd2807SJeff Garzik 	finish_wait(&ap->eh_wait_q, &wait);
834c6fd2807SJeff Garzik 
835c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
836c6fd2807SJeff Garzik 
837c6fd2807SJeff Garzik 	/* make sure SCSI EH is complete */
838cca3974eSJeff Garzik 	if (scsi_host_in_recovery(ap->scsi_host)) {
83997750cebSTejun Heo 		ata_msleep(ap, 10);
840c6fd2807SJeff Garzik 		goto retry;
841c6fd2807SJeff Garzik 	}
842c6fd2807SJeff Garzik }
84381c757bcSDan Williams EXPORT_SYMBOL_GPL(ata_port_wait_eh);
844c6fd2807SJeff Garzik 
845afae461aSSergey Shtylyov static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
8465ddf24c5STejun Heo {
847258c4e5cSJens Axboe 	struct ata_queued_cmd *qc;
8485ddf24c5STejun Heo 	unsigned int tag;
849afae461aSSergey Shtylyov 	unsigned int nr = 0;
8505ddf24c5STejun Heo 
8515ddf24c5STejun Heo 	/* count only non-internal commands */
852258c4e5cSJens Axboe 	ata_qc_for_each(ap, qc, tag) {
853258c4e5cSJens Axboe 		if (qc)
8545ddf24c5STejun Heo 			nr++;
8559d207accSJens Axboe 	}
8565ddf24c5STejun Heo 
8575ddf24c5STejun Heo 	return nr;
8585ddf24c5STejun Heo }
8595ddf24c5STejun Heo 
860b93ab338SKees Cook void ata_eh_fastdrain_timerfn(struct timer_list *t)
8615ddf24c5STejun Heo {
862b93ab338SKees Cook 	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
8635ddf24c5STejun Heo 	unsigned long flags;
864afae461aSSergey Shtylyov 	unsigned int cnt;
8655ddf24c5STejun Heo 
8665ddf24c5STejun Heo 	spin_lock_irqsave(ap->lock, flags);
8675ddf24c5STejun Heo 
8685ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
8695ddf24c5STejun Heo 
8705ddf24c5STejun Heo 	/* are we done? */
8715ddf24c5STejun Heo 	if (!cnt)
8725ddf24c5STejun Heo 		goto out_unlock;
8735ddf24c5STejun Heo 
8745ddf24c5STejun Heo 	if (cnt == ap->fastdrain_cnt) {
875258c4e5cSJens Axboe 		struct ata_queued_cmd *qc;
8765ddf24c5STejun Heo 		unsigned int tag;
8775ddf24c5STejun Heo 
8785ddf24c5STejun Heo 		/* No progress during the last interval, tag all
8795ddf24c5STejun Heo 		 * in-flight qcs as timed out and freeze the port.
8805ddf24c5STejun Heo 		 */
881258c4e5cSJens Axboe 		ata_qc_for_each(ap, qc, tag) {
8825ddf24c5STejun Heo 			if (qc)
8835ddf24c5STejun Heo 				qc->err_mask |= AC_ERR_TIMEOUT;
8845ddf24c5STejun Heo 		}
8855ddf24c5STejun Heo 
8865ddf24c5STejun Heo 		ata_port_freeze(ap);
8875ddf24c5STejun Heo 	} else {
8885ddf24c5STejun Heo 		/* some qcs have finished, give it another chance */
8895ddf24c5STejun Heo 		ap->fastdrain_cnt = cnt;
8905ddf24c5STejun Heo 		ap->fastdrain_timer.expires =
891341c2c95STejun Heo 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
8925ddf24c5STejun Heo 		add_timer(&ap->fastdrain_timer);
8935ddf24c5STejun Heo 	}
8945ddf24c5STejun Heo 
8955ddf24c5STejun Heo  out_unlock:
8965ddf24c5STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
8975ddf24c5STejun Heo }
8985ddf24c5STejun Heo 
8995ddf24c5STejun Heo /**
9005ddf24c5STejun Heo  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
9015ddf24c5STejun Heo  *	@ap: target ATA port
9025ddf24c5STejun Heo  *	@fastdrain: activate fast drain
9035ddf24c5STejun Heo  *
9045ddf24c5STejun Heo  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
9055ddf24c5STejun Heo  *	is non-zero and EH wasn't pending before.  Fast drain ensures
9065ddf24c5STejun Heo  *	that EH kicks in in timely manner.
9075ddf24c5STejun Heo  *
9085ddf24c5STejun Heo  *	LOCKING:
9095ddf24c5STejun Heo  *	spin_lock_irqsave(host lock)
9105ddf24c5STejun Heo  */
9115ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
9125ddf24c5STejun Heo {
913afae461aSSergey Shtylyov 	unsigned int cnt;
9145ddf24c5STejun Heo 
9155ddf24c5STejun Heo 	/* already scheduled? */
9165ddf24c5STejun Heo 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
9175ddf24c5STejun Heo 		return;
9185ddf24c5STejun Heo 
9195ddf24c5STejun Heo 	ap->pflags |= ATA_PFLAG_EH_PENDING;
9205ddf24c5STejun Heo 
9215ddf24c5STejun Heo 	if (!fastdrain)
9225ddf24c5STejun Heo 		return;
9235ddf24c5STejun Heo 
9245ddf24c5STejun Heo 	/* do we have in-flight qcs? */
9255ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
9265ddf24c5STejun Heo 	if (!cnt)
9275ddf24c5STejun Heo 		return;
9285ddf24c5STejun Heo 
9295ddf24c5STejun Heo 	/* activate fast drain */
9305ddf24c5STejun Heo 	ap->fastdrain_cnt = cnt;
931341c2c95STejun Heo 	ap->fastdrain_timer.expires =
932341c2c95STejun Heo 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
9335ddf24c5STejun Heo 	add_timer(&ap->fastdrain_timer);
9345ddf24c5STejun Heo }
9355ddf24c5STejun Heo 
936c6fd2807SJeff Garzik /**
937c6fd2807SJeff Garzik  *	ata_qc_schedule_eh - schedule qc for error handling
938c6fd2807SJeff Garzik  *	@qc: command to schedule error handling for
939c6fd2807SJeff Garzik  *
940c6fd2807SJeff Garzik  *	Schedule error handling for @qc.  EH will kick in as soon as
941c6fd2807SJeff Garzik  *	other commands are drained.
942c6fd2807SJeff Garzik  *
943c6fd2807SJeff Garzik  *	LOCKING:
944cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
945c6fd2807SJeff Garzik  */
946c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
947c6fd2807SJeff Garzik {
948c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
949c6fd2807SJeff Garzik 
95087629312SNiklas Cassel 	qc->flags |= ATA_QCFLAG_EH;
9515ddf24c5STejun Heo 	ata_eh_set_pending(ap, 1);
952c6fd2807SJeff Garzik 
953c6fd2807SJeff Garzik 	/* The following will fail if timeout has already expired.
954c6fd2807SJeff Garzik 	 * ata_scsi_error() takes care of such scmds on EH entry.
95587629312SNiklas Cassel 	 * Note that ATA_QCFLAG_EH is unconditionally set after
956c6fd2807SJeff Garzik 	 * this function completes.
957c6fd2807SJeff Garzik 	 */
958c8329cd5SBart Van Assche 	blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
959c6fd2807SJeff Garzik }
960c6fd2807SJeff Garzik 
961c6fd2807SJeff Garzik /**
962e4a9c373SDan Williams  * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
963e4a9c373SDan Williams  * @ap: ATA port to schedule EH for
964e4a9c373SDan Williams  *
965e4a9c373SDan Williams  *	LOCKING: inherited from ata_port_schedule_eh
966e4a9c373SDan Williams  *	spin_lock_irqsave(host lock)
967e4a9c373SDan Williams  */
968e4a9c373SDan Williams void ata_std_sched_eh(struct ata_port *ap)
969e4a9c373SDan Williams {
970e4a9c373SDan Williams 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
971e4a9c373SDan Williams 		return;
972e4a9c373SDan Williams 
973e4a9c373SDan Williams 	ata_eh_set_pending(ap, 1);
974e4a9c373SDan Williams 	scsi_schedule_eh(ap->scsi_host);
975e4a9c373SDan Williams 
976c318458cSHannes Reinecke 	trace_ata_std_sched_eh(ap);
977e4a9c373SDan Williams }
978e4a9c373SDan Williams EXPORT_SYMBOL_GPL(ata_std_sched_eh);
979e4a9c373SDan Williams 
980e4a9c373SDan Williams /**
981e4a9c373SDan Williams  * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
982e4a9c373SDan Williams  * @ap: ATA port to end EH for
983e4a9c373SDan Williams  *
984e4a9c373SDan Williams  * In the libata object model there is a 1:1 mapping of ata_port to
985e4a9c373SDan Williams  * shost, so host fields can be directly manipulated under ap->lock, in
986e4a9c373SDan Williams  * the libsas case we need to hold a lock at the ha->level to coordinate
987e4a9c373SDan Williams  * these events.
988e4a9c373SDan Williams  *
989e4a9c373SDan Williams  *	LOCKING:
990e4a9c373SDan Williams  *	spin_lock_irqsave(host lock)
991e4a9c373SDan Williams  */
992e4a9c373SDan Williams void ata_std_end_eh(struct ata_port *ap)
993e4a9c373SDan Williams {
994e4a9c373SDan Williams 	struct Scsi_Host *host = ap->scsi_host;
995e4a9c373SDan Williams 
996e4a9c373SDan Williams 	host->host_eh_scheduled = 0;
997e4a9c373SDan Williams }
998e4a9c373SDan Williams EXPORT_SYMBOL(ata_std_end_eh);
999e4a9c373SDan Williams 
1000e4a9c373SDan Williams 
1001e4a9c373SDan Williams /**
1002c6fd2807SJeff Garzik  *	ata_port_schedule_eh - schedule error handling without a qc
1003c6fd2807SJeff Garzik  *	@ap: ATA port to schedule EH for
1004c6fd2807SJeff Garzik  *
1005c6fd2807SJeff Garzik  *	Schedule error handling for @ap.  EH will kick in as soon as
1006c6fd2807SJeff Garzik  *	all commands are drained.
1007c6fd2807SJeff Garzik  *
1008c6fd2807SJeff Garzik  *	LOCKING:
1009cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1010c6fd2807SJeff Garzik  */
1011c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap)
1012c6fd2807SJeff Garzik {
1013e4a9c373SDan Williams 	/* see: ata_std_sched_eh, unless you know better */
1014e4a9c373SDan Williams 	ap->ops->sched_eh(ap);
1015c6fd2807SJeff Garzik }
1016a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
1017c6fd2807SJeff Garzik 
1018dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1019c6fd2807SJeff Garzik {
1020258c4e5cSJens Axboe 	struct ata_queued_cmd *qc;
1021c6fd2807SJeff Garzik 	int tag, nr_aborted = 0;
1022c6fd2807SJeff Garzik 
10235ddf24c5STejun Heo 	/* we're gonna abort all commands, no need for fast drain */
10245ddf24c5STejun Heo 	ata_eh_set_pending(ap, 0);
10255ddf24c5STejun Heo 
102628361c40SJens Axboe 	/* include internal tag in iteration */
1027258c4e5cSJens Axboe 	ata_qc_for_each_with_internal(ap, qc, tag) {
1028dbd82616STejun Heo 		if (qc && (!link || qc->dev->link == link)) {
102987629312SNiklas Cassel 			qc->flags |= ATA_QCFLAG_EH;
1030c6fd2807SJeff Garzik 			ata_qc_complete(qc);
1031c6fd2807SJeff Garzik 			nr_aborted++;
1032c6fd2807SJeff Garzik 		}
1033c6fd2807SJeff Garzik 	}
1034c6fd2807SJeff Garzik 
1035c6fd2807SJeff Garzik 	if (!nr_aborted)
1036c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
1037c6fd2807SJeff Garzik 
1038c6fd2807SJeff Garzik 	return nr_aborted;
1039c6fd2807SJeff Garzik }
1040c6fd2807SJeff Garzik 
1041c6fd2807SJeff Garzik /**
1042dbd82616STejun Heo  *	ata_link_abort - abort all qc's on the link
1043dbd82616STejun Heo  *	@link: ATA link to abort qc's for
1044dbd82616STejun Heo  *
1045dbd82616STejun Heo  *	Abort all active qc's active on @link and schedule EH.
1046dbd82616STejun Heo  *
1047dbd82616STejun Heo  *	LOCKING:
1048dbd82616STejun Heo  *	spin_lock_irqsave(host lock)
1049dbd82616STejun Heo  *
1050dbd82616STejun Heo  *	RETURNS:
1051dbd82616STejun Heo  *	Number of aborted qc's.
1052dbd82616STejun Heo  */
1053dbd82616STejun Heo int ata_link_abort(struct ata_link *link)
1054dbd82616STejun Heo {
1055dbd82616STejun Heo 	return ata_do_link_abort(link->ap, link);
1056dbd82616STejun Heo }
1057a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_link_abort);
1058dbd82616STejun Heo 
1059dbd82616STejun Heo /**
1060dbd82616STejun Heo  *	ata_port_abort - abort all qc's on the port
1061dbd82616STejun Heo  *	@ap: ATA port to abort qc's for
1062dbd82616STejun Heo  *
1063dbd82616STejun Heo  *	Abort all active qc's of @ap and schedule EH.
1064dbd82616STejun Heo  *
1065dbd82616STejun Heo  *	LOCKING:
1066dbd82616STejun Heo  *	spin_lock_irqsave(host_set lock)
1067dbd82616STejun Heo  *
1068dbd82616STejun Heo  *	RETURNS:
1069dbd82616STejun Heo  *	Number of aborted qc's.
1070dbd82616STejun Heo  */
1071dbd82616STejun Heo int ata_port_abort(struct ata_port *ap)
1072dbd82616STejun Heo {
1073dbd82616STejun Heo 	return ata_do_link_abort(ap, NULL);
1074dbd82616STejun Heo }
1075a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_abort);
1076dbd82616STejun Heo 
1077dbd82616STejun Heo /**
1078c6fd2807SJeff Garzik  *	__ata_port_freeze - freeze port
1079c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1080c6fd2807SJeff Garzik  *
1081c6fd2807SJeff Garzik  *	This function is called when HSM violation or some other
1082c6fd2807SJeff Garzik  *	condition disrupts normal operation of the port.  Frozen port
1083c6fd2807SJeff Garzik  *	is not allowed to perform any operation until the port is
1084c6fd2807SJeff Garzik  *	thawed, which usually follows a successful reset.
1085c6fd2807SJeff Garzik  *
1086c6fd2807SJeff Garzik  *	ap->ops->freeze() callback can be used for freezing the port
1087c6fd2807SJeff Garzik  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
1088c6fd2807SJeff Garzik  *	port cannot be frozen hardware-wise, the interrupt handler
1089c6fd2807SJeff Garzik  *	must ack and clear interrupts unconditionally while the port
1090c6fd2807SJeff Garzik  *	is frozen.
1091c6fd2807SJeff Garzik  *
1092c6fd2807SJeff Garzik  *	LOCKING:
1093cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1094c6fd2807SJeff Garzik  */
1095c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap)
1096c6fd2807SJeff Garzik {
1097c6fd2807SJeff Garzik 	if (ap->ops->freeze)
1098c6fd2807SJeff Garzik 		ap->ops->freeze(ap);
1099c6fd2807SJeff Garzik 
1100c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_FROZEN;
1101c6fd2807SJeff Garzik 
1102c318458cSHannes Reinecke 	trace_ata_port_freeze(ap);
1103c6fd2807SJeff Garzik }
1104c6fd2807SJeff Garzik 
1105c6fd2807SJeff Garzik /**
1106c6fd2807SJeff Garzik  *	ata_port_freeze - abort & freeze port
1107c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1108c6fd2807SJeff Garzik  *
110954c38444SJeff Garzik  *	Abort and freeze @ap.  The freeze operation must be called
111054c38444SJeff Garzik  *	first, because some hardware requires special operations
111154c38444SJeff Garzik  *	before the taskfile registers are accessible.
1112c6fd2807SJeff Garzik  *
1113c6fd2807SJeff Garzik  *	LOCKING:
1114cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1115c6fd2807SJeff Garzik  *
1116c6fd2807SJeff Garzik  *	RETURNS:
1117c6fd2807SJeff Garzik  *	Number of aborted commands.
1118c6fd2807SJeff Garzik  */
1119c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap)
1120c6fd2807SJeff Garzik {
1121c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
1122c6fd2807SJeff Garzik 
1123cb6e73aaSye xingchen 	return ata_port_abort(ap);
1124c6fd2807SJeff Garzik }
1125a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_freeze);
1126c6fd2807SJeff Garzik 
1127c6fd2807SJeff Garzik /**
1128c6fd2807SJeff Garzik  *	ata_eh_freeze_port - EH helper to freeze port
1129c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1130c6fd2807SJeff Garzik  *
1131c6fd2807SJeff Garzik  *	Freeze @ap.
1132c6fd2807SJeff Garzik  *
1133c6fd2807SJeff Garzik  *	LOCKING:
1134c6fd2807SJeff Garzik  *	None.
1135c6fd2807SJeff Garzik  */
1136c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap)
1137c6fd2807SJeff Garzik {
1138c6fd2807SJeff Garzik 	unsigned long flags;
1139c6fd2807SJeff Garzik 
1140c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1141c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
1142c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1143c6fd2807SJeff Garzik }
1144a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
1145c6fd2807SJeff Garzik 
1146c6fd2807SJeff Garzik /**
114794bd5719SMauro Carvalho Chehab  *	ata_eh_thaw_port - EH helper to thaw port
1148c6fd2807SJeff Garzik  *	@ap: ATA port to thaw
1149c6fd2807SJeff Garzik  *
1150c6fd2807SJeff Garzik  *	Thaw frozen port @ap.
1151c6fd2807SJeff Garzik  *
1152c6fd2807SJeff Garzik  *	LOCKING:
1153c6fd2807SJeff Garzik  *	None.
1154c6fd2807SJeff Garzik  */
1155c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap)
1156c6fd2807SJeff Garzik {
1157c6fd2807SJeff Garzik 	unsigned long flags;
1158c6fd2807SJeff Garzik 
1159c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1160c6fd2807SJeff Garzik 
1161c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1162c6fd2807SJeff Garzik 
1163c6fd2807SJeff Garzik 	if (ap->ops->thaw)
1164c6fd2807SJeff Garzik 		ap->ops->thaw(ap);
1165c6fd2807SJeff Garzik 
1166c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1167c6fd2807SJeff Garzik 
1168c318458cSHannes Reinecke 	trace_ata_port_thaw(ap);
1169c6fd2807SJeff Garzik }
1170c6fd2807SJeff Garzik 
1171c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1172c6fd2807SJeff Garzik {
1173c6fd2807SJeff Garzik 	/* nada */
1174c6fd2807SJeff Garzik }
1175c6fd2807SJeff Garzik 
1176c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1177c6fd2807SJeff Garzik {
1178c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
1179c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1180c6fd2807SJeff Garzik 	unsigned long flags;
1181c6fd2807SJeff Garzik 
1182c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1183c6fd2807SJeff Garzik 	qc->scsidone = ata_eh_scsidone;
1184c6fd2807SJeff Garzik 	__ata_qc_complete(qc);
1185c6fd2807SJeff Garzik 	WARN_ON(ata_tag_valid(qc->tag));
1186c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1187c6fd2807SJeff Garzik 
1188c6fd2807SJeff Garzik 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1189c6fd2807SJeff Garzik }
1190c6fd2807SJeff Garzik 
1191c6fd2807SJeff Garzik /**
1192c6fd2807SJeff Garzik  *	ata_eh_qc_complete - Complete an active ATA command from EH
1193c6fd2807SJeff Garzik  *	@qc: Command to complete
1194c6fd2807SJeff Garzik  *
1195c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command has
1196c6fd2807SJeff Garzik  *	completed.  To be used from EH.
1197c6fd2807SJeff Garzik  */
1198c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1199c6fd2807SJeff Garzik {
1200c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1201c6fd2807SJeff Garzik 	scmd->retries = scmd->allowed;
1202c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1203c6fd2807SJeff Garzik }
1204c6fd2807SJeff Garzik 
1205c6fd2807SJeff Garzik /**
1206c6fd2807SJeff Garzik  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1207c6fd2807SJeff Garzik  *	@qc: Command to retry
1208c6fd2807SJeff Garzik  *
1209c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command
1210c6fd2807SJeff Garzik  *	should be retried.  To be used from EH.
1211c6fd2807SJeff Garzik  *
1212c6fd2807SJeff Garzik  *	SCSI midlayer limits the number of retries to scmd->allowed.
1213f13e2201SGwendal Grignou  *	scmd->allowed is incremented for commands which get retried
1214c6fd2807SJeff Garzik  *	due to unrelated failures (qc->err_mask is zero).
1215c6fd2807SJeff Garzik  */
1216c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1217c6fd2807SJeff Garzik {
1218c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1219f13e2201SGwendal Grignou 	if (!qc->err_mask)
1220f13e2201SGwendal Grignou 		scmd->allowed++;
1221c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1222c6fd2807SJeff Garzik }
1223c6fd2807SJeff Garzik 
1224c6fd2807SJeff Garzik /**
1225678afac6STejun Heo  *	ata_dev_disable - disable ATA device
1226678afac6STejun Heo  *	@dev: ATA device to disable
1227678afac6STejun Heo  *
1228678afac6STejun Heo  *	Disable @dev.
1229678afac6STejun Heo  *
1230678afac6STejun Heo  *	Locking:
1231678afac6STejun Heo  *	EH context.
1232678afac6STejun Heo  */
1233678afac6STejun Heo void ata_dev_disable(struct ata_device *dev)
1234678afac6STejun Heo {
1235678afac6STejun Heo 	if (!ata_dev_enabled(dev))
1236678afac6STejun Heo 		return;
1237678afac6STejun Heo 
12381c95a27cSHannes Reinecke 	ata_dev_warn(dev, "disable device\n");
123999cf610aSTejun Heo 
12400fecb508SDamien Le Moal 	ata_eh_dev_disable(dev);
1241678afac6STejun Heo }
1242a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_dev_disable);
1243678afac6STejun Heo 
1244678afac6STejun Heo /**
1245c6fd2807SJeff Garzik  *	ata_eh_detach_dev - detach ATA device
1246c6fd2807SJeff Garzik  *	@dev: ATA device to detach
1247c6fd2807SJeff Garzik  *
1248c6fd2807SJeff Garzik  *	Detach @dev.
1249c6fd2807SJeff Garzik  *
1250c6fd2807SJeff Garzik  *	LOCKING:
1251c6fd2807SJeff Garzik  *	None.
1252c6fd2807SJeff Garzik  */
1253fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev)
1254c6fd2807SJeff Garzik {
1255f58229f8STejun Heo 	struct ata_link *link = dev->link;
1256f58229f8STejun Heo 	struct ata_port *ap = link->ap;
125790484ebfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1258c6fd2807SJeff Garzik 	unsigned long flags;
1259c6fd2807SJeff Garzik 
1260aa3998dbSDamien Le Moal 	/*
1261aa3998dbSDamien Le Moal 	 * If the device is still enabled, transition it to standby power mode
12620fecb508SDamien Le Moal 	 * (i.e. spin down HDDs) and disable it.
1263aa3998dbSDamien Le Moal 	 */
12640fecb508SDamien Le Moal 	if (ata_dev_enabled(dev)) {
1265aa3998dbSDamien Le Moal 		ata_dev_power_set_standby(dev);
12660fecb508SDamien Le Moal 		ata_eh_dev_disable(dev);
12670fecb508SDamien Le Moal 	}
1268c6fd2807SJeff Garzik 
1269c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1270c6fd2807SJeff Garzik 
1271c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_DETACH;
1272c6fd2807SJeff Garzik 
1273c6fd2807SJeff Garzik 	if (ata_scsi_offline_dev(dev)) {
1274c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_DETACHED;
1275c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1276c6fd2807SJeff Garzik 	}
1277c6fd2807SJeff Garzik 
127890484ebfSTejun Heo 	/* clear per-dev EH info */
1279f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1280f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
128190484ebfSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
128290484ebfSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1283c6fd2807SJeff Garzik 
1284c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1285c6fd2807SJeff Garzik }
1286c6fd2807SJeff Garzik 
1287c6fd2807SJeff Garzik /**
1288c6fd2807SJeff Garzik  *	ata_eh_about_to_do - about to perform eh_action
1289955e57dfSTejun Heo  *	@link: target ATA link
1290c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1291c6fd2807SJeff Garzik  *	@action: action about to be performed
1292c6fd2807SJeff Garzik  *
1293c6fd2807SJeff Garzik  *	Called just before performing EH actions to clear related bits
1294955e57dfSTejun Heo  *	in @link->eh_info such that eh actions are not unnecessarily
1295955e57dfSTejun Heo  *	repeated.
1296c6fd2807SJeff Garzik  *
1297c6fd2807SJeff Garzik  *	LOCKING:
1298c6fd2807SJeff Garzik  *	None.
1299c6fd2807SJeff Garzik  */
1300fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1301c6fd2807SJeff Garzik 			unsigned int action)
1302c6fd2807SJeff Garzik {
1303955e57dfSTejun Heo 	struct ata_port *ap = link->ap;
1304955e57dfSTejun Heo 	struct ata_eh_info *ehi = &link->eh_info;
1305955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1306c6fd2807SJeff Garzik 	unsigned long flags;
1307c6fd2807SJeff Garzik 
1308c318458cSHannes Reinecke 	trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action);
1309c318458cSHannes Reinecke 
1310c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1311c6fd2807SJeff Garzik 
1312955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, ehi, action);
1313c6fd2807SJeff Garzik 
1314a568d1d2STejun Heo 	/* About to take EH action, set RECOVERED.  Ignore actions on
1315a568d1d2STejun Heo 	 * slave links as master will do them again.
1316a568d1d2STejun Heo 	 */
1317a568d1d2STejun Heo 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1318c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_RECOVERED;
1319c6fd2807SJeff Garzik 
1320c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1321c6fd2807SJeff Garzik }
1322c6fd2807SJeff Garzik 
1323c6fd2807SJeff Garzik /**
1324c6fd2807SJeff Garzik  *	ata_eh_done - EH action complete
13252f60e1abSJonathan Corbet  *	@link: ATA link for which EH actions are complete
1326c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1327c6fd2807SJeff Garzik  *	@action: action just completed
1328c6fd2807SJeff Garzik  *
1329c6fd2807SJeff Garzik  *	Called right after performing EH actions to clear related bits
1330955e57dfSTejun Heo  *	in @link->eh_context.
1331c6fd2807SJeff Garzik  *
1332c6fd2807SJeff Garzik  *	LOCKING:
1333c6fd2807SJeff Garzik  *	None.
1334c6fd2807SJeff Garzik  */
1335fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1336c6fd2807SJeff Garzik 		 unsigned int action)
1337c6fd2807SJeff Garzik {
1338955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
13399af5c9c9STejun Heo 
1340c318458cSHannes Reinecke 	trace_ata_eh_done(link, dev ? dev->devno : 0, action);
1341c318458cSHannes Reinecke 
1342955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, &ehc->i, action);
1343c6fd2807SJeff Garzik }
1344c6fd2807SJeff Garzik 
1345c6fd2807SJeff Garzik /**
1346c6fd2807SJeff Garzik  *	ata_err_string - convert err_mask to descriptive string
1347c6fd2807SJeff Garzik  *	@err_mask: error mask to convert to string
1348c6fd2807SJeff Garzik  *
1349c6fd2807SJeff Garzik  *	Convert @err_mask to descriptive string.  Errors are
1350c6fd2807SJeff Garzik  *	prioritized according to severity and only the most severe
1351c6fd2807SJeff Garzik  *	error is reported.
1352c6fd2807SJeff Garzik  *
1353c6fd2807SJeff Garzik  *	LOCKING:
1354c6fd2807SJeff Garzik  *	None.
1355c6fd2807SJeff Garzik  *
1356c6fd2807SJeff Garzik  *	RETURNS:
1357c6fd2807SJeff Garzik  *	Descriptive string for @err_mask
1358c6fd2807SJeff Garzik  */
1359c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask)
1360c6fd2807SJeff Garzik {
1361c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HOST_BUS)
1362c6fd2807SJeff Garzik 		return "host bus error";
1363c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_ATA_BUS)
1364c6fd2807SJeff Garzik 		return "ATA bus error";
1365c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_TIMEOUT)
1366c6fd2807SJeff Garzik 		return "timeout";
1367c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HSM)
1368c6fd2807SJeff Garzik 		return "HSM violation";
1369c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_SYSTEM)
1370c6fd2807SJeff Garzik 		return "internal error";
1371c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_MEDIA)
1372c6fd2807SJeff Garzik 		return "media error";
1373c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_INVALID)
1374c6fd2807SJeff Garzik 		return "invalid argument";
1375c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_DEV)
1376c6fd2807SJeff Garzik 		return "device error";
137754fb131bSDamien Le Moal 	if (err_mask & AC_ERR_NCQ)
137854fb131bSDamien Le Moal 		return "NCQ error";
137954fb131bSDamien Le Moal 	if (err_mask & AC_ERR_NODEV_HINT)
138054fb131bSDamien Le Moal 		return "Polling detection error";
1381c6fd2807SJeff Garzik 	return "unknown error";
1382c6fd2807SJeff Garzik }
1383c6fd2807SJeff Garzik 
1384c6fd2807SJeff Garzik /**
138511fc33daSTejun Heo  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
138611fc33daSTejun Heo  *	@dev: target ATAPI device
138711fc33daSTejun Heo  *	@r_sense_key: out parameter for sense_key
138811fc33daSTejun Heo  *
138911fc33daSTejun Heo  *	Perform ATAPI TEST_UNIT_READY.
139011fc33daSTejun Heo  *
139111fc33daSTejun Heo  *	LOCKING:
139211fc33daSTejun Heo  *	EH context (may sleep).
139311fc33daSTejun Heo  *
139411fc33daSTejun Heo  *	RETURNS:
139511fc33daSTejun Heo  *	0 on success, AC_ERR_* mask on failure.
139611fc33daSTejun Heo  */
13973dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
139811fc33daSTejun Heo {
139911fc33daSTejun Heo 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
140011fc33daSTejun Heo 	struct ata_taskfile tf;
140111fc33daSTejun Heo 	unsigned int err_mask;
140211fc33daSTejun Heo 
140311fc33daSTejun Heo 	ata_tf_init(dev, &tf);
140411fc33daSTejun Heo 
140511fc33daSTejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
140611fc33daSTejun Heo 	tf.command = ATA_CMD_PACKET;
140711fc33daSTejun Heo 	tf.protocol = ATAPI_PROT_NODATA;
140811fc33daSTejun Heo 
140911fc33daSTejun Heo 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
141011fc33daSTejun Heo 	if (err_mask == AC_ERR_DEV)
1411efcef265SSergey Shtylyov 		*r_sense_key = tf.error >> 4;
141211fc33daSTejun Heo 	return err_mask;
141311fc33daSTejun Heo }
141411fc33daSTejun Heo 
141511fc33daSTejun Heo /**
14169526dec2SNiklas Cassel  *	ata_eh_decide_disposition - Disposition a qc based on sense data
14179526dec2SNiklas Cassel  *	@qc: qc to examine
14189526dec2SNiklas Cassel  *
14199526dec2SNiklas Cassel  *	For a regular SCSI command, the SCSI completion callback (scsi_done())
14209526dec2SNiklas Cassel  *	will call scsi_complete(), which will call scsi_decide_disposition(),
14219526dec2SNiklas Cassel  *	which will call scsi_check_sense(). scsi_complete() finally calls
14229526dec2SNiklas Cassel  *	scsi_finish_command(). This is fine for SCSI, since any eventual sense
14239526dec2SNiklas Cassel  *	data is usually returned in the completion itself (without invoking SCSI
14249526dec2SNiklas Cassel  *	EH). However, for a QC, we always need to fetch the sense data
14259526dec2SNiklas Cassel  *	explicitly using SCSI EH.
14269526dec2SNiklas Cassel  *
14279526dec2SNiklas Cassel  *	A command that is completed via SCSI EH will instead be completed using
14289526dec2SNiklas Cassel  *	scsi_eh_flush_done_q(), which will call scsi_finish_command() directly
14299526dec2SNiklas Cassel  *	(without ever calling scsi_check_sense()).
14309526dec2SNiklas Cassel  *
14319526dec2SNiklas Cassel  *	For a command that went through SCSI EH, it is the responsibility of the
14329526dec2SNiklas Cassel  *	SCSI EH strategy handler to call scsi_decide_disposition(), see e.g. how
14339526dec2SNiklas Cassel  *	scsi_eh_get_sense() calls scsi_decide_disposition() for SCSI LLDDs that
14349526dec2SNiklas Cassel  *	do not get the sense data as part of the completion.
14359526dec2SNiklas Cassel  *
14369526dec2SNiklas Cassel  *	Thus, for QC commands that went via SCSI EH, we need to call
14379526dec2SNiklas Cassel  *	scsi_check_sense() ourselves, similar to how scsi_eh_get_sense() calls
14389526dec2SNiklas Cassel  *	scsi_decide_disposition(), which calls scsi_check_sense(), in order to
14399526dec2SNiklas Cassel  *	set the correct SCSI ML byte (if any).
14409526dec2SNiklas Cassel  *
14419526dec2SNiklas Cassel  *	LOCKING:
14429526dec2SNiklas Cassel  *	EH context.
14439526dec2SNiklas Cassel  *
14449526dec2SNiklas Cassel  *	RETURNS:
14459526dec2SNiklas Cassel  *	SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
14469526dec2SNiklas Cassel  */
14479526dec2SNiklas Cassel enum scsi_disposition ata_eh_decide_disposition(struct ata_queued_cmd *qc)
14489526dec2SNiklas Cassel {
14499526dec2SNiklas Cassel 	return scsi_check_sense(qc->scsicmd);
14509526dec2SNiklas Cassel }
14519526dec2SNiklas Cassel 
14529526dec2SNiklas Cassel /**
1453e87fd28cSHannes Reinecke  *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
14542f60e1abSJonathan Corbet  *	@qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
1455e87fd28cSHannes Reinecke  *
1456e87fd28cSHannes Reinecke  *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
1457e87fd28cSHannes Reinecke  *	SENSE.  This function is an EH helper.
1458e87fd28cSHannes Reinecke  *
1459e87fd28cSHannes Reinecke  *	LOCKING:
1460e87fd28cSHannes Reinecke  *	Kernel thread context (may sleep).
146124aeebbfSNiklas Cassel  *
146224aeebbfSNiklas Cassel  *	RETURNS:
146324aeebbfSNiklas Cassel  *	true if sense data could be fetched, false otherwise.
1464e87fd28cSHannes Reinecke  */
146524aeebbfSNiklas Cassel static bool ata_eh_request_sense(struct ata_queued_cmd *qc)
1466e87fd28cSHannes Reinecke {
1467b46c760eSNiklas Cassel 	struct scsi_cmnd *cmd = qc->scsicmd;
1468e87fd28cSHannes Reinecke 	struct ata_device *dev = qc->dev;
1469e87fd28cSHannes Reinecke 	struct ata_taskfile tf;
1470e87fd28cSHannes Reinecke 	unsigned int err_mask;
1471e87fd28cSHannes Reinecke 
14724cb7c6f1SNiklas Cassel 	if (ata_port_is_frozen(qc->ap)) {
1473e87fd28cSHannes Reinecke 		ata_dev_warn(dev, "sense data available but port frozen\n");
147424aeebbfSNiklas Cassel 		return false;
1475e87fd28cSHannes Reinecke 	}
1476e87fd28cSHannes Reinecke 
1477e87fd28cSHannes Reinecke 	if (!ata_id_sense_reporting_enabled(dev->id)) {
1478e87fd28cSHannes Reinecke 		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
147924aeebbfSNiklas Cassel 		return false;
1480e87fd28cSHannes Reinecke 	}
1481e87fd28cSHannes Reinecke 
1482e87fd28cSHannes Reinecke 	ata_tf_init(dev, &tf);
1483e87fd28cSHannes Reinecke 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1484e87fd28cSHannes Reinecke 	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1485e87fd28cSHannes Reinecke 	tf.command = ATA_CMD_REQ_SENSE_DATA;
1486e87fd28cSHannes Reinecke 	tf.protocol = ATA_PROT_NODATA;
1487e87fd28cSHannes Reinecke 
1488e87fd28cSHannes Reinecke 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1489e87fd28cSHannes Reinecke 	/* Ignore err_mask; ATA_ERR might be set */
1490efcef265SSergey Shtylyov 	if (tf.status & ATA_SENSE) {
14914b89ad8eSNiklas Cassel 		if (ata_scsi_sense_is_valid(tf.lbah, tf.lbam, tf.lbal)) {
149224aeebbfSNiklas Cassel 			/* Set sense without also setting scsicmd->result */
149324aeebbfSNiklas Cassel 			scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
149424aeebbfSNiklas Cassel 						cmd->sense_buffer, tf.lbah,
149524aeebbfSNiklas Cassel 						tf.lbam, tf.lbal);
1496e87fd28cSHannes Reinecke 			qc->flags |= ATA_QCFLAG_SENSE_VALID;
149724aeebbfSNiklas Cassel 			return true;
14984b89ad8eSNiklas Cassel 		}
1499e87fd28cSHannes Reinecke 	} else {
1500e87fd28cSHannes Reinecke 		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
1501efcef265SSergey Shtylyov 			     tf.status, err_mask);
1502e87fd28cSHannes Reinecke 	}
150324aeebbfSNiklas Cassel 
150424aeebbfSNiklas Cassel 	return false;
1505e87fd28cSHannes Reinecke }
1506e87fd28cSHannes Reinecke 
1507e87fd28cSHannes Reinecke /**
1508c6fd2807SJeff Garzik  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1509c6fd2807SJeff Garzik  *	@dev: device to perform REQUEST_SENSE to
1510c6fd2807SJeff Garzik  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
15113eabddb8STejun Heo  *	@dfl_sense_key: default sense key to use
1512c6fd2807SJeff Garzik  *
1513c6fd2807SJeff Garzik  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1514c6fd2807SJeff Garzik  *	SENSE.  This function is EH helper.
1515c6fd2807SJeff Garzik  *
1516c6fd2807SJeff Garzik  *	LOCKING:
1517c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1518c6fd2807SJeff Garzik  *
1519c6fd2807SJeff Garzik  *	RETURNS:
1520c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask on failure
1521c6fd2807SJeff Garzik  */
15223dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev,
15233eabddb8STejun Heo 					   u8 *sense_buf, u8 dfl_sense_key)
1524c6fd2807SJeff Garzik {
15253eabddb8STejun Heo 	u8 cdb[ATAPI_CDB_LEN] =
15263eabddb8STejun Heo 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
15279af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1528c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1529c6fd2807SJeff Garzik 
1530c6fd2807SJeff Garzik 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1531c6fd2807SJeff Garzik 
153256287768SAlbert Lee 	/* initialize sense_buf with the error register,
153356287768SAlbert Lee 	 * for the case where they are -not- overwritten
153456287768SAlbert Lee 	 */
1535c6fd2807SJeff Garzik 	sense_buf[0] = 0x70;
15363eabddb8STejun Heo 	sense_buf[2] = dfl_sense_key;
153756287768SAlbert Lee 
153856287768SAlbert Lee 	/* some devices time out if garbage left in tf */
153956287768SAlbert Lee 	ata_tf_init(dev, &tf);
1540c6fd2807SJeff Garzik 
1541c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1542c6fd2807SJeff Garzik 	tf.command = ATA_CMD_PACKET;
1543c6fd2807SJeff Garzik 
1544c6fd2807SJeff Garzik 	/* is it pointless to prefer PIO for "safety reasons"? */
1545c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_DMA) {
15460dc36888STejun Heo 		tf.protocol = ATAPI_PROT_DMA;
1547c6fd2807SJeff Garzik 		tf.feature |= ATAPI_PKT_DMA;
1548c6fd2807SJeff Garzik 	} else {
15490dc36888STejun Heo 		tf.protocol = ATAPI_PROT_PIO;
1550f2dfc1a1STejun Heo 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1551f2dfc1a1STejun Heo 		tf.lbah = 0;
1552c6fd2807SJeff Garzik 	}
1553c6fd2807SJeff Garzik 
1554c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
15552b789108STejun Heo 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1556c6fd2807SJeff Garzik }
1557c6fd2807SJeff Garzik 
1558c6fd2807SJeff Garzik /**
1559c6fd2807SJeff Garzik  *	ata_eh_analyze_serror - analyze SError for a failed port
15600260731fSTejun Heo  *	@link: ATA link to analyze SError for
1561c6fd2807SJeff Garzik  *
1562c6fd2807SJeff Garzik  *	Analyze SError if available and further determine cause of
1563c6fd2807SJeff Garzik  *	failure.
1564c6fd2807SJeff Garzik  *
1565c6fd2807SJeff Garzik  *	LOCKING:
1566c6fd2807SJeff Garzik  *	None.
1567c6fd2807SJeff Garzik  */
15680260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link)
1569c6fd2807SJeff Garzik {
15700260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1571c6fd2807SJeff Garzik 	u32 serror = ehc->i.serror;
1572c6fd2807SJeff Garzik 	unsigned int err_mask = 0, action = 0;
1573f9df58cbSTejun Heo 	u32 hotplug_mask;
1574c6fd2807SJeff Garzik 
1575e0614db2STejun Heo 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1576c6fd2807SJeff Garzik 		err_mask |= AC_ERR_ATA_BUS;
1577cf480626STejun Heo 		action |= ATA_EH_RESET;
1578c6fd2807SJeff Garzik 	}
1579c6fd2807SJeff Garzik 	if (serror & SERR_PROTOCOL) {
1580c6fd2807SJeff Garzik 		err_mask |= AC_ERR_HSM;
1581cf480626STejun Heo 		action |= ATA_EH_RESET;
1582c6fd2807SJeff Garzik 	}
1583c6fd2807SJeff Garzik 	if (serror & SERR_INTERNAL) {
1584c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1585cf480626STejun Heo 		action |= ATA_EH_RESET;
1586c6fd2807SJeff Garzik 	}
1587f9df58cbSTejun Heo 
1588f9df58cbSTejun Heo 	/* Determine whether a hotplug event has occurred.  Both
1589f9df58cbSTejun Heo 	 * SError.N/X are considered hotplug events for enabled or
1590f9df58cbSTejun Heo 	 * host links.  For disabled PMP links, only N bit is
1591f9df58cbSTejun Heo 	 * considered as X bit is left at 1 for link plugging.
1592f9df58cbSTejun Heo 	 */
1593eb0e85e3STejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
15946b7ae954STejun Heo 		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
15956b7ae954STejun Heo 	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1596f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1597f9df58cbSTejun Heo 	else
1598f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG;
1599f9df58cbSTejun Heo 
1600f9df58cbSTejun Heo 	if (serror & hotplug_mask)
1601c6fd2807SJeff Garzik 		ata_ehi_hotplugged(&ehc->i);
1602c6fd2807SJeff Garzik 
1603c6fd2807SJeff Garzik 	ehc->i.err_mask |= err_mask;
1604c6fd2807SJeff Garzik 	ehc->i.action |= action;
1605c6fd2807SJeff Garzik }
1606c6fd2807SJeff Garzik 
1607c6fd2807SJeff Garzik /**
1608c6fd2807SJeff Garzik  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1609c6fd2807SJeff Garzik  *	@qc: qc to analyze
1610c6fd2807SJeff Garzik  *
1611c6fd2807SJeff Garzik  *	Analyze taskfile of @qc and further determine cause of
1612c6fd2807SJeff Garzik  *	failure.  This function also requests ATAPI sense data if
161325985edcSLucas De Marchi  *	available.
1614c6fd2807SJeff Garzik  *
1615c6fd2807SJeff Garzik  *	LOCKING:
1616c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1617c6fd2807SJeff Garzik  *
1618c6fd2807SJeff Garzik  *	RETURNS:
1619c6fd2807SJeff Garzik  *	Determined recovery action
1620c6fd2807SJeff Garzik  */
1621e3b1fff6SNiklas Cassel static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc)
1622c6fd2807SJeff Garzik {
1623e3b1fff6SNiklas Cassel 	const struct ata_taskfile *tf = &qc->result_tf;
1624c6fd2807SJeff Garzik 	unsigned int tmp, action = 0;
1625efcef265SSergey Shtylyov 	u8 stat = tf->status, err = tf->error;
1626c6fd2807SJeff Garzik 
1627c6fd2807SJeff Garzik 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1628c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_HSM;
1629cf480626STejun Heo 		return ATA_EH_RESET;
1630c6fd2807SJeff Garzik 	}
1631c6fd2807SJeff Garzik 
1632e87fd28cSHannes Reinecke 	if (stat & (ATA_ERR | ATA_DF)) {
1633a51d644aSTejun Heo 		qc->err_mask |= AC_ERR_DEV;
1634e87fd28cSHannes Reinecke 		/*
1635e87fd28cSHannes Reinecke 		 * Sense data reporting does not work if the
1636e87fd28cSHannes Reinecke 		 * device fault bit is set.
1637e87fd28cSHannes Reinecke 		 */
1638e87fd28cSHannes Reinecke 		if (stat & ATA_DF)
1639e87fd28cSHannes Reinecke 			stat &= ~ATA_SENSE;
1640e87fd28cSHannes Reinecke 	} else {
1641c6fd2807SJeff Garzik 		return 0;
1642e87fd28cSHannes Reinecke 	}
1643c6fd2807SJeff Garzik 
1644c6fd2807SJeff Garzik 	switch (qc->dev->class) {
1645013115d9SNiklas Cassel 	case ATA_DEV_ATA:
16469162c657SHannes Reinecke 	case ATA_DEV_ZAC:
1647461ec040SNiklas Cassel 		/*
1648461ec040SNiklas Cassel 		 * Fetch the sense data explicitly if:
1649461ec040SNiklas Cassel 		 * -It was a non-NCQ command that failed, or
1650461ec040SNiklas Cassel 		 * -It was a NCQ command that failed, but the sense data
1651461ec040SNiklas Cassel 		 *  was not included in the NCQ command error log
1652461ec040SNiklas Cassel 		 *  (i.e. NCQ autosense is not supported by the device).
1653461ec040SNiklas Cassel 		 */
165424aeebbfSNiklas Cassel 		if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) &&
165524aeebbfSNiklas Cassel 		    (stat & ATA_SENSE) && ata_eh_request_sense(qc))
165624aeebbfSNiklas Cassel 			set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
1657c6fd2807SJeff Garzik 		if (err & ATA_ICRC)
1658c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_ATA_BUS;
1659eec7e1c1SAlexey Asemov 		if (err & (ATA_UNC | ATA_AMNF))
1660c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_MEDIA;
1661c6fd2807SJeff Garzik 		if (err & ATA_IDNF)
1662c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_INVALID;
1663c6fd2807SJeff Garzik 		break;
1664c6fd2807SJeff Garzik 
1665c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
16664cb7c6f1SNiklas Cassel 		if (!ata_port_is_frozen(qc->ap)) {
16673eabddb8STejun Heo 			tmp = atapi_eh_request_sense(qc->dev,
16683eabddb8STejun Heo 						qc->scsicmd->sense_buffer,
1669efcef265SSergey Shtylyov 						qc->result_tf.error >> 4);
16703852e373SHannes Reinecke 			if (!tmp)
1671c6fd2807SJeff Garzik 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
16723852e373SHannes Reinecke 			else
1673c6fd2807SJeff Garzik 				qc->err_mask |= tmp;
1674c6fd2807SJeff Garzik 		}
1675a569a30dSTejun Heo 	}
1676c6fd2807SJeff Garzik 
16773852e373SHannes Reinecke 	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
16789526dec2SNiklas Cassel 		enum scsi_disposition ret = ata_eh_decide_disposition(qc);
16799526dec2SNiklas Cassel 
16803852e373SHannes Reinecke 		/*
168179487259SDamien Le Moal 		 * SUCCESS here means that the sense code could be
16823852e373SHannes Reinecke 		 * evaluated and should be passed to the upper layers
16833852e373SHannes Reinecke 		 * for correct evaluation.
168479487259SDamien Le Moal 		 * FAILED means the sense code could not be interpreted
16853852e373SHannes Reinecke 		 * and the device would need to be reset.
16863852e373SHannes Reinecke 		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
16873852e373SHannes Reinecke 		 * command would need to be retried.
16883852e373SHannes Reinecke 		 */
16893852e373SHannes Reinecke 		if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
16903852e373SHannes Reinecke 			qc->flags |= ATA_QCFLAG_RETRY;
16913852e373SHannes Reinecke 			qc->err_mask |= AC_ERR_OTHER;
16923852e373SHannes Reinecke 		} else if (ret != SUCCESS) {
16933852e373SHannes Reinecke 			qc->err_mask |= AC_ERR_HSM;
16943852e373SHannes Reinecke 		}
16953852e373SHannes Reinecke 	}
1696c6fd2807SJeff Garzik 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1697cf480626STejun Heo 		action |= ATA_EH_RESET;
1698c6fd2807SJeff Garzik 
1699c6fd2807SJeff Garzik 	return action;
1700c6fd2807SJeff Garzik }
1701c6fd2807SJeff Garzik 
170276326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
170376326ac1STejun Heo 				   int *xfer_ok)
1704c6fd2807SJeff Garzik {
170576326ac1STejun Heo 	int base = 0;
170676326ac1STejun Heo 
170776326ac1STejun Heo 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
170876326ac1STejun Heo 		*xfer_ok = 1;
170976326ac1STejun Heo 
171076326ac1STejun Heo 	if (!*xfer_ok)
171175f9cafcSTejun Heo 		base = ATA_ECAT_DUBIOUS_NONE;
171276326ac1STejun Heo 
17137d47e8d4STejun Heo 	if (err_mask & AC_ERR_ATA_BUS)
171476326ac1STejun Heo 		return base + ATA_ECAT_ATA_BUS;
1715c6fd2807SJeff Garzik 
17167d47e8d4STejun Heo 	if (err_mask & AC_ERR_TIMEOUT)
171776326ac1STejun Heo 		return base + ATA_ECAT_TOUT_HSM;
17187d47e8d4STejun Heo 
17193884f7b0STejun Heo 	if (eflags & ATA_EFLAG_IS_IO) {
17207d47e8d4STejun Heo 		if (err_mask & AC_ERR_HSM)
172176326ac1STejun Heo 			return base + ATA_ECAT_TOUT_HSM;
17227d47e8d4STejun Heo 		if ((err_mask &
17237d47e8d4STejun Heo 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
172476326ac1STejun Heo 			return base + ATA_ECAT_UNK_DEV;
1725c6fd2807SJeff Garzik 	}
1726c6fd2807SJeff Garzik 
1727c6fd2807SJeff Garzik 	return 0;
1728c6fd2807SJeff Garzik }
1729c6fd2807SJeff Garzik 
17307d47e8d4STejun Heo struct speed_down_verdict_arg {
1731c6fd2807SJeff Garzik 	u64 since;
173276326ac1STejun Heo 	int xfer_ok;
17333884f7b0STejun Heo 	int nr_errors[ATA_ECAT_NR];
1734c6fd2807SJeff Garzik };
1735c6fd2807SJeff Garzik 
17367d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1737c6fd2807SJeff Garzik {
17387d47e8d4STejun Heo 	struct speed_down_verdict_arg *arg = void_arg;
173976326ac1STejun Heo 	int cat;
1740c6fd2807SJeff Garzik 
1741d9027470SGwendal Grignou 	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1742c6fd2807SJeff Garzik 		return -1;
1743c6fd2807SJeff Garzik 
174476326ac1STejun Heo 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
174576326ac1STejun Heo 				      &arg->xfer_ok);
17467d47e8d4STejun Heo 	arg->nr_errors[cat]++;
174776326ac1STejun Heo 
1748c6fd2807SJeff Garzik 	return 0;
1749c6fd2807SJeff Garzik }
1750c6fd2807SJeff Garzik 
1751c6fd2807SJeff Garzik /**
17527d47e8d4STejun Heo  *	ata_eh_speed_down_verdict - Determine speed down verdict
1753c6fd2807SJeff Garzik  *	@dev: Device of interest
1754c6fd2807SJeff Garzik  *
1755c6fd2807SJeff Garzik  *	This function examines error ring of @dev and determines
17567d47e8d4STejun Heo  *	whether NCQ needs to be turned off, transfer speed should be
17577d47e8d4STejun Heo  *	stepped down, or falling back to PIO is necessary.
1758c6fd2807SJeff Garzik  *
17593884f7b0STejun Heo  *	ECAT_ATA_BUS	: ATA_BUS error for any command
1760c6fd2807SJeff Garzik  *
17613884f7b0STejun Heo  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
17623884f7b0STejun Heo  *			  IO commands
17637d47e8d4STejun Heo  *
17643884f7b0STejun Heo  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
1765c6fd2807SJeff Garzik  *
176676326ac1STejun Heo  *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
176776326ac1STejun Heo  *			  data transfer hasn't been verified.
176876326ac1STejun Heo  *
17693884f7b0STejun Heo  *	Verdicts are
17707d47e8d4STejun Heo  *
17713884f7b0STejun Heo  *	NCQ_OFF		: Turn off NCQ.
17727d47e8d4STejun Heo  *
17733884f7b0STejun Heo  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
17743884f7b0STejun Heo  *			  to PIO.
17753884f7b0STejun Heo  *
17763884f7b0STejun Heo  *	FALLBACK_TO_PIO	: Fall back to PIO.
17773884f7b0STejun Heo  *
17783884f7b0STejun Heo  *	Even if multiple verdicts are returned, only one action is
177976326ac1STejun Heo  *	taken per error.  An action triggered by non-DUBIOUS errors
178076326ac1STejun Heo  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
178176326ac1STejun Heo  *	This is to expedite speed down decisions right after device is
178276326ac1STejun Heo  *	initially configured.
17833884f7b0STejun Heo  *
17844091fb95SMasahiro Yamada  *	The following are speed down rules.  #1 and #2 deal with
178576326ac1STejun Heo  *	DUBIOUS errors.
178676326ac1STejun Heo  *
178776326ac1STejun Heo  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
178876326ac1STejun Heo  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
178976326ac1STejun Heo  *
179076326ac1STejun Heo  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
179176326ac1STejun Heo  *	   occurred during last 5 mins, NCQ_OFF.
179276326ac1STejun Heo  *
179376326ac1STejun Heo  *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
179425985edcSLucas De Marchi  *	   occurred during last 5 mins, FALLBACK_TO_PIO
17953884f7b0STejun Heo  *
179676326ac1STejun Heo  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
17973884f7b0STejun Heo  *	   during last 10 mins, NCQ_OFF.
17983884f7b0STejun Heo  *
179976326ac1STejun Heo  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
18003884f7b0STejun Heo  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
18017d47e8d4STejun Heo  *
1802c6fd2807SJeff Garzik  *	LOCKING:
1803c6fd2807SJeff Garzik  *	Inherited from caller.
1804c6fd2807SJeff Garzik  *
1805c6fd2807SJeff Garzik  *	RETURNS:
18067d47e8d4STejun Heo  *	OR of ATA_EH_SPDN_* flags.
1807c6fd2807SJeff Garzik  */
18087d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1809c6fd2807SJeff Garzik {
18107d47e8d4STejun Heo 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
18117d47e8d4STejun Heo 	u64 j64 = get_jiffies_64();
18127d47e8d4STejun Heo 	struct speed_down_verdict_arg arg;
18137d47e8d4STejun Heo 	unsigned int verdict = 0;
1814c6fd2807SJeff Garzik 
18153884f7b0STejun Heo 	/* scan past 5 mins of error history */
18163884f7b0STejun Heo 	memset(&arg, 0, sizeof(arg));
18173884f7b0STejun Heo 	arg.since = j64 - min(j64, j5mins);
18183884f7b0STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
18193884f7b0STejun Heo 
182076326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
182176326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
182276326ac1STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
182376326ac1STejun Heo 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
182476326ac1STejun Heo 
182576326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
182676326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
182776326ac1STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
182876326ac1STejun Heo 
18293884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
18303884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1831663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
18323884f7b0STejun Heo 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
18333884f7b0STejun Heo 
18347d47e8d4STejun Heo 	/* scan past 10 mins of error history */
1835c6fd2807SJeff Garzik 	memset(&arg, 0, sizeof(arg));
18367d47e8d4STejun Heo 	arg.since = j64 - min(j64, j10mins);
18377d47e8d4STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1838c6fd2807SJeff Garzik 
18393884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
18403884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
18417d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF;
18423884f7b0STejun Heo 
18433884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
18443884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1845663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
18467d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
1847c6fd2807SJeff Garzik 
18487d47e8d4STejun Heo 	return verdict;
1849c6fd2807SJeff Garzik }
1850c6fd2807SJeff Garzik 
1851c6fd2807SJeff Garzik /**
1852c6fd2807SJeff Garzik  *	ata_eh_speed_down - record error and speed down if necessary
1853c6fd2807SJeff Garzik  *	@dev: Failed device
18543884f7b0STejun Heo  *	@eflags: mask of ATA_EFLAG_* flags
1855c6fd2807SJeff Garzik  *	@err_mask: err_mask of the error
1856c6fd2807SJeff Garzik  *
1857c6fd2807SJeff Garzik  *	Record error and examine error history to determine whether
1858c6fd2807SJeff Garzik  *	adjusting transmission speed is necessary.  It also sets
1859c6fd2807SJeff Garzik  *	transmission limits appropriately if such adjustment is
1860c6fd2807SJeff Garzik  *	necessary.
1861c6fd2807SJeff Garzik  *
1862c6fd2807SJeff Garzik  *	LOCKING:
1863c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1864c6fd2807SJeff Garzik  *
1865c6fd2807SJeff Garzik  *	RETURNS:
18667d47e8d4STejun Heo  *	Determined recovery action.
1867c6fd2807SJeff Garzik  */
18683884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev,
18693884f7b0STejun Heo 				unsigned int eflags, unsigned int err_mask)
1870c6fd2807SJeff Garzik {
1871b1c72916STejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
187276326ac1STejun Heo 	int xfer_ok = 0;
18737d47e8d4STejun Heo 	unsigned int verdict;
18747d47e8d4STejun Heo 	unsigned int action = 0;
18757d47e8d4STejun Heo 
18767d47e8d4STejun Heo 	/* don't bother if Cat-0 error */
187776326ac1STejun Heo 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1878c6fd2807SJeff Garzik 		return 0;
1879c6fd2807SJeff Garzik 
1880c6fd2807SJeff Garzik 	/* record error and determine whether speed down is necessary */
18813884f7b0STejun Heo 	ata_ering_record(&dev->ering, eflags, err_mask);
18827d47e8d4STejun Heo 	verdict = ata_eh_speed_down_verdict(dev);
1883c6fd2807SJeff Garzik 
18847d47e8d4STejun Heo 	/* turn off NCQ? */
188512980c1fSDamien Le Moal 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) && ata_ncq_enabled(dev)) {
18867d47e8d4STejun Heo 		dev->flags |= ATA_DFLAG_NCQ_OFF;
1887a9a79dfeSJoe Perches 		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
18887d47e8d4STejun Heo 		goto done;
18897d47e8d4STejun Heo 	}
1890c6fd2807SJeff Garzik 
18917d47e8d4STejun Heo 	/* speed down? */
18927d47e8d4STejun Heo 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1893c6fd2807SJeff Garzik 		/* speed down SATA link speed if possible */
1894a07d499bSTejun Heo 		if (sata_down_spd_limit(link, 0) == 0) {
1895cf480626STejun Heo 			action |= ATA_EH_RESET;
18967d47e8d4STejun Heo 			goto done;
18977d47e8d4STejun Heo 		}
1898c6fd2807SJeff Garzik 
1899c6fd2807SJeff Garzik 		/* lower transfer mode */
19007d47e8d4STejun Heo 		if (dev->spdn_cnt < 2) {
19017d47e8d4STejun Heo 			static const int dma_dnxfer_sel[] =
19027d47e8d4STejun Heo 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
19037d47e8d4STejun Heo 			static const int pio_dnxfer_sel[] =
19047d47e8d4STejun Heo 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
19057d47e8d4STejun Heo 			int sel;
1906c6fd2807SJeff Garzik 
19077d47e8d4STejun Heo 			if (dev->xfer_shift != ATA_SHIFT_PIO)
19087d47e8d4STejun Heo 				sel = dma_dnxfer_sel[dev->spdn_cnt];
19097d47e8d4STejun Heo 			else
19107d47e8d4STejun Heo 				sel = pio_dnxfer_sel[dev->spdn_cnt];
19117d47e8d4STejun Heo 
19127d47e8d4STejun Heo 			dev->spdn_cnt++;
19137d47e8d4STejun Heo 
19147d47e8d4STejun Heo 			if (ata_down_xfermask_limit(dev, sel) == 0) {
1915cf480626STejun Heo 				action |= ATA_EH_RESET;
19167d47e8d4STejun Heo 				goto done;
19177d47e8d4STejun Heo 			}
19187d47e8d4STejun Heo 		}
19197d47e8d4STejun Heo 	}
19207d47e8d4STejun Heo 
19217d47e8d4STejun Heo 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
1922663f99b8STejun Heo 	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
19237d47e8d4STejun Heo 	 */
19247d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1925663f99b8STejun Heo 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
19267d47e8d4STejun Heo 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
19277d47e8d4STejun Heo 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
19287d47e8d4STejun Heo 			dev->spdn_cnt = 0;
1929cf480626STejun Heo 			action |= ATA_EH_RESET;
19307d47e8d4STejun Heo 			goto done;
19317d47e8d4STejun Heo 		}
19327d47e8d4STejun Heo 	}
19337d47e8d4STejun Heo 
1934c6fd2807SJeff Garzik 	return 0;
19357d47e8d4STejun Heo  done:
19367d47e8d4STejun Heo 	/* device has been slowed down, blow error history */
193776326ac1STejun Heo 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
19387d47e8d4STejun Heo 		ata_ering_clear(&dev->ering);
19397d47e8d4STejun Heo 	return action;
1940c6fd2807SJeff Garzik }
1941c6fd2807SJeff Garzik 
1942c6fd2807SJeff Garzik /**
19438d899e70SMark Lord  *	ata_eh_worth_retry - analyze error and decide whether to retry
19448d899e70SMark Lord  *	@qc: qc to possibly retry
19458d899e70SMark Lord  *
19468d899e70SMark Lord  *	Look at the cause of the error and decide if a retry
19478d899e70SMark Lord  * 	might be useful or not.  We don't want to retry media errors
19488d899e70SMark Lord  *	because the drive itself has probably already taken 10-30 seconds
19498d899e70SMark Lord  *	doing its own internal retries before reporting the failure.
19508d899e70SMark Lord  */
19518d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
19528d899e70SMark Lord {
19531eaca39aSBian Yu 	if (qc->err_mask & AC_ERR_MEDIA)
19548d899e70SMark Lord 		return 0;	/* don't retry media errors */
19558d899e70SMark Lord 	if (qc->flags & ATA_QCFLAG_IO)
19568d899e70SMark Lord 		return 1;	/* otherwise retry anything from fs stack */
19578d899e70SMark Lord 	if (qc->err_mask & AC_ERR_INVALID)
19588d899e70SMark Lord 		return 0;	/* don't retry these */
19598d899e70SMark Lord 	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
19608d899e70SMark Lord }
19618d899e70SMark Lord 
19628d899e70SMark Lord /**
19637eb49509SDamien Le Moal  *      ata_eh_quiet - check if we need to be quiet about a command error
19647eb49509SDamien Le Moal  *      @qc: qc to check
19657eb49509SDamien Le Moal  *
19667eb49509SDamien Le Moal  *      Look at the qc flags anbd its scsi command request flags to determine
19677eb49509SDamien Le Moal  *      if we need to be quiet about the command failure.
19687eb49509SDamien Le Moal  */
19697eb49509SDamien Le Moal static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
19707eb49509SDamien Le Moal {
1971c8329cd5SBart Van Assche 	if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
19727eb49509SDamien Le Moal 		qc->flags |= ATA_QCFLAG_QUIET;
19737eb49509SDamien Le Moal 	return qc->flags & ATA_QCFLAG_QUIET;
19747eb49509SDamien Le Moal }
19757eb49509SDamien Le Moal 
197610e80763SDamien Le Moal static int ata_eh_get_non_ncq_success_sense(struct ata_link *link)
197718bd7718SNiklas Cassel {
197818bd7718SNiklas Cassel 	struct ata_port *ap = link->ap;
197918bd7718SNiklas Cassel 	struct ata_queued_cmd *qc;
198018bd7718SNiklas Cassel 
198118bd7718SNiklas Cassel 	qc = __ata_qc_from_tag(ap, link->active_tag);
198218bd7718SNiklas Cassel 	if (!qc)
198318bd7718SNiklas Cassel 		return -EIO;
198418bd7718SNiklas Cassel 
198518bd7718SNiklas Cassel 	if (!(qc->flags & ATA_QCFLAG_EH) ||
198618bd7718SNiklas Cassel 	    !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
198718bd7718SNiklas Cassel 	    qc->err_mask)
198818bd7718SNiklas Cassel 		return -EIO;
198918bd7718SNiklas Cassel 
199018bd7718SNiklas Cassel 	if (!ata_eh_request_sense(qc))
199118bd7718SNiklas Cassel 		return -EIO;
199218bd7718SNiklas Cassel 
199318bd7718SNiklas Cassel 	/*
19949526dec2SNiklas Cassel 	 * No point in checking the return value, since the command has already
19959526dec2SNiklas Cassel 	 * completed successfully.
199618bd7718SNiklas Cassel 	 */
19979526dec2SNiklas Cassel 	ata_eh_decide_disposition(qc);
199818bd7718SNiklas Cassel 
199918bd7718SNiklas Cassel 	return 0;
200018bd7718SNiklas Cassel }
200118bd7718SNiklas Cassel 
200218bd7718SNiklas Cassel static void ata_eh_get_success_sense(struct ata_link *link)
200318bd7718SNiklas Cassel {
200418bd7718SNiklas Cassel 	struct ata_eh_context *ehc = &link->eh_context;
200518bd7718SNiklas Cassel 	struct ata_device *dev = link->device;
200618bd7718SNiklas Cassel 	struct ata_port *ap = link->ap;
200718bd7718SNiklas Cassel 	struct ata_queued_cmd *qc;
200818bd7718SNiklas Cassel 	int tag, ret = 0;
200918bd7718SNiklas Cassel 
201018bd7718SNiklas Cassel 	if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE))
201118bd7718SNiklas Cassel 		return;
201218bd7718SNiklas Cassel 
201318bd7718SNiklas Cassel 	/* if frozen, we can't do much */
201418bd7718SNiklas Cassel 	if (ata_port_is_frozen(ap)) {
201518bd7718SNiklas Cassel 		ata_dev_warn(dev,
201618bd7718SNiklas Cassel 			"successful sense data available but port frozen\n");
201718bd7718SNiklas Cassel 		goto out;
201818bd7718SNiklas Cassel 	}
201918bd7718SNiklas Cassel 
202018bd7718SNiklas Cassel 	/*
202118bd7718SNiklas Cassel 	 * If the link has sactive set, then we have outstanding NCQ commands
202218bd7718SNiklas Cassel 	 * and have to read the Successful NCQ Commands log to get the sense
202318bd7718SNiklas Cassel 	 * data. Otherwise, we are dealing with a non-NCQ command and use
202418bd7718SNiklas Cassel 	 * request sense ext command to retrieve the sense data.
202518bd7718SNiklas Cassel 	 */
202618bd7718SNiklas Cassel 	if (link->sactive)
202710e80763SDamien Le Moal 		ret = ata_eh_get_ncq_success_sense(link);
202818bd7718SNiklas Cassel 	else
202910e80763SDamien Le Moal 		ret = ata_eh_get_non_ncq_success_sense(link);
203018bd7718SNiklas Cassel 	if (ret)
203118bd7718SNiklas Cassel 		goto out;
203218bd7718SNiklas Cassel 
203318bd7718SNiklas Cassel 	ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
203418bd7718SNiklas Cassel 	return;
203518bd7718SNiklas Cassel 
203618bd7718SNiklas Cassel out:
203718bd7718SNiklas Cassel 	/*
203818bd7718SNiklas Cassel 	 * If we failed to get sense data for a successful command that ought to
203918bd7718SNiklas Cassel 	 * have sense data, we cannot simply return BLK_STS_OK to user space.
204018bd7718SNiklas Cassel 	 * This is because we can't know if the sense data that we couldn't get
204118bd7718SNiklas Cassel 	 * was actually "DATA CURRENTLY UNAVAILABLE". Reporting such a command
204218bd7718SNiklas Cassel 	 * as success to user space would result in a silent data corruption.
204318bd7718SNiklas Cassel 	 * Thus, add a bogus ABORTED_COMMAND sense data to such commands, such
204418bd7718SNiklas Cassel 	 * that SCSI will report these commands as BLK_STS_IOERR to user space.
204518bd7718SNiklas Cassel 	 */
204618bd7718SNiklas Cassel 	ata_qc_for_each_raw(ap, qc, tag) {
204718bd7718SNiklas Cassel 		if (!(qc->flags & ATA_QCFLAG_EH) ||
204818bd7718SNiklas Cassel 		    !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
204918bd7718SNiklas Cassel 		    qc->err_mask ||
205018bd7718SNiklas Cassel 		    ata_dev_phys_link(qc->dev) != link)
205118bd7718SNiklas Cassel 			continue;
205218bd7718SNiklas Cassel 
205318bd7718SNiklas Cassel 		/* We managed to get sense for this success command, skip. */
205418bd7718SNiklas Cassel 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
205518bd7718SNiklas Cassel 			continue;
205618bd7718SNiklas Cassel 
205718bd7718SNiklas Cassel 		/* This success command did not have any sense data, skip. */
205818bd7718SNiklas Cassel 		if (!(qc->result_tf.status & ATA_SENSE))
205918bd7718SNiklas Cassel 			continue;
206018bd7718SNiklas Cassel 
206118bd7718SNiklas Cassel 		/* This success command had sense data, but we failed to get. */
206218bd7718SNiklas Cassel 		ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0);
206318bd7718SNiklas Cassel 		qc->flags |= ATA_QCFLAG_SENSE_VALID;
206418bd7718SNiklas Cassel 	}
206518bd7718SNiklas Cassel 	ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
206618bd7718SNiklas Cassel }
206718bd7718SNiklas Cassel 
20687eb49509SDamien Le Moal /**
20699b1e2658STejun Heo  *	ata_eh_link_autopsy - analyze error and determine recovery action
20709b1e2658STejun Heo  *	@link: host link to perform autopsy on
2071c6fd2807SJeff Garzik  *
20720260731fSTejun Heo  *	Analyze why @link failed and determine which recovery actions
20730260731fSTejun Heo  *	are needed.  This function also sets more detailed AC_ERR_*
20740260731fSTejun Heo  *	values and fills sense data for ATAPI CHECK SENSE.
2075c6fd2807SJeff Garzik  *
2076c6fd2807SJeff Garzik  *	LOCKING:
2077c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
2078c6fd2807SJeff Garzik  */
20799b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link)
2080c6fd2807SJeff Garzik {
20810260731fSTejun Heo 	struct ata_port *ap = link->ap;
2082936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2083258c4e5cSJens Axboe 	struct ata_queued_cmd *qc;
2084dfcc173dSTejun Heo 	struct ata_device *dev;
20853884f7b0STejun Heo 	unsigned int all_err_mask = 0, eflags = 0;
20867eb49509SDamien Le Moal 	int tag, nr_failed = 0, nr_quiet = 0;
2087c6fd2807SJeff Garzik 	u32 serror;
2088c6fd2807SJeff Garzik 	int rc;
2089c6fd2807SJeff Garzik 
2090c6fd2807SJeff Garzik 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2091c6fd2807SJeff Garzik 		return;
2092c6fd2807SJeff Garzik 
2093c6fd2807SJeff Garzik 	/* obtain and analyze SError */
2094936fd732STejun Heo 	rc = sata_scr_read(link, SCR_ERROR, &serror);
2095c6fd2807SJeff Garzik 	if (rc == 0) {
2096c6fd2807SJeff Garzik 		ehc->i.serror |= serror;
20970260731fSTejun Heo 		ata_eh_analyze_serror(link);
20984e57c517STejun Heo 	} else if (rc != -EOPNOTSUPP) {
2099cf480626STejun Heo 		/* SError read failed, force reset and probing */
2100b558edddSTejun Heo 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
2101cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
21024e57c517STejun Heo 		ehc->i.err_mask |= AC_ERR_OTHER;
21034e57c517STejun Heo 	}
2104c6fd2807SJeff Garzik 
2105c6fd2807SJeff Garzik 	/* analyze NCQ failure */
21060260731fSTejun Heo 	ata_eh_analyze_ncq_error(link);
2107c6fd2807SJeff Garzik 
210818bd7718SNiklas Cassel 	/*
210918bd7718SNiklas Cassel 	 * Check if this was a successful command that simply needs sense data.
211018bd7718SNiklas Cassel 	 * Since the sense data is not part of the completion, we need to fetch
211118bd7718SNiklas Cassel 	 * it using an additional command. Since this can't be done from irq
211218bd7718SNiklas Cassel 	 * context, the sense data for successful commands are fetched by EH.
211318bd7718SNiklas Cassel 	 */
211418bd7718SNiklas Cassel 	ata_eh_get_success_sense(link);
211518bd7718SNiklas Cassel 
2116c6fd2807SJeff Garzik 	/* any real error trumps AC_ERR_OTHER */
2117c6fd2807SJeff Garzik 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2118c6fd2807SJeff Garzik 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2119c6fd2807SJeff Garzik 
2120c6fd2807SJeff Garzik 	all_err_mask |= ehc->i.err_mask;
2121c6fd2807SJeff Garzik 
2122258c4e5cSJens Axboe 	ata_qc_for_each_raw(ap, qc, tag) {
212387629312SNiklas Cassel 		if (!(qc->flags & ATA_QCFLAG_EH) ||
21243d8a3ae3SNiklas Cassel 		    qc->flags & ATA_QCFLAG_RETRY ||
212518bd7718SNiklas Cassel 		    qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD ||
2126b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link)
2127c6fd2807SJeff Garzik 			continue;
2128c6fd2807SJeff Garzik 
2129c6fd2807SJeff Garzik 		/* inherit upper level err_mask */
2130c6fd2807SJeff Garzik 		qc->err_mask |= ehc->i.err_mask;
2131c6fd2807SJeff Garzik 
2132c6fd2807SJeff Garzik 		/* analyze TF */
2133e3b1fff6SNiklas Cassel 		ehc->i.action |= ata_eh_analyze_tf(qc);
2134c6fd2807SJeff Garzik 
2135c6fd2807SJeff Garzik 		/* DEV errors are probably spurious in case of ATA_BUS error */
2136c6fd2807SJeff Garzik 		if (qc->err_mask & AC_ERR_ATA_BUS)
2137c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2138c6fd2807SJeff Garzik 					  AC_ERR_INVALID);
2139c6fd2807SJeff Garzik 
2140c6fd2807SJeff Garzik 		/* any real error trumps unknown error */
2141c6fd2807SJeff Garzik 		if (qc->err_mask & ~AC_ERR_OTHER)
2142c6fd2807SJeff Garzik 			qc->err_mask &= ~AC_ERR_OTHER;
2143c6fd2807SJeff Garzik 
2144804689adSDamien Le Moal 		/*
2145804689adSDamien Le Moal 		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
2146804689adSDamien Le Moal 		 * layers will determine whether the command is worth retrying
2147804689adSDamien Le Moal 		 * based on the sense data and device class/type. Otherwise,
2148804689adSDamien Le Moal 		 * determine directly if the command is worth retrying using its
2149804689adSDamien Le Moal 		 * error mask and flags.
2150804689adSDamien Le Moal 		 */
2151f90f0828STejun Heo 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2152c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2153804689adSDamien Le Moal 		else if (ata_eh_worth_retry(qc))
215403faab78STejun Heo 			qc->flags |= ATA_QCFLAG_RETRY;
215503faab78STejun Heo 
2156c6fd2807SJeff Garzik 		/* accumulate error info */
2157c6fd2807SJeff Garzik 		ehc->i.dev = qc->dev;
2158c6fd2807SJeff Garzik 		all_err_mask |= qc->err_mask;
2159c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_IO)
21603884f7b0STejun Heo 			eflags |= ATA_EFLAG_IS_IO;
2161255c03d1SHannes Reinecke 		trace_ata_eh_link_autopsy_qc(qc);
21627eb49509SDamien Le Moal 
21637eb49509SDamien Le Moal 		/* Count quiet errors */
21647eb49509SDamien Le Moal 		if (ata_eh_quiet(qc))
21657eb49509SDamien Le Moal 			nr_quiet++;
21667eb49509SDamien Le Moal 		nr_failed++;
2167c6fd2807SJeff Garzik 	}
2168c6fd2807SJeff Garzik 
21697eb49509SDamien Le Moal 	/* If all failed commands requested silence, then be quiet */
21707eb49509SDamien Le Moal 	if (nr_quiet == nr_failed)
21717eb49509SDamien Le Moal 		ehc->i.flags |= ATA_EHI_QUIET;
21727eb49509SDamien Le Moal 
2173c6fd2807SJeff Garzik 	/* enforce default EH actions */
21744cb7c6f1SNiklas Cassel 	if (ata_port_is_frozen(ap) ||
2175c6fd2807SJeff Garzik 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2176cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
21773884f7b0STejun Heo 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
21783884f7b0STejun Heo 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2179c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_REVALIDATE;
2180c6fd2807SJeff Garzik 
2181dfcc173dSTejun Heo 	/* If we have offending qcs and the associated failed device,
2182dfcc173dSTejun Heo 	 * perform per-dev EH action only on the offending device.
2183dfcc173dSTejun Heo 	 */
2184c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2185c6fd2807SJeff Garzik 		ehc->i.dev_action[ehc->i.dev->devno] |=
2186c6fd2807SJeff Garzik 			ehc->i.action & ATA_EH_PERDEV_MASK;
2187c6fd2807SJeff Garzik 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2188c6fd2807SJeff Garzik 	}
2189c6fd2807SJeff Garzik 
21902695e366STejun Heo 	/* propagate timeout to host link */
21912695e366STejun Heo 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
21922695e366STejun Heo 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
21932695e366STejun Heo 
21942695e366STejun Heo 	/* record error and consider speeding down */
2195dfcc173dSTejun Heo 	dev = ehc->i.dev;
21962695e366STejun Heo 	if (!dev && ((ata_link_max_devices(link) == 1 &&
21972695e366STejun Heo 		      ata_dev_enabled(link->device))))
2198dfcc173dSTejun Heo 	    dev = link->device;
2199dfcc173dSTejun Heo 
220076326ac1STejun Heo 	if (dev) {
220176326ac1STejun Heo 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
220276326ac1STejun Heo 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
22033884f7b0STejun Heo 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2204255c03d1SHannes Reinecke 		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
2205f1601113SRameshwar Prasad Sahu 	}
2206c6fd2807SJeff Garzik }
2207c6fd2807SJeff Garzik 
2208c6fd2807SJeff Garzik /**
22099b1e2658STejun Heo  *	ata_eh_autopsy - analyze error and determine recovery action
22109b1e2658STejun Heo  *	@ap: host port to perform autopsy on
22119b1e2658STejun Heo  *
22129b1e2658STejun Heo  *	Analyze all links of @ap and determine why they failed and
22139b1e2658STejun Heo  *	which recovery actions are needed.
22149b1e2658STejun Heo  *
22159b1e2658STejun Heo  *	LOCKING:
22169b1e2658STejun Heo  *	Kernel thread context (may sleep).
22179b1e2658STejun Heo  */
2218fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap)
22199b1e2658STejun Heo {
22209b1e2658STejun Heo 	struct ata_link *link;
22219b1e2658STejun Heo 
22221eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE)
22239b1e2658STejun Heo 		ata_eh_link_autopsy(link);
22242695e366STejun Heo 
2225b1c72916STejun Heo 	/* Handle the frigging slave link.  Autopsy is done similarly
2226b1c72916STejun Heo 	 * but actions and flags are transferred over to the master
2227b1c72916STejun Heo 	 * link and handled from there.
2228b1c72916STejun Heo 	 */
2229b1c72916STejun Heo 	if (ap->slave_link) {
2230b1c72916STejun Heo 		struct ata_eh_context *mehc = &ap->link.eh_context;
2231b1c72916STejun Heo 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2232b1c72916STejun Heo 
2233848e4c68STejun Heo 		/* transfer control flags from master to slave */
2234848e4c68STejun Heo 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2235848e4c68STejun Heo 
2236848e4c68STejun Heo 		/* perform autopsy on the slave link */
2237b1c72916STejun Heo 		ata_eh_link_autopsy(ap->slave_link);
2238b1c72916STejun Heo 
2239848e4c68STejun Heo 		/* transfer actions from slave to master and clear slave */
2240b1c72916STejun Heo 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2241b1c72916STejun Heo 		mehc->i.action		|= sehc->i.action;
2242b1c72916STejun Heo 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2243b1c72916STejun Heo 		mehc->i.flags		|= sehc->i.flags;
2244b1c72916STejun Heo 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2245b1c72916STejun Heo 	}
2246b1c72916STejun Heo 
22472695e366STejun Heo 	/* Autopsy of fanout ports can affect host link autopsy.
22482695e366STejun Heo 	 * Perform host link autopsy last.
22492695e366STejun Heo 	 */
2250071f44b1STejun Heo 	if (sata_pmp_attached(ap))
22512695e366STejun Heo 		ata_eh_link_autopsy(&ap->link);
22529b1e2658STejun Heo }
22539b1e2658STejun Heo 
22549b1e2658STejun Heo /**
2255d4520903SHannes Reinecke  *	ata_get_cmd_name - get name for ATA command
2256d4520903SHannes Reinecke  *	@command: ATA command code to get name for
22576521148cSRobert Hancock  *
2258d4520903SHannes Reinecke  *	Return a textual name of the given command or "unknown"
22596521148cSRobert Hancock  *
22606521148cSRobert Hancock  *	LOCKING:
22616521148cSRobert Hancock  *	None
22626521148cSRobert Hancock  */
2263d4520903SHannes Reinecke const char *ata_get_cmd_name(u8 command)
22646521148cSRobert Hancock {
22656521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
22666521148cSRobert Hancock 	static const struct
22676521148cSRobert Hancock 	{
22686521148cSRobert Hancock 		u8 command;
22696521148cSRobert Hancock 		const char *text;
22706521148cSRobert Hancock 	} cmd_descr[] = {
22716521148cSRobert Hancock 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
22726521148cSRobert Hancock 		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
22736521148cSRobert Hancock 		{ ATA_CMD_STANDBY,		"STANDBY" },
22746521148cSRobert Hancock 		{ ATA_CMD_IDLE,			"IDLE" },
22756521148cSRobert Hancock 		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
22766521148cSRobert Hancock 		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
22773915c3b5SRobert Hancock 		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
22786521148cSRobert Hancock 		{ ATA_CMD_NOP,			"NOP" },
22796521148cSRobert Hancock 		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
22806521148cSRobert Hancock 		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
22816521148cSRobert Hancock 		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
22826521148cSRobert Hancock 		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
22836521148cSRobert Hancock 		{ ATA_CMD_SERVICE,		"SERVICE" },
22846521148cSRobert Hancock 		{ ATA_CMD_READ,			"READ DMA" },
22856521148cSRobert Hancock 		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
22866521148cSRobert Hancock 		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
22876521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
22886521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
22896521148cSRobert Hancock 		{ ATA_CMD_WRITE,		"WRITE DMA" },
22906521148cSRobert Hancock 		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
22916521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
22926521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
22936521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
22946521148cSRobert Hancock 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
22956521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
22966521148cSRobert Hancock 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
22976521148cSRobert Hancock 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
2298d3122bf9SDamien Le Moal 		{ ATA_CMD_NCQ_NON_DATA,		"NCQ NON-DATA" },
22993915c3b5SRobert Hancock 		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
23003915c3b5SRobert Hancock 		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
23016521148cSRobert Hancock 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
23026521148cSRobert Hancock 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
23036521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
23046521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
23056521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
23066521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
23076521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
23086521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
23096521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
23106521148cSRobert Hancock 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
23116521148cSRobert Hancock 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
23126521148cSRobert Hancock 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
23136521148cSRobert Hancock 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
23146521148cSRobert Hancock 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
23156521148cSRobert Hancock 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
23166521148cSRobert Hancock 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
23176521148cSRobert Hancock 		{ ATA_CMD_SLEEP,		"SLEEP" },
23186521148cSRobert Hancock 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
23196521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
23206521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
23216521148cSRobert Hancock 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
23226521148cSRobert Hancock 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
23236521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
23246521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
23256521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
23266521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
23273915c3b5SRobert Hancock 		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
23286521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
23296521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
23306521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
23316521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
23326521148cSRobert Hancock 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
23333915c3b5SRobert Hancock 		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
23346521148cSRobert Hancock 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
23353915c3b5SRobert Hancock 		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
23366521148cSRobert Hancock 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
23376521148cSRobert Hancock 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
23386521148cSRobert Hancock 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
23396521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
23406521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
23416521148cSRobert Hancock 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
23426521148cSRobert Hancock 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
23436521148cSRobert Hancock 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
23446521148cSRobert Hancock 		{ ATA_CMD_SMART,		"SMART" },
23456521148cSRobert Hancock 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
23466521148cSRobert Hancock 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2347acad7627SFUJITA Tomonori 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
23486521148cSRobert Hancock 		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
23496521148cSRobert Hancock 		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
23506521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
23516521148cSRobert Hancock 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
23526521148cSRobert Hancock 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
23536521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
23543915c3b5SRobert Hancock 		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
23553915c3b5SRobert Hancock 		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
235628a3fc22SHannes Reinecke 		{ ATA_CMD_ZAC_MGMT_IN,		"ZAC MANAGEMENT IN" },
235727708a95SHannes Reinecke 		{ ATA_CMD_ZAC_MGMT_OUT,		"ZAC MANAGEMENT OUT" },
23586521148cSRobert Hancock 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
23596521148cSRobert Hancock 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
23606521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
23616521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
23626521148cSRobert Hancock 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
23636521148cSRobert Hancock 		{ 0,				NULL } /* terminate list */
23646521148cSRobert Hancock 	};
23656521148cSRobert Hancock 
23666521148cSRobert Hancock 	unsigned int i;
23676521148cSRobert Hancock 	for (i = 0; cmd_descr[i].text; i++)
23686521148cSRobert Hancock 		if (cmd_descr[i].command == command)
23696521148cSRobert Hancock 			return cmd_descr[i].text;
23706521148cSRobert Hancock #endif
23716521148cSRobert Hancock 
2372d4520903SHannes Reinecke 	return "unknown";
23736521148cSRobert Hancock }
2374d4520903SHannes Reinecke EXPORT_SYMBOL_GPL(ata_get_cmd_name);
23756521148cSRobert Hancock 
23766521148cSRobert Hancock /**
23779b1e2658STejun Heo  *	ata_eh_link_report - report error handling to user
23780260731fSTejun Heo  *	@link: ATA link EH is going on
2379c6fd2807SJeff Garzik  *
2380c6fd2807SJeff Garzik  *	Report EH to user.
2381c6fd2807SJeff Garzik  *
2382c6fd2807SJeff Garzik  *	LOCKING:
2383c6fd2807SJeff Garzik  *	None.
2384c6fd2807SJeff Garzik  */
23859b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link)
2386c6fd2807SJeff Garzik {
23870260731fSTejun Heo 	struct ata_port *ap = link->ap;
23880260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2389258c4e5cSJens Axboe 	struct ata_queued_cmd *qc;
2390c6fd2807SJeff Garzik 	const char *frozen, *desc;
239149728bdcSDamien Le Moal 	char tries_buf[16] = "";
2392c6fd2807SJeff Garzik 	int tag, nr_failed = 0;
2393c6fd2807SJeff Garzik 
239494ff3d54STejun Heo 	if (ehc->i.flags & ATA_EHI_QUIET)
239594ff3d54STejun Heo 		return;
239694ff3d54STejun Heo 
2397c6fd2807SJeff Garzik 	desc = NULL;
2398c6fd2807SJeff Garzik 	if (ehc->i.desc[0] != '\0')
2399c6fd2807SJeff Garzik 		desc = ehc->i.desc;
2400c6fd2807SJeff Garzik 
2401258c4e5cSJens Axboe 	ata_qc_for_each_raw(ap, qc, tag) {
240287629312SNiklas Cassel 		if (!(qc->flags & ATA_QCFLAG_EH) ||
2403b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link ||
2404e027bd36STejun Heo 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2405e027bd36STejun Heo 		     qc->err_mask == AC_ERR_DEV))
2406c6fd2807SJeff Garzik 			continue;
2407c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2408c6fd2807SJeff Garzik 			continue;
2409c6fd2807SJeff Garzik 
2410c6fd2807SJeff Garzik 		nr_failed++;
2411c6fd2807SJeff Garzik 	}
2412c6fd2807SJeff Garzik 
2413c6fd2807SJeff Garzik 	if (!nr_failed && !ehc->i.err_mask)
2414c6fd2807SJeff Garzik 		return;
2415c6fd2807SJeff Garzik 
2416c6fd2807SJeff Garzik 	frozen = "";
24174cb7c6f1SNiklas Cassel 	if (ata_port_is_frozen(ap))
2418c6fd2807SJeff Garzik 		frozen = " frozen";
2419c6fd2807SJeff Garzik 
2420a1e10f7eSTejun Heo 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2421462098b0SLevente Kurusa 		snprintf(tries_buf, sizeof(tries_buf), " t%d",
2422a1e10f7eSTejun Heo 			 ap->eh_tries);
2423a1e10f7eSTejun Heo 
2424c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2425a9a79dfeSJoe Perches 		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2426a1e10f7eSTejun Heo 			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2427a1e10f7eSTejun Heo 			    ehc->i.err_mask, link->sactive, ehc->i.serror,
2428a1e10f7eSTejun Heo 			    ehc->i.action, frozen, tries_buf);
2429c6fd2807SJeff Garzik 		if (desc)
2430a9a79dfeSJoe Perches 			ata_dev_err(ehc->i.dev, "%s\n", desc);
2431c6fd2807SJeff Garzik 	} else {
2432a9a79dfeSJoe Perches 		ata_link_err(link, "exception Emask 0x%x "
2433a1e10f7eSTejun Heo 			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2434a1e10f7eSTejun Heo 			     ehc->i.err_mask, link->sactive, ehc->i.serror,
2435a1e10f7eSTejun Heo 			     ehc->i.action, frozen, tries_buf);
2436c6fd2807SJeff Garzik 		if (desc)
2437a9a79dfeSJoe Perches 			ata_link_err(link, "%s\n", desc);
2438c6fd2807SJeff Garzik 	}
2439c6fd2807SJeff Garzik 
24406521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
24411333e194SRobert Hancock 	if (ehc->i.serror)
2442a9a79dfeSJoe Perches 		ata_link_err(link,
24431333e194SRobert Hancock 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
24441333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
24451333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
24461333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
24471333e194SRobert Hancock 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
24481333e194SRobert Hancock 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
24491333e194SRobert Hancock 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
24501333e194SRobert Hancock 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
24511333e194SRobert Hancock 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
24521333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
24531333e194SRobert Hancock 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
24541333e194SRobert Hancock 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
24551333e194SRobert Hancock 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
24561333e194SRobert Hancock 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
24571333e194SRobert Hancock 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
24581333e194SRobert Hancock 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
24591333e194SRobert Hancock 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
24601333e194SRobert Hancock 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
24616521148cSRobert Hancock #endif
24621333e194SRobert Hancock 
2463258c4e5cSJens Axboe 	ata_qc_for_each_raw(ap, qc, tag) {
24648a937581STejun Heo 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2465abb6a889STejun Heo 		char data_buf[20] = "";
2466abb6a889STejun Heo 		char cdb_buf[70] = "";
2467c6fd2807SJeff Garzik 
246887629312SNiklas Cassel 		if (!(qc->flags & ATA_QCFLAG_EH) ||
2469b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2470c6fd2807SJeff Garzik 			continue;
2471c6fd2807SJeff Garzik 
2472abb6a889STejun Heo 		if (qc->dma_dir != DMA_NONE) {
2473abb6a889STejun Heo 			static const char *dma_str[] = {
2474abb6a889STejun Heo 				[DMA_BIDIRECTIONAL]	= "bidi",
2475abb6a889STejun Heo 				[DMA_TO_DEVICE]		= "out",
2476abb6a889STejun Heo 				[DMA_FROM_DEVICE]	= "in",
2477abb6a889STejun Heo 			};
2478fb1b8b11SGeert Uytterhoeven 			const char *prot_str = NULL;
2479abb6a889STejun Heo 
2480fb1b8b11SGeert Uytterhoeven 			switch (qc->tf.protocol) {
2481fb1b8b11SGeert Uytterhoeven 			case ATA_PROT_UNKNOWN:
2482fb1b8b11SGeert Uytterhoeven 				prot_str = "unknown";
2483fb1b8b11SGeert Uytterhoeven 				break;
2484fb1b8b11SGeert Uytterhoeven 			case ATA_PROT_NODATA:
2485fb1b8b11SGeert Uytterhoeven 				prot_str = "nodata";
2486fb1b8b11SGeert Uytterhoeven 				break;
2487fb1b8b11SGeert Uytterhoeven 			case ATA_PROT_PIO:
2488fb1b8b11SGeert Uytterhoeven 				prot_str = "pio";
2489fb1b8b11SGeert Uytterhoeven 				break;
2490fb1b8b11SGeert Uytterhoeven 			case ATA_PROT_DMA:
2491fb1b8b11SGeert Uytterhoeven 				prot_str = "dma";
2492fb1b8b11SGeert Uytterhoeven 				break;
2493fb1b8b11SGeert Uytterhoeven 			case ATA_PROT_NCQ:
2494fb1b8b11SGeert Uytterhoeven 				prot_str = "ncq dma";
2495fb1b8b11SGeert Uytterhoeven 				break;
2496fb1b8b11SGeert Uytterhoeven 			case ATA_PROT_NCQ_NODATA:
2497fb1b8b11SGeert Uytterhoeven 				prot_str = "ncq nodata";
2498fb1b8b11SGeert Uytterhoeven 				break;
2499fb1b8b11SGeert Uytterhoeven 			case ATAPI_PROT_NODATA:
2500fb1b8b11SGeert Uytterhoeven 				prot_str = "nodata";
2501fb1b8b11SGeert Uytterhoeven 				break;
2502fb1b8b11SGeert Uytterhoeven 			case ATAPI_PROT_PIO:
2503fb1b8b11SGeert Uytterhoeven 				prot_str = "pio";
2504fb1b8b11SGeert Uytterhoeven 				break;
2505fb1b8b11SGeert Uytterhoeven 			case ATAPI_PROT_DMA:
2506fb1b8b11SGeert Uytterhoeven 				prot_str = "dma";
2507fb1b8b11SGeert Uytterhoeven 				break;
2508fb1b8b11SGeert Uytterhoeven 			}
2509abb6a889STejun Heo 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2510fb1b8b11SGeert Uytterhoeven 				 prot_str, qc->nbytes, dma_str[qc->dma_dir]);
2511abb6a889STejun Heo 		}
2512abb6a889STejun Heo 
25136521148cSRobert Hancock 		if (ata_is_atapi(qc->tf.protocol)) {
2514a13b0c9dSHannes Reinecke 			const u8 *cdb = qc->cdb;
2515a13b0c9dSHannes Reinecke 			size_t cdb_len = qc->dev->cdb_len;
2516a13b0c9dSHannes Reinecke 
2517cbba5b0eSHannes Reinecke 			if (qc->scsicmd) {
2518cbba5b0eSHannes Reinecke 				cdb = qc->scsicmd->cmnd;
2519cbba5b0eSHannes Reinecke 				cdb_len = qc->scsicmd->cmd_len;
2520cbba5b0eSHannes Reinecke 			}
2521cbba5b0eSHannes Reinecke 			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
2522cbba5b0eSHannes Reinecke 					      cdb, cdb_len);
2523d4520903SHannes Reinecke 		} else
2524a9a79dfeSJoe Perches 			ata_dev_err(qc->dev, "failed command: %s\n",
2525d4520903SHannes Reinecke 				    ata_get_cmd_name(cmd->command));
2526abb6a889STejun Heo 
2527a9a79dfeSJoe Perches 		ata_dev_err(qc->dev,
25288a937581STejun Heo 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2529abb6a889STejun Heo 			"tag %d%s\n         %s"
25308a937581STejun Heo 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
25315335b729STejun Heo 			"Emask 0x%x (%s)%s\n",
25328a937581STejun Heo 			cmd->command, cmd->feature, cmd->nsect,
25338a937581STejun Heo 			cmd->lbal, cmd->lbam, cmd->lbah,
25348a937581STejun Heo 			cmd->hob_feature, cmd->hob_nsect,
25358a937581STejun Heo 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2536abb6a889STejun Heo 			cmd->device, qc->tag, data_buf, cdb_buf,
2537efcef265SSergey Shtylyov 			res->status, res->error, res->nsect,
25388a937581STejun Heo 			res->lbal, res->lbam, res->lbah,
25398a937581STejun Heo 			res->hob_feature, res->hob_nsect,
25408a937581STejun Heo 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
25415335b729STejun Heo 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
25425335b729STejun Heo 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
25431333e194SRobert Hancock 
25446521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
2545efcef265SSergey Shtylyov 		if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2546e87fd28cSHannes Reinecke 				   ATA_SENSE | ATA_ERR)) {
2547efcef265SSergey Shtylyov 			if (res->status & ATA_BUSY)
2548a9a79dfeSJoe Perches 				ata_dev_err(qc->dev, "status: { Busy }\n");
25491333e194SRobert Hancock 			else
2550e87fd28cSHannes Reinecke 				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
2551efcef265SSergey Shtylyov 				  res->status & ATA_DRDY ? "DRDY " : "",
2552efcef265SSergey Shtylyov 				  res->status & ATA_DF ? "DF " : "",
2553efcef265SSergey Shtylyov 				  res->status & ATA_DRQ ? "DRQ " : "",
2554efcef265SSergey Shtylyov 				  res->status & ATA_SENSE ? "SENSE " : "",
2555efcef265SSergey Shtylyov 				  res->status & ATA_ERR ? "ERR " : "");
25561333e194SRobert Hancock 		}
25571333e194SRobert Hancock 
25581333e194SRobert Hancock 		if (cmd->command != ATA_CMD_PACKET &&
2559efcef265SSergey Shtylyov 		    (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF |
2560efcef265SSergey Shtylyov 				   ATA_ABORTED)))
2561eec7e1c1SAlexey Asemov 			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2562efcef265SSergey Shtylyov 				    res->error & ATA_ICRC ? "ICRC " : "",
2563efcef265SSergey Shtylyov 				    res->error & ATA_UNC ? "UNC " : "",
2564efcef265SSergey Shtylyov 				    res->error & ATA_AMNF ? "AMNF " : "",
2565efcef265SSergey Shtylyov 				    res->error & ATA_IDNF ? "IDNF " : "",
2566efcef265SSergey Shtylyov 				    res->error & ATA_ABORTED ? "ABRT " : "");
25676521148cSRobert Hancock #endif
2568c6fd2807SJeff Garzik 	}
2569c6fd2807SJeff Garzik }
2570c6fd2807SJeff Garzik 
25719b1e2658STejun Heo /**
25729b1e2658STejun Heo  *	ata_eh_report - report error handling to user
25739b1e2658STejun Heo  *	@ap: ATA port to report EH about
25749b1e2658STejun Heo  *
25759b1e2658STejun Heo  *	Report EH to user.
25769b1e2658STejun Heo  *
25779b1e2658STejun Heo  *	LOCKING:
25789b1e2658STejun Heo  *	None.
25799b1e2658STejun Heo  */
2580fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap)
25819b1e2658STejun Heo {
25829b1e2658STejun Heo 	struct ata_link *link;
25839b1e2658STejun Heo 
25841eca4365STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
25859b1e2658STejun Heo 		ata_eh_link_report(link);
25869b1e2658STejun Heo }
25879b1e2658STejun Heo 
2588cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2589b1c72916STejun Heo 			unsigned int *classes, unsigned long deadline,
2590b1c72916STejun Heo 			bool clear_classes)
2591c6fd2807SJeff Garzik {
2592f58229f8STejun Heo 	struct ata_device *dev;
2593c6fd2807SJeff Garzik 
2594b1c72916STejun Heo 	if (clear_classes)
25951eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
2596f58229f8STejun Heo 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2597c6fd2807SJeff Garzik 
2598f046519fSTejun Heo 	return reset(link, classes, deadline);
2599c6fd2807SJeff Garzik }
2600c6fd2807SJeff Garzik 
2601e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2602c6fd2807SJeff Garzik {
260345db2f6cSTejun Heo 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2604ae791c05STejun Heo 		return 0;
26055dbfc9cbSTejun Heo 	if (rc == -EAGAIN)
2606c6fd2807SJeff Garzik 		return 1;
2607071f44b1STejun Heo 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
26083495de73STejun Heo 		return 1;
2609c6fd2807SJeff Garzik 	return 0;
2610c6fd2807SJeff Garzik }
2611c6fd2807SJeff Garzik 
2612fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify,
2613c6fd2807SJeff Garzik 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2614c6fd2807SJeff Garzik 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2615c6fd2807SJeff Garzik {
2616afaa5c37STejun Heo 	struct ata_port *ap = link->ap;
2617b1c72916STejun Heo 	struct ata_link *slave = ap->slave_link;
2618936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2619705d2014SBartlomiej Zolnierkiewicz 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2620c6fd2807SJeff Garzik 	unsigned int *classes = ehc->classes;
2621416dc9edSTejun Heo 	unsigned int lflags = link->flags;
2622c6fd2807SJeff Garzik 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2623d8af0eb6STejun Heo 	int max_tries = 0, try = 0;
2624b1c72916STejun Heo 	struct ata_link *failed_link;
2625f58229f8STejun Heo 	struct ata_device *dev;
2626416dc9edSTejun Heo 	unsigned long deadline, now;
2627c6fd2807SJeff Garzik 	ata_reset_fn_t reset;
2628afaa5c37STejun Heo 	unsigned long flags;
2629416dc9edSTejun Heo 	u32 sstatus;
2630b1c72916STejun Heo 	int nr_unknown, rc;
2631c6fd2807SJeff Garzik 
2632932648b0STejun Heo 	/*
2633932648b0STejun Heo 	 * Prepare to reset
2634932648b0STejun Heo 	 */
2635ca02f225SSergey Shtylyov 	while (ata_eh_reset_timeouts[max_tries] != UINT_MAX)
2636d8af0eb6STejun Heo 		max_tries++;
2637ca6d43b0SDan Williams 	if (link->flags & ATA_LFLAG_RST_ONCE)
2638ca6d43b0SDan Williams 		max_tries = 1;
263905944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_HRST)
264005944bdfSTejun Heo 		hardreset = NULL;
264105944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_SRST)
264205944bdfSTejun Heo 		softreset = NULL;
2643d8af0eb6STejun Heo 
264425985edcSLucas De Marchi 	/* make sure each reset attempt is at least COOL_DOWN apart */
264519b72321STejun Heo 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
26460a2c0f56STejun Heo 		now = jiffies;
264719b72321STejun Heo 		WARN_ON(time_after(ehc->last_reset, now));
264819b72321STejun Heo 		deadline = ata_deadline(ehc->last_reset,
264919b72321STejun Heo 					ATA_EH_RESET_COOL_DOWN);
26500a2c0f56STejun Heo 		if (time_before(now, deadline))
26510a2c0f56STejun Heo 			schedule_timeout_uninterruptible(deadline - now);
265219b72321STejun Heo 	}
26530a2c0f56STejun Heo 
2654afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2655afaa5c37STejun Heo 	ap->pflags |= ATA_PFLAG_RESETTING;
2656afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2657afaa5c37STejun Heo 
2658cf480626STejun Heo 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2659c6fd2807SJeff Garzik 
26601eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2661cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2662cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2663cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2664cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2665cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2666cdeab114STejun Heo 		 * bus as we may be talking too fast.
2667cdeab114STejun Heo 		 */
2668cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
26695416912aSAaron Lu 		dev->dma_mode = 0xff;
2670cdeab114STejun Heo 
2671cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2672cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2673cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2674cdeab114STejun Heo 		 * configuring devices.
2675cdeab114STejun Heo 		 */
2676cdeab114STejun Heo 		if (ap->ops->set_piomode)
2677cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2678cdeab114STejun Heo 	}
2679cdeab114STejun Heo 
2680cf480626STejun Heo 	/* prefer hardreset */
2681932648b0STejun Heo 	reset = NULL;
2682cf480626STejun Heo 	ehc->i.action &= ~ATA_EH_RESET;
2683cf480626STejun Heo 	if (hardreset) {
2684cf480626STejun Heo 		reset = hardreset;
2685a674050eSTejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
26864f7faa3fSTejun Heo 	} else if (softreset) {
2687cf480626STejun Heo 		reset = softreset;
2688a674050eSTejun Heo 		ehc->i.action |= ATA_EH_SOFTRESET;
2689cf480626STejun Heo 	}
2690c6fd2807SJeff Garzik 
2691c6fd2807SJeff Garzik 	if (prereset) {
2692b1c72916STejun Heo 		unsigned long deadline = ata_deadline(jiffies,
2693b1c72916STejun Heo 						      ATA_EH_PRERESET_TIMEOUT);
2694b1c72916STejun Heo 
2695b1c72916STejun Heo 		if (slave) {
2696b1c72916STejun Heo 			sehc->i.action &= ~ATA_EH_RESET;
2697b1c72916STejun Heo 			sehc->i.action |= ehc->i.action;
2698b1c72916STejun Heo 		}
2699b1c72916STejun Heo 
2700b1c72916STejun Heo 		rc = prereset(link, deadline);
2701b1c72916STejun Heo 
2702b1c72916STejun Heo 		/* If present, do prereset on slave link too.  Reset
2703b1c72916STejun Heo 		 * is skipped iff both master and slave links report
2704b1c72916STejun Heo 		 * -ENOENT or clear ATA_EH_RESET.
2705b1c72916STejun Heo 		 */
2706b1c72916STejun Heo 		if (slave && (rc == 0 || rc == -ENOENT)) {
2707b1c72916STejun Heo 			int tmp;
2708b1c72916STejun Heo 
2709b1c72916STejun Heo 			tmp = prereset(slave, deadline);
2710b1c72916STejun Heo 			if (tmp != -ENOENT)
2711b1c72916STejun Heo 				rc = tmp;
2712b1c72916STejun Heo 
2713b1c72916STejun Heo 			ehc->i.action |= sehc->i.action;
2714b1c72916STejun Heo 		}
2715b1c72916STejun Heo 
2716c6fd2807SJeff Garzik 		if (rc) {
2717c961922bSAlan Cox 			if (rc == -ENOENT) {
2718a9a79dfeSJoe Perches 				ata_link_dbg(link, "port disabled--ignoring\n");
2719cf480626STejun Heo 				ehc->i.action &= ~ATA_EH_RESET;
27204aa9ab67STejun Heo 
27211eca4365STejun Heo 				ata_for_each_dev(dev, link, ALL)
2722f58229f8STejun Heo 					classes[dev->devno] = ATA_DEV_NONE;
27234aa9ab67STejun Heo 
27244aa9ab67STejun Heo 				rc = 0;
2725c961922bSAlan Cox 			} else
2726a9a79dfeSJoe Perches 				ata_link_err(link,
2727a9a79dfeSJoe Perches 					     "prereset failed (errno=%d)\n",
2728a9a79dfeSJoe Perches 					     rc);
2729fccb6ea5STejun Heo 			goto out;
2730c6fd2807SJeff Garzik 		}
2731c6fd2807SJeff Garzik 
2732932648b0STejun Heo 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2733d6515e6fSTejun Heo 		 * bang classes, thaw and return.
2734932648b0STejun Heo 		 */
2735932648b0STejun Heo 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
27361eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL)
2737f58229f8STejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
27384cb7c6f1SNiklas Cassel 			if (ata_port_is_frozen(ap) && ata_is_host_link(link))
2739d6515e6fSTejun Heo 				ata_eh_thaw_port(ap);
2740fccb6ea5STejun Heo 			rc = 0;
2741fccb6ea5STejun Heo 			goto out;
2742c6fd2807SJeff Garzik 		}
2743932648b0STejun Heo 	}
2744c6fd2807SJeff Garzik 
2745c6fd2807SJeff Garzik  retry:
2746932648b0STejun Heo 	/*
2747932648b0STejun Heo 	 * Perform reset
2748932648b0STejun Heo 	 */
2749dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2750dc98c32cSTejun Heo 		ata_eh_freeze_port(ap);
2751dc98c32cSTejun Heo 
2752341c2c95STejun Heo 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
275331daabdaSTejun Heo 
2754932648b0STejun Heo 	if (reset) {
2755c6fd2807SJeff Garzik 		if (verbose)
2756a9a79dfeSJoe Perches 			ata_link_info(link, "%s resetting link\n",
2757c6fd2807SJeff Garzik 				      reset == softreset ? "soft" : "hard");
2758c6fd2807SJeff Garzik 
2759c6fd2807SJeff Garzik 		/* mark that this EH session started with reset */
276019b72321STejun Heo 		ehc->last_reset = jiffies;
2761f8ec26d0SHannes Reinecke 		if (reset == hardreset) {
27620d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2763f8ec26d0SHannes Reinecke 			trace_ata_link_hardreset_begin(link, classes, deadline);
2764f8ec26d0SHannes Reinecke 		} else {
27650d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2766f8ec26d0SHannes Reinecke 			trace_ata_link_softreset_begin(link, classes, deadline);
2767f8ec26d0SHannes Reinecke 		}
2768c6fd2807SJeff Garzik 
2769b1c72916STejun Heo 		rc = ata_do_reset(link, reset, classes, deadline, true);
2770f8ec26d0SHannes Reinecke 		if (reset == hardreset)
2771f8ec26d0SHannes Reinecke 			trace_ata_link_hardreset_end(link, classes, rc);
2772f8ec26d0SHannes Reinecke 		else
2773f8ec26d0SHannes Reinecke 			trace_ata_link_softreset_end(link, classes, rc);
2774b1c72916STejun Heo 		if (rc && rc != -EAGAIN) {
2775b1c72916STejun Heo 			failed_link = link;
27765dbfc9cbSTejun Heo 			goto fail;
2777b1c72916STejun Heo 		}
2778c6fd2807SJeff Garzik 
2779b1c72916STejun Heo 		/* hardreset slave link if existent */
2780b1c72916STejun Heo 		if (slave && reset == hardreset) {
2781b1c72916STejun Heo 			int tmp;
2782b1c72916STejun Heo 
2783b1c72916STejun Heo 			if (verbose)
2784a9a79dfeSJoe Perches 				ata_link_info(slave, "hard resetting link\n");
2785b1c72916STejun Heo 
2786b1c72916STejun Heo 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2787f8ec26d0SHannes Reinecke 			trace_ata_slave_hardreset_begin(slave, classes,
2788f8ec26d0SHannes Reinecke 							deadline);
2789b1c72916STejun Heo 			tmp = ata_do_reset(slave, reset, classes, deadline,
2790b1c72916STejun Heo 					   false);
2791f8ec26d0SHannes Reinecke 			trace_ata_slave_hardreset_end(slave, classes, tmp);
2792b1c72916STejun Heo 			switch (tmp) {
2793b1c72916STejun Heo 			case -EAGAIN:
2794b1c72916STejun Heo 				rc = -EAGAIN;
2795e06abcc6SGustavo A. R. Silva 				break;
2796b1c72916STejun Heo 			case 0:
2797b1c72916STejun Heo 				break;
2798b1c72916STejun Heo 			default:
2799b1c72916STejun Heo 				failed_link = slave;
2800b1c72916STejun Heo 				rc = tmp;
2801b1c72916STejun Heo 				goto fail;
2802b1c72916STejun Heo 			}
2803b1c72916STejun Heo 		}
2804b1c72916STejun Heo 
2805b1c72916STejun Heo 		/* perform follow-up SRST if necessary */
2806c6fd2807SJeff Garzik 		if (reset == hardreset &&
2807e8411fbaSSergei Shtylyov 		    ata_eh_followup_srst_needed(link, rc)) {
2808c6fd2807SJeff Garzik 			reset = softreset;
2809c6fd2807SJeff Garzik 
2810c6fd2807SJeff Garzik 			if (!reset) {
2811a9a79dfeSJoe Perches 				ata_link_err(link,
2812a9a79dfeSJoe Perches 	     "follow-up softreset required but no softreset available\n");
2813b1c72916STejun Heo 				failed_link = link;
2814fccb6ea5STejun Heo 				rc = -EINVAL;
281508cf69d0STejun Heo 				goto fail;
2816c6fd2807SJeff Garzik 			}
2817c6fd2807SJeff Garzik 
2818cf480626STejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2819f8ec26d0SHannes Reinecke 			trace_ata_link_softreset_begin(link, classes, deadline);
2820b1c72916STejun Heo 			rc = ata_do_reset(link, reset, classes, deadline, true);
2821f8ec26d0SHannes Reinecke 			trace_ata_link_softreset_end(link, classes, rc);
2822fe2c4d01STejun Heo 			if (rc) {
2823fe2c4d01STejun Heo 				failed_link = link;
2824fe2c4d01STejun Heo 				goto fail;
2825fe2c4d01STejun Heo 			}
2826c6fd2807SJeff Garzik 		}
2827932648b0STejun Heo 	} else {
2828932648b0STejun Heo 		if (verbose)
2829a9a79dfeSJoe Perches 			ata_link_info(link,
2830a9a79dfeSJoe Perches 	"no reset method available, skipping reset\n");
2831932648b0STejun Heo 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2832932648b0STejun Heo 			lflags |= ATA_LFLAG_ASSUME_ATA;
2833932648b0STejun Heo 	}
2834008a7896STejun Heo 
2835932648b0STejun Heo 	/*
2836932648b0STejun Heo 	 * Post-reset processing
2837932648b0STejun Heo 	 */
28381eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2839416dc9edSTejun Heo 		/* After the reset, the device state is PIO 0 and the
2840416dc9edSTejun Heo 		 * controller state is undefined.  Reset also wakes up
2841416dc9edSTejun Heo 		 * drives from sleeping mode.
2842c6fd2807SJeff Garzik 		 */
2843f58229f8STejun Heo 		dev->pio_mode = XFER_PIO_0;
2844054a5fbaSTejun Heo 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2845c6fd2807SJeff Garzik 
28463b761d3dSTejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
28473b761d3dSTejun Heo 			continue;
28483b761d3dSTejun Heo 
28494ccd3329STejun Heo 		/* apply class override */
2850416dc9edSTejun Heo 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2851ae791c05STejun Heo 			classes[dev->devno] = ATA_DEV_ATA;
2852416dc9edSTejun Heo 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2853816ab897STejun Heo 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2854ae791c05STejun Heo 	}
2855ae791c05STejun Heo 
2856008a7896STejun Heo 	/* record current link speed */
2857936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2858936fd732STejun Heo 		link->sata_spd = (sstatus >> 4) & 0xf;
2859b1c72916STejun Heo 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2860b1c72916STejun Heo 		slave->sata_spd = (sstatus >> 4) & 0xf;
2861008a7896STejun Heo 
2862dc98c32cSTejun Heo 	/* thaw the port */
2863dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2864dc98c32cSTejun Heo 		ata_eh_thaw_port(ap);
2865dc98c32cSTejun Heo 
2866f046519fSTejun Heo 	/* postreset() should clear hardware SError.  Although SError
2867f046519fSTejun Heo 	 * is cleared during link resume, clearing SError here is
2868f046519fSTejun Heo 	 * necessary as some PHYs raise hotplug events after SRST.
2869f046519fSTejun Heo 	 * This introduces race condition where hotplug occurs between
2870f046519fSTejun Heo 	 * reset and here.  This race is mediated by cross checking
2871f046519fSTejun Heo 	 * link onlineness and classification result later.
2872f046519fSTejun Heo 	 */
2873b1c72916STejun Heo 	if (postreset) {
2874cc0680a5STejun Heo 		postreset(link, classes);
2875f8ec26d0SHannes Reinecke 		trace_ata_link_postreset(link, classes, rc);
2876f8ec26d0SHannes Reinecke 		if (slave) {
2877b1c72916STejun Heo 			postreset(slave, classes);
2878f8ec26d0SHannes Reinecke 			trace_ata_slave_postreset(slave, classes, rc);
2879f8ec26d0SHannes Reinecke 		}
2880b1c72916STejun Heo 	}
2881c6fd2807SJeff Garzik 
288280cc944eSNiklas Cassel 	/* clear cached SError */
2883f046519fSTejun Heo 	spin_lock_irqsave(link->ap->lock, flags);
288480cc944eSNiklas Cassel 	link->eh_info.serror = 0;
2885b1c72916STejun Heo 	if (slave)
288680cc944eSNiklas Cassel 		slave->eh_info.serror = 0;
2887f046519fSTejun Heo 	spin_unlock_irqrestore(link->ap->lock, flags);
2888f046519fSTejun Heo 
28893b761d3dSTejun Heo 	/*
28903b761d3dSTejun Heo 	 * Make sure onlineness and classification result correspond.
2891f046519fSTejun Heo 	 * Hotplug could have happened during reset and some
2892f046519fSTejun Heo 	 * controllers fail to wait while a drive is spinning up after
2893f046519fSTejun Heo 	 * being hotplugged causing misdetection.  By cross checking
28943b761d3dSTejun Heo 	 * link on/offlineness and classification result, those
28953b761d3dSTejun Heo 	 * conditions can be reliably detected and retried.
2896f046519fSTejun Heo 	 */
2897b1c72916STejun Heo 	nr_unknown = 0;
28981eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
28993b761d3dSTejun Heo 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2900b1c72916STejun Heo 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2901a9a79dfeSJoe Perches 				ata_dev_dbg(dev, "link online but device misclassified\n");
2902f046519fSTejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2903b1c72916STejun Heo 				nr_unknown++;
2904b1c72916STejun Heo 			}
29053b761d3dSTejun Heo 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
29063b761d3dSTejun Heo 			if (ata_class_enabled(classes[dev->devno]))
2907a9a79dfeSJoe Perches 				ata_dev_dbg(dev,
2908a9a79dfeSJoe Perches 					    "link offline, clearing class %d to NONE\n",
29093b761d3dSTejun Heo 					    classes[dev->devno]);
29103b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
29113b761d3dSTejun Heo 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2912a9a79dfeSJoe Perches 			ata_dev_dbg(dev,
2913a9a79dfeSJoe Perches 				    "link status unknown, clearing UNKNOWN to NONE\n");
29143b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
29153b761d3dSTejun Heo 		}
2916f046519fSTejun Heo 	}
2917f046519fSTejun Heo 
2918b1c72916STejun Heo 	if (classify && nr_unknown) {
2919f046519fSTejun Heo 		if (try < max_tries) {
2920a9a79dfeSJoe Perches 			ata_link_warn(link,
2921a9a79dfeSJoe Perches 				      "link online but %d devices misclassified, retrying\n",
29223b761d3dSTejun Heo 				      nr_unknown);
2923b1c72916STejun Heo 			failed_link = link;
2924f046519fSTejun Heo 			rc = -EAGAIN;
2925f046519fSTejun Heo 			goto fail;
2926f046519fSTejun Heo 		}
2927a9a79dfeSJoe Perches 		ata_link_warn(link,
29283b761d3dSTejun Heo 			      "link online but %d devices misclassified, "
29293b761d3dSTejun Heo 			      "device detection might fail\n", nr_unknown);
2930f046519fSTejun Heo 	}
2931f046519fSTejun Heo 
2932c6fd2807SJeff Garzik 	/* reset successful, schedule revalidation */
2933cf480626STejun Heo 	ata_eh_done(link, NULL, ATA_EH_RESET);
2934b1c72916STejun Heo 	if (slave)
2935b1c72916STejun Heo 		ata_eh_done(slave, NULL, ATA_EH_RESET);
293619b72321STejun Heo 	ehc->last_reset = jiffies;		/* update to completion time */
2937c6fd2807SJeff Garzik 	ehc->i.action |= ATA_EH_REVALIDATE;
29386b7ae954STejun Heo 	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
2939416dc9edSTejun Heo 
2940416dc9edSTejun Heo 	rc = 0;
2941fccb6ea5STejun Heo  out:
2942fccb6ea5STejun Heo 	/* clear hotplug flag */
2943fccb6ea5STejun Heo 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2944b1c72916STejun Heo 	if (slave)
2945b1c72916STejun Heo 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2946afaa5c37STejun Heo 
2947afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2948afaa5c37STejun Heo 	ap->pflags &= ~ATA_PFLAG_RESETTING;
2949afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2950afaa5c37STejun Heo 
2951c6fd2807SJeff Garzik 	return rc;
2952416dc9edSTejun Heo 
2953416dc9edSTejun Heo  fail:
29545958e302STejun Heo 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
29555958e302STejun Heo 	if (!ata_is_host_link(link) &&
29565958e302STejun Heo 	    sata_scr_read(link, SCR_STATUS, &sstatus))
29575958e302STejun Heo 		rc = -ERESTART;
29585958e302STejun Heo 
29597a46c078SGwendal Grignou 	if (try >= max_tries) {
29608ea7645cSTejun Heo 		/*
29618ea7645cSTejun Heo 		 * Thaw host port even if reset failed, so that the port
29628ea7645cSTejun Heo 		 * can be retried on the next phy event.  This risks
29638ea7645cSTejun Heo 		 * repeated EH runs but seems to be a better tradeoff than
29648ea7645cSTejun Heo 		 * shutting down a port after a botched hotplug attempt.
29658ea7645cSTejun Heo 		 */
29668ea7645cSTejun Heo 		if (ata_is_host_link(link))
29678ea7645cSTejun Heo 			ata_eh_thaw_port(ap);
29687f95731cSDamien Le Moal 		ata_link_warn(link, "%s failed\n",
29697f95731cSDamien Le Moal 			      reset == hardreset ? "hardreset" : "softreset");
2970416dc9edSTejun Heo 		goto out;
29718ea7645cSTejun Heo 	}
2972416dc9edSTejun Heo 
2973416dc9edSTejun Heo 	now = jiffies;
2974416dc9edSTejun Heo 	if (time_before(now, deadline)) {
2975416dc9edSTejun Heo 		unsigned long delta = deadline - now;
2976416dc9edSTejun Heo 
2977a9a79dfeSJoe Perches 		ata_link_warn(failed_link,
29780a2c0f56STejun Heo 			"reset failed (errno=%d), retrying in %u secs\n",
29790a2c0f56STejun Heo 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2980416dc9edSTejun Heo 
2981c0c362b6STejun Heo 		ata_eh_release(ap);
2982416dc9edSTejun Heo 		while (delta)
2983416dc9edSTejun Heo 			delta = schedule_timeout_uninterruptible(delta);
2984c0c362b6STejun Heo 		ata_eh_acquire(ap);
2985416dc9edSTejun Heo 	}
2986416dc9edSTejun Heo 
29877a46c078SGwendal Grignou 	/*
29887a46c078SGwendal Grignou 	 * While disks spinup behind PMP, some controllers fail sending SRST.
29897a46c078SGwendal Grignou 	 * They need to be reset - as well as the PMP - before retrying.
29907a46c078SGwendal Grignou 	 */
29917a46c078SGwendal Grignou 	if (rc == -ERESTART) {
29927a46c078SGwendal Grignou 		if (ata_is_host_link(link))
29937a46c078SGwendal Grignou 			ata_eh_thaw_port(ap);
29947a46c078SGwendal Grignou 		goto out;
29957a46c078SGwendal Grignou 	}
29967a46c078SGwendal Grignou 
2997b1c72916STejun Heo 	if (try == max_tries - 1) {
2998a07d499bSTejun Heo 		sata_down_spd_limit(link, 0);
2999b1c72916STejun Heo 		if (slave)
3000a07d499bSTejun Heo 			sata_down_spd_limit(slave, 0);
3001b1c72916STejun Heo 	} else if (rc == -EPIPE)
3002a07d499bSTejun Heo 		sata_down_spd_limit(failed_link, 0);
3003b1c72916STejun Heo 
3004416dc9edSTejun Heo 	if (hardreset)
3005416dc9edSTejun Heo 		reset = hardreset;
3006416dc9edSTejun Heo 	goto retry;
3007c6fd2807SJeff Garzik }
3008c6fd2807SJeff Garzik 
300945fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap)
301045fabbb7SElias Oltmanns {
301145fabbb7SElias Oltmanns 	struct ata_link *link;
301245fabbb7SElias Oltmanns 	struct ata_device *dev;
301345fabbb7SElias Oltmanns 	unsigned long flags;
301445fabbb7SElias Oltmanns 
301545fabbb7SElias Oltmanns 	/*
301645fabbb7SElias Oltmanns 	 * This function can be thought of as an extended version of
301745fabbb7SElias Oltmanns 	 * ata_eh_about_to_do() specially crafted to accommodate the
301845fabbb7SElias Oltmanns 	 * requirements of ATA_EH_PARK handling. Since the EH thread
301945fabbb7SElias Oltmanns 	 * does not leave the do {} while () loop in ata_eh_recover as
302045fabbb7SElias Oltmanns 	 * long as the timeout for a park request to *one* device on
302145fabbb7SElias Oltmanns 	 * the port has not expired, and since we still want to pick
302245fabbb7SElias Oltmanns 	 * up park requests to other devices on the same port or
302345fabbb7SElias Oltmanns 	 * timeout updates for the same device, we have to pull
302445fabbb7SElias Oltmanns 	 * ATA_EH_PARK actions from eh_info into eh_context.i
302545fabbb7SElias Oltmanns 	 * ourselves at the beginning of each pass over the loop.
302645fabbb7SElias Oltmanns 	 *
302745fabbb7SElias Oltmanns 	 * Additionally, all write accesses to &ap->park_req_pending
302816735d02SWolfram Sang 	 * through reinit_completion() (see below) or complete_all()
302945fabbb7SElias Oltmanns 	 * (see ata_scsi_park_store()) are protected by the host lock.
303045fabbb7SElias Oltmanns 	 * As a result we have that park_req_pending.done is zero on
303145fabbb7SElias Oltmanns 	 * exit from this function, i.e. when ATA_EH_PARK actions for
303245fabbb7SElias Oltmanns 	 * *all* devices on port ap have been pulled into the
303345fabbb7SElias Oltmanns 	 * respective eh_context structs. If, and only if,
303445fabbb7SElias Oltmanns 	 * park_req_pending.done is non-zero by the time we reach
303545fabbb7SElias Oltmanns 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
303645fabbb7SElias Oltmanns 	 * has been scheduled for at least one of the devices on port
303745fabbb7SElias Oltmanns 	 * ap and we have to cycle over the do {} while () loop in
303845fabbb7SElias Oltmanns 	 * ata_eh_recover() again.
303945fabbb7SElias Oltmanns 	 */
304045fabbb7SElias Oltmanns 
304145fabbb7SElias Oltmanns 	spin_lock_irqsave(ap->lock, flags);
304216735d02SWolfram Sang 	reinit_completion(&ap->park_req_pending);
30431eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
30441eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
304545fabbb7SElias Oltmanns 			struct ata_eh_info *ehi = &link->eh_info;
304645fabbb7SElias Oltmanns 
304745fabbb7SElias Oltmanns 			link->eh_context.i.dev_action[dev->devno] |=
304845fabbb7SElias Oltmanns 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
304945fabbb7SElias Oltmanns 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
305045fabbb7SElias Oltmanns 		}
305145fabbb7SElias Oltmanns 	}
305245fabbb7SElias Oltmanns 	spin_unlock_irqrestore(ap->lock, flags);
305345fabbb7SElias Oltmanns }
305445fabbb7SElias Oltmanns 
305545fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
305645fabbb7SElias Oltmanns {
305745fabbb7SElias Oltmanns 	struct ata_eh_context *ehc = &dev->link->eh_context;
305845fabbb7SElias Oltmanns 	struct ata_taskfile tf;
305945fabbb7SElias Oltmanns 	unsigned int err_mask;
306045fabbb7SElias Oltmanns 
306145fabbb7SElias Oltmanns 	ata_tf_init(dev, &tf);
306245fabbb7SElias Oltmanns 	if (park) {
306345fabbb7SElias Oltmanns 		ehc->unloaded_mask |= 1 << dev->devno;
306445fabbb7SElias Oltmanns 		tf.command = ATA_CMD_IDLEIMMEDIATE;
306545fabbb7SElias Oltmanns 		tf.feature = 0x44;
306645fabbb7SElias Oltmanns 		tf.lbal = 0x4c;
306745fabbb7SElias Oltmanns 		tf.lbam = 0x4e;
306845fabbb7SElias Oltmanns 		tf.lbah = 0x55;
306945fabbb7SElias Oltmanns 	} else {
307045fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
307145fabbb7SElias Oltmanns 		tf.command = ATA_CMD_CHK_POWER;
307245fabbb7SElias Oltmanns 	}
307345fabbb7SElias Oltmanns 
307445fabbb7SElias Oltmanns 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3075bd18bc04SHannes Reinecke 	tf.protocol = ATA_PROT_NODATA;
307645fabbb7SElias Oltmanns 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
307745fabbb7SElias Oltmanns 	if (park && (err_mask || tf.lbal != 0xc4)) {
3078a9a79dfeSJoe Perches 		ata_dev_err(dev, "head unload failed!\n");
307945fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
308045fabbb7SElias Oltmanns 	}
308145fabbb7SElias Oltmanns }
308245fabbb7SElias Oltmanns 
30830260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link,
3084c6fd2807SJeff Garzik 					struct ata_device **r_failed_dev)
3085c6fd2807SJeff Garzik {
30860260731fSTejun Heo 	struct ata_port *ap = link->ap;
30870260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3088c6fd2807SJeff Garzik 	struct ata_device *dev;
30898c3c52a8STejun Heo 	unsigned int new_mask = 0;
3090c6fd2807SJeff Garzik 	unsigned long flags;
3091f58229f8STejun Heo 	int rc = 0;
3092c6fd2807SJeff Garzik 
30938c3c52a8STejun Heo 	/* For PATA drive side cable detection to work, IDENTIFY must
30948c3c52a8STejun Heo 	 * be done backwards such that PDIAG- is released by the slave
30958c3c52a8STejun Heo 	 * device before the master device is identified.
30968c3c52a8STejun Heo 	 */
30971eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL_REVERSE) {
3098f58229f8STejun Heo 		unsigned int action = ata_eh_dev_action(dev);
3099f58229f8STejun Heo 		unsigned int readid_flags = 0;
3100c6fd2807SJeff Garzik 
3101bff04647STejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET)
3102bff04647STejun Heo 			readid_flags |= ATA_READID_POSTRESET;
3103bff04647STejun Heo 
31049666f400STejun Heo 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3105633273a3STejun Heo 			WARN_ON(dev->class == ATA_DEV_PMP);
3106633273a3STejun Heo 
310771d7b6e5SNiklas Cassel 			/*
310871d7b6e5SNiklas Cassel 			 * The link may be in a deep sleep, wake it up.
310971d7b6e5SNiklas Cassel 			 *
311071d7b6e5SNiklas Cassel 			 * If the link is in deep sleep, ata_phys_link_offline()
311171d7b6e5SNiklas Cassel 			 * will return true, causing the revalidation to fail,
311271d7b6e5SNiklas Cassel 			 * which leads to a (potentially) needless hard reset.
311371d7b6e5SNiklas Cassel 			 *
311471d7b6e5SNiklas Cassel 			 * ata_eh_recover() will later restore the link policy
311571d7b6e5SNiklas Cassel 			 * to ap->target_lpm_policy after revalidation is done.
311671d7b6e5SNiklas Cassel 			 */
311771d7b6e5SNiklas Cassel 			if (link->lpm_policy > ATA_LPM_MAX_POWER) {
311871d7b6e5SNiklas Cassel 				rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER,
311971d7b6e5SNiklas Cassel 						    r_failed_dev);
312071d7b6e5SNiklas Cassel 				if (rc)
312171d7b6e5SNiklas Cassel 					goto err;
312271d7b6e5SNiklas Cassel 			}
312371d7b6e5SNiklas Cassel 
3124b1c72916STejun Heo 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3125c6fd2807SJeff Garzik 				rc = -EIO;
31268c3c52a8STejun Heo 				goto err;
3127c6fd2807SJeff Garzik 			}
3128c6fd2807SJeff Garzik 
31290260731fSTejun Heo 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3130422c9daaSTejun Heo 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3131422c9daaSTejun Heo 						readid_flags);
3132c6fd2807SJeff Garzik 			if (rc)
31338c3c52a8STejun Heo 				goto err;
3134c6fd2807SJeff Garzik 
31350260731fSTejun Heo 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3136c6fd2807SJeff Garzik 
3137baa1e78aSTejun Heo 			/* Configuration may have changed, reconfigure
3138baa1e78aSTejun Heo 			 * transfer mode.
3139baa1e78aSTejun Heo 			 */
3140baa1e78aSTejun Heo 			ehc->i.flags |= ATA_EHI_SETMODE;
3141baa1e78aSTejun Heo 
3142c6fd2807SJeff Garzik 			/* schedule the scsi_rescan_device() here */
31436aa0365aSDamien Le Moal 			schedule_delayed_work(&ap->scsi_rescan_task, 0);
3144c6fd2807SJeff Garzik 		} else if (dev->class == ATA_DEV_UNKNOWN &&
3145c6fd2807SJeff Garzik 			   ehc->tries[dev->devno] &&
3146c6fd2807SJeff Garzik 			   ata_class_enabled(ehc->classes[dev->devno])) {
3147842faa6cSTejun Heo 			/* Temporarily set dev->class, it will be
3148842faa6cSTejun Heo 			 * permanently set once all configurations are
3149842faa6cSTejun Heo 			 * complete.  This is necessary because new
3150842faa6cSTejun Heo 			 * device configuration is done in two
3151842faa6cSTejun Heo 			 * separate loops.
3152842faa6cSTejun Heo 			 */
3153c6fd2807SJeff Garzik 			dev->class = ehc->classes[dev->devno];
3154c6fd2807SJeff Garzik 
3155633273a3STejun Heo 			if (dev->class == ATA_DEV_PMP)
3156633273a3STejun Heo 				rc = sata_pmp_attach(dev);
3157633273a3STejun Heo 			else
3158633273a3STejun Heo 				rc = ata_dev_read_id(dev, &dev->class,
3159633273a3STejun Heo 						     readid_flags, dev->id);
3160842faa6cSTejun Heo 
3161842faa6cSTejun Heo 			/* read_id might have changed class, store and reset */
3162842faa6cSTejun Heo 			ehc->classes[dev->devno] = dev->class;
3163842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
3164842faa6cSTejun Heo 
31658c3c52a8STejun Heo 			switch (rc) {
31668c3c52a8STejun Heo 			case 0:
316799cf610aSTejun Heo 				/* clear error info accumulated during probe */
316899cf610aSTejun Heo 				ata_ering_clear(&dev->ering);
3169f58229f8STejun Heo 				new_mask |= 1 << dev->devno;
31708c3c52a8STejun Heo 				break;
31718c3c52a8STejun Heo 			case -ENOENT:
317255a8e2c8STejun Heo 				/* IDENTIFY was issued to non-existent
317355a8e2c8STejun Heo 				 * device.  No need to reset.  Just
3174842faa6cSTejun Heo 				 * thaw and ignore the device.
317555a8e2c8STejun Heo 				 */
317655a8e2c8STejun Heo 				ata_eh_thaw_port(ap);
3177c6fd2807SJeff Garzik 				break;
31788c3c52a8STejun Heo 			default:
31798c3c52a8STejun Heo 				goto err;
31808c3c52a8STejun Heo 			}
31818c3c52a8STejun Heo 		}
3182c6fd2807SJeff Garzik 	}
3183c6fd2807SJeff Garzik 
3184c1c4e8d5STejun Heo 	/* PDIAG- should have been released, ask cable type if post-reset */
318533267325STejun Heo 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
318633267325STejun Heo 		if (ap->ops->cable_detect)
3187c1c4e8d5STejun Heo 			ap->cbl = ap->ops->cable_detect(ap);
318833267325STejun Heo 		ata_force_cbl(ap);
318933267325STejun Heo 	}
3190c1c4e8d5STejun Heo 
31918c3c52a8STejun Heo 	/* Configure new devices forward such that user doesn't see
31928c3c52a8STejun Heo 	 * device detection messages backwards.
31938c3c52a8STejun Heo 	 */
31941eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
31954f7c2874STejun Heo 		if (!(new_mask & (1 << dev->devno)))
31968c3c52a8STejun Heo 			continue;
31978c3c52a8STejun Heo 
3198842faa6cSTejun Heo 		dev->class = ehc->classes[dev->devno];
3199842faa6cSTejun Heo 
32004f7c2874STejun Heo 		if (dev->class == ATA_DEV_PMP)
32014f7c2874STejun Heo 			continue;
32024f7c2874STejun Heo 
32038c3c52a8STejun Heo 		ehc->i.flags |= ATA_EHI_PRINTINFO;
32048c3c52a8STejun Heo 		rc = ata_dev_configure(dev);
32058c3c52a8STejun Heo 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3206842faa6cSTejun Heo 		if (rc) {
3207842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
32088c3c52a8STejun Heo 			goto err;
3209842faa6cSTejun Heo 		}
32108c3c52a8STejun Heo 
3211c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
3212c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3213c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3214baa1e78aSTejun Heo 
321555a8e2c8STejun Heo 		/* new device discovered, configure xfermode */
3216baa1e78aSTejun Heo 		ehc->i.flags |= ATA_EHI_SETMODE;
3217c6fd2807SJeff Garzik 	}
3218c6fd2807SJeff Garzik 
32198c3c52a8STejun Heo 	return 0;
32208c3c52a8STejun Heo 
32218c3c52a8STejun Heo  err:
32220c76106cSDamien Le Moal 	dev->flags &= ~ATA_DFLAG_RESUMING;
3223c6fd2807SJeff Garzik 	*r_failed_dev = dev;
3224c6fd2807SJeff Garzik 	return rc;
3225c6fd2807SJeff Garzik }
3226c6fd2807SJeff Garzik 
32276f1d1e3aSTejun Heo /**
32286f1d1e3aSTejun Heo  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
32296f1d1e3aSTejun Heo  *	@link: link on which timings will be programmed
323098a1708dSMartin Olsson  *	@r_failed_dev: out parameter for failed device
32316f1d1e3aSTejun Heo  *
32326f1d1e3aSTejun Heo  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
32336f1d1e3aSTejun Heo  *	ata_set_mode() fails, pointer to the failing device is
32346f1d1e3aSTejun Heo  *	returned in @r_failed_dev.
32356f1d1e3aSTejun Heo  *
32366f1d1e3aSTejun Heo  *	LOCKING:
32376f1d1e3aSTejun Heo  *	PCI/etc. bus probe sem.
32386f1d1e3aSTejun Heo  *
32396f1d1e3aSTejun Heo  *	RETURNS:
32406f1d1e3aSTejun Heo  *	0 on success, negative errno otherwise
32416f1d1e3aSTejun Heo  */
32426f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
32436f1d1e3aSTejun Heo {
32446f1d1e3aSTejun Heo 	struct ata_port *ap = link->ap;
324500115e0fSTejun Heo 	struct ata_device *dev;
324600115e0fSTejun Heo 	int rc;
32476f1d1e3aSTejun Heo 
324876326ac1STejun Heo 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
32491eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
325076326ac1STejun Heo 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
325176326ac1STejun Heo 			struct ata_ering_entry *ent;
325276326ac1STejun Heo 
325376326ac1STejun Heo 			ent = ata_ering_top(&dev->ering);
325476326ac1STejun Heo 			if (ent)
325576326ac1STejun Heo 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
325676326ac1STejun Heo 		}
325776326ac1STejun Heo 	}
325876326ac1STejun Heo 
32596f1d1e3aSTejun Heo 	/* has private set_mode? */
32606f1d1e3aSTejun Heo 	if (ap->ops->set_mode)
326100115e0fSTejun Heo 		rc = ap->ops->set_mode(link, r_failed_dev);
326200115e0fSTejun Heo 	else
326300115e0fSTejun Heo 		rc = ata_do_set_mode(link, r_failed_dev);
326400115e0fSTejun Heo 
326500115e0fSTejun Heo 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
32661eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
326700115e0fSTejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
326800115e0fSTejun Heo 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
326900115e0fSTejun Heo 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
327000115e0fSTejun Heo 
327100115e0fSTejun Heo 		if (dev->xfer_mode != saved_xfer_mode ||
327200115e0fSTejun Heo 		    ata_ncq_enabled(dev) != saved_ncq)
327300115e0fSTejun Heo 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
327400115e0fSTejun Heo 	}
327500115e0fSTejun Heo 
327600115e0fSTejun Heo 	return rc;
32776f1d1e3aSTejun Heo }
32786f1d1e3aSTejun Heo 
327911fc33daSTejun Heo /**
328011fc33daSTejun Heo  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
328111fc33daSTejun Heo  *	@dev: ATAPI device to clear UA for
328211fc33daSTejun Heo  *
328311fc33daSTejun Heo  *	Resets and other operations can make an ATAPI device raise
328411fc33daSTejun Heo  *	UNIT ATTENTION which causes the next operation to fail.  This
328511fc33daSTejun Heo  *	function clears UA.
328611fc33daSTejun Heo  *
328711fc33daSTejun Heo  *	LOCKING:
328811fc33daSTejun Heo  *	EH context (may sleep).
328911fc33daSTejun Heo  *
329011fc33daSTejun Heo  *	RETURNS:
329111fc33daSTejun Heo  *	0 on success, -errno on failure.
329211fc33daSTejun Heo  */
329311fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev)
329411fc33daSTejun Heo {
329511fc33daSTejun Heo 	int i;
329611fc33daSTejun Heo 
329711fc33daSTejun Heo 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3298da65bbddSDamien Le Moal 		u8 *sense_buffer = dev->sector_buf;
329911fc33daSTejun Heo 		u8 sense_key = 0;
330011fc33daSTejun Heo 		unsigned int err_mask;
330111fc33daSTejun Heo 
330211fc33daSTejun Heo 		err_mask = atapi_eh_tur(dev, &sense_key);
330311fc33daSTejun Heo 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3304a9a79dfeSJoe Perches 			ata_dev_warn(dev,
3305a9a79dfeSJoe Perches 				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3306a9a79dfeSJoe Perches 				     err_mask);
330711fc33daSTejun Heo 			return -EIO;
330811fc33daSTejun Heo 		}
330911fc33daSTejun Heo 
331011fc33daSTejun Heo 		if (!err_mask || sense_key != UNIT_ATTENTION)
331111fc33daSTejun Heo 			return 0;
331211fc33daSTejun Heo 
331311fc33daSTejun Heo 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
331411fc33daSTejun Heo 		if (err_mask) {
3315a9a79dfeSJoe Perches 			ata_dev_warn(dev, "failed to clear "
331611fc33daSTejun Heo 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
331711fc33daSTejun Heo 			return -EIO;
331811fc33daSTejun Heo 		}
331911fc33daSTejun Heo 	}
332011fc33daSTejun Heo 
3321a9a79dfeSJoe Perches 	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3322a9a79dfeSJoe Perches 		     ATA_EH_UA_TRIES);
332311fc33daSTejun Heo 
332411fc33daSTejun Heo 	return 0;
332511fc33daSTejun Heo }
332611fc33daSTejun Heo 
33276013efd8STejun Heo /**
33286013efd8STejun Heo  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
33296013efd8STejun Heo  *	@dev: ATA device which may need FLUSH retry
33306013efd8STejun Heo  *
33316013efd8STejun Heo  *	If @dev failed FLUSH, it needs to be reported upper layer
33326013efd8STejun Heo  *	immediately as it means that @dev failed to remap and already
33336013efd8STejun Heo  *	lost at least a sector and further FLUSH retrials won't make
33346013efd8STejun Heo  *	any difference to the lost sector.  However, if FLUSH failed
33356013efd8STejun Heo  *	for other reasons, for example transmission error, FLUSH needs
33366013efd8STejun Heo  *	to be retried.
33376013efd8STejun Heo  *
33386013efd8STejun Heo  *	This function determines whether FLUSH failure retry is
33396013efd8STejun Heo  *	necessary and performs it if so.
33406013efd8STejun Heo  *
33416013efd8STejun Heo  *	RETURNS:
33426013efd8STejun Heo  *	0 if EH can continue, -errno if EH needs to be repeated.
33436013efd8STejun Heo  */
33446013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev)
33456013efd8STejun Heo {
33466013efd8STejun Heo 	struct ata_link *link = dev->link;
33476013efd8STejun Heo 	struct ata_port *ap = link->ap;
33486013efd8STejun Heo 	struct ata_queued_cmd *qc;
33496013efd8STejun Heo 	struct ata_taskfile tf;
33506013efd8STejun Heo 	unsigned int err_mask;
33516013efd8STejun Heo 	int rc = 0;
33526013efd8STejun Heo 
33536013efd8STejun Heo 	/* did flush fail for this device? */
33546013efd8STejun Heo 	if (!ata_tag_valid(link->active_tag))
33556013efd8STejun Heo 		return 0;
33566013efd8STejun Heo 
33576013efd8STejun Heo 	qc = __ata_qc_from_tag(ap, link->active_tag);
33586013efd8STejun Heo 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
33596013efd8STejun Heo 			       qc->tf.command != ATA_CMD_FLUSH))
33606013efd8STejun Heo 		return 0;
33616013efd8STejun Heo 
33626013efd8STejun Heo 	/* if the device failed it, it should be reported to upper layers */
33636013efd8STejun Heo 	if (qc->err_mask & AC_ERR_DEV)
33646013efd8STejun Heo 		return 0;
33656013efd8STejun Heo 
33666013efd8STejun Heo 	/* flush failed for some other reason, give it another shot */
33676013efd8STejun Heo 	ata_tf_init(dev, &tf);
33686013efd8STejun Heo 
33696013efd8STejun Heo 	tf.command = qc->tf.command;
33706013efd8STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE;
33716013efd8STejun Heo 	tf.protocol = ATA_PROT_NODATA;
33726013efd8STejun Heo 
3373a9a79dfeSJoe Perches 	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
33746013efd8STejun Heo 		       tf.command, qc->err_mask);
33756013efd8STejun Heo 
33766013efd8STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
33776013efd8STejun Heo 	if (!err_mask) {
33786013efd8STejun Heo 		/*
33796013efd8STejun Heo 		 * FLUSH is complete but there's no way to
33806013efd8STejun Heo 		 * successfully complete a failed command from EH.
33816013efd8STejun Heo 		 * Making sure retry is allowed at least once and
33826013efd8STejun Heo 		 * retrying it should do the trick - whatever was in
33836013efd8STejun Heo 		 * the cache is already on the platter and this won't
33846013efd8STejun Heo 		 * cause infinite loop.
33856013efd8STejun Heo 		 */
33866013efd8STejun Heo 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
33876013efd8STejun Heo 	} else {
3388a9a79dfeSJoe Perches 		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
33896013efd8STejun Heo 			       err_mask);
33906013efd8STejun Heo 		rc = -EIO;
33916013efd8STejun Heo 
33926013efd8STejun Heo 		/* if device failed it, report it to upper layers */
33936013efd8STejun Heo 		if (err_mask & AC_ERR_DEV) {
33946013efd8STejun Heo 			qc->err_mask |= AC_ERR_DEV;
33956013efd8STejun Heo 			qc->result_tf = tf;
33964cb7c6f1SNiklas Cassel 			if (!ata_port_is_frozen(ap))
33976013efd8STejun Heo 				rc = 0;
33986013efd8STejun Heo 		}
33996013efd8STejun Heo 	}
34006013efd8STejun Heo 	return rc;
34016013efd8STejun Heo }
34026013efd8STejun Heo 
34036b7ae954STejun Heo /**
34046b7ae954STejun Heo  *	ata_eh_set_lpm - configure SATA interface power management
34056b7ae954STejun Heo  *	@link: link to configure power management
34066b7ae954STejun Heo  *	@policy: the link power management policy
34076b7ae954STejun Heo  *	@r_failed_dev: out parameter for failed device
34086b7ae954STejun Heo  *
34096b7ae954STejun Heo  *	Enable SATA Interface power management.  This will enable
3410f4ac6476SHans de Goede  *	Device Interface Power Management (DIPM) for min_power and
3411f4ac6476SHans de Goede  *	medium_power_with_dipm policies, and then call driver specific
3412f4ac6476SHans de Goede  *	callbacks for enabling Host Initiated Power management.
34136b7ae954STejun Heo  *
34146b7ae954STejun Heo  *	LOCKING:
34156b7ae954STejun Heo  *	EH context.
34166b7ae954STejun Heo  *
34176b7ae954STejun Heo  *	RETURNS:
34186b7ae954STejun Heo  *	0 on success, -errno on failure.
34196b7ae954STejun Heo  */
34206b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
34216b7ae954STejun Heo 			  struct ata_device **r_failed_dev)
34226b7ae954STejun Heo {
34236c8ea89cSTejun Heo 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
34246b7ae954STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
34256b7ae954STejun Heo 	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3426e5005b15STejun Heo 	enum ata_lpm_policy old_policy = link->lpm_policy;
34275f6f12ccSTejun Heo 	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
34286b7ae954STejun Heo 	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
34296b7ae954STejun Heo 	unsigned int err_mask;
34306b7ae954STejun Heo 	int rc;
34316b7ae954STejun Heo 
34326b7ae954STejun Heo 	/* if the link or host doesn't do LPM, noop */
34334c9029e7SBartlomiej Zolnierkiewicz 	if (!IS_ENABLED(CONFIG_SATA_HOST) ||
34344c9029e7SBartlomiej Zolnierkiewicz 	    (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
34356b7ae954STejun Heo 		return 0;
34366b7ae954STejun Heo 
34376b7ae954STejun Heo 	/*
34386b7ae954STejun Heo 	 * DIPM is enabled only for MIN_POWER as some devices
34396b7ae954STejun Heo 	 * misbehave when the host NACKs transition to SLUMBER.  Order
34406b7ae954STejun Heo 	 * device and link configurations such that the host always
34416b7ae954STejun Heo 	 * allows DIPM requests.
34426b7ae954STejun Heo 	 */
34436b7ae954STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
34446b7ae954STejun Heo 		bool hipm = ata_id_has_hipm(dev->id);
3445ae01b249STejun Heo 		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
34466b7ae954STejun Heo 
34476b7ae954STejun Heo 		/* find the first enabled and LPM enabled devices */
34486b7ae954STejun Heo 		if (!link_dev)
34496b7ae954STejun Heo 			link_dev = dev;
34506b7ae954STejun Heo 
34516b7ae954STejun Heo 		if (!lpm_dev && (hipm || dipm))
34526b7ae954STejun Heo 			lpm_dev = dev;
34536b7ae954STejun Heo 
34546b7ae954STejun Heo 		hints &= ~ATA_LPM_EMPTY;
34556b7ae954STejun Heo 		if (!hipm)
34566b7ae954STejun Heo 			hints &= ~ATA_LPM_HIPM;
34576b7ae954STejun Heo 
34586b7ae954STejun Heo 		/* disable DIPM before changing link config */
3459f4ac6476SHans de Goede 		if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
34606b7ae954STejun Heo 			err_mask = ata_dev_set_feature(dev,
34616b7ae954STejun Heo 					SETFEATURES_SATA_DISABLE, SATA_DIPM);
34626b7ae954STejun Heo 			if (err_mask && err_mask != AC_ERR_DEV) {
3463a9a79dfeSJoe Perches 				ata_dev_warn(dev,
34646b7ae954STejun Heo 					     "failed to disable DIPM, Emask 0x%x\n",
34656b7ae954STejun Heo 					     err_mask);
34666b7ae954STejun Heo 				rc = -EIO;
34676b7ae954STejun Heo 				goto fail;
34686b7ae954STejun Heo 			}
34696b7ae954STejun Heo 		}
34706b7ae954STejun Heo 	}
34716b7ae954STejun Heo 
34726c8ea89cSTejun Heo 	if (ap) {
34736b7ae954STejun Heo 		rc = ap->ops->set_lpm(link, policy, hints);
34746b7ae954STejun Heo 		if (!rc && ap->slave_link)
34756b7ae954STejun Heo 			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
34766c8ea89cSTejun Heo 	} else
34776c8ea89cSTejun Heo 		rc = sata_pmp_set_lpm(link, policy, hints);
34786b7ae954STejun Heo 
34796b7ae954STejun Heo 	/*
34806b7ae954STejun Heo 	 * Attribute link config failure to the first (LPM) enabled
34816b7ae954STejun Heo 	 * device on the link.
34826b7ae954STejun Heo 	 */
34836b7ae954STejun Heo 	if (rc) {
34846b7ae954STejun Heo 		if (rc == -EOPNOTSUPP) {
34856b7ae954STejun Heo 			link->flags |= ATA_LFLAG_NO_LPM;
34866b7ae954STejun Heo 			return 0;
34876b7ae954STejun Heo 		}
34886b7ae954STejun Heo 		dev = lpm_dev ? lpm_dev : link_dev;
34896b7ae954STejun Heo 		goto fail;
34906b7ae954STejun Heo 	}
34916b7ae954STejun Heo 
3492e5005b15STejun Heo 	/*
3493e5005b15STejun Heo 	 * Low level driver acked the transition.  Issue DIPM command
3494e5005b15STejun Heo 	 * with the new policy set.
3495e5005b15STejun Heo 	 */
3496e5005b15STejun Heo 	link->lpm_policy = policy;
3497e5005b15STejun Heo 	if (ap && ap->slave_link)
3498e5005b15STejun Heo 		ap->slave_link->lpm_policy = policy;
3499e5005b15STejun Heo 
35006b7ae954STejun Heo 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
35016b7ae954STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
3502f4ac6476SHans de Goede 		if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
3503ae01b249STejun Heo 		    ata_id_has_dipm(dev->id)) {
35046b7ae954STejun Heo 			err_mask = ata_dev_set_feature(dev,
35056b7ae954STejun Heo 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
35066b7ae954STejun Heo 			if (err_mask && err_mask != AC_ERR_DEV) {
3507a9a79dfeSJoe Perches 				ata_dev_warn(dev,
35086b7ae954STejun Heo 					"failed to enable DIPM, Emask 0x%x\n",
35096b7ae954STejun Heo 					err_mask);
35106b7ae954STejun Heo 				rc = -EIO;
35116b7ae954STejun Heo 				goto fail;
35126b7ae954STejun Heo 			}
35136b7ae954STejun Heo 		}
35146b7ae954STejun Heo 	}
35156b7ae954STejun Heo 
351609c5b480SGabriele Mazzotta 	link->last_lpm_change = jiffies;
351709c5b480SGabriele Mazzotta 	link->flags |= ATA_LFLAG_CHANGED;
351809c5b480SGabriele Mazzotta 
35196b7ae954STejun Heo 	return 0;
35206b7ae954STejun Heo 
35216b7ae954STejun Heo fail:
3522e5005b15STejun Heo 	/* restore the old policy */
3523e5005b15STejun Heo 	link->lpm_policy = old_policy;
3524e5005b15STejun Heo 	if (ap && ap->slave_link)
3525e5005b15STejun Heo 		ap->slave_link->lpm_policy = old_policy;
3526e5005b15STejun Heo 
35276b7ae954STejun Heo 	/* if no device or only one more chance is left, disable LPM */
35286b7ae954STejun Heo 	if (!dev || ehc->tries[dev->devno] <= 2) {
3529a9a79dfeSJoe Perches 		ata_link_warn(link, "disabling LPM on the link\n");
35306b7ae954STejun Heo 		link->flags |= ATA_LFLAG_NO_LPM;
35316b7ae954STejun Heo 	}
35326b7ae954STejun Heo 	if (r_failed_dev)
35336b7ae954STejun Heo 		*r_failed_dev = dev;
35346b7ae954STejun Heo 	return rc;
35356b7ae954STejun Heo }
35366b7ae954STejun Heo 
35378a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link)
3538c6fd2807SJeff Garzik {
3539f58229f8STejun Heo 	struct ata_device *dev;
3540f58229f8STejun Heo 	int cnt = 0;
3541c6fd2807SJeff Garzik 
35421eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED)
3543c6fd2807SJeff Garzik 		cnt++;
3544c6fd2807SJeff Garzik 	return cnt;
3545c6fd2807SJeff Garzik }
3546c6fd2807SJeff Garzik 
35470260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link)
3548c6fd2807SJeff Garzik {
3549f58229f8STejun Heo 	struct ata_device *dev;
3550f58229f8STejun Heo 	int cnt = 0;
3551c6fd2807SJeff Garzik 
35521eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL)
3553f58229f8STejun Heo 		if (dev->class == ATA_DEV_UNKNOWN)
3554c6fd2807SJeff Garzik 			cnt++;
3555c6fd2807SJeff Garzik 	return cnt;
3556c6fd2807SJeff Garzik }
3557c6fd2807SJeff Garzik 
35580260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link)
3559c6fd2807SJeff Garzik {
3560672b2d65STejun Heo 	struct ata_port *ap = link->ap;
35610260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3562f58229f8STejun Heo 	struct ata_device *dev;
3563c6fd2807SJeff Garzik 
3564f9df58cbSTejun Heo 	/* skip disabled links */
3565f9df58cbSTejun Heo 	if (link->flags & ATA_LFLAG_DISABLED)
3566f9df58cbSTejun Heo 		return 1;
3567f9df58cbSTejun Heo 
3568e2f3d75fSTejun Heo 	/* skip if explicitly requested */
3569e2f3d75fSTejun Heo 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3570e2f3d75fSTejun Heo 		return 1;
3571e2f3d75fSTejun Heo 
3572672b2d65STejun Heo 	/* thaw frozen port and recover failed devices */
35734cb7c6f1SNiklas Cassel 	if (ata_port_is_frozen(ap) || ata_link_nr_enabled(link))
3574672b2d65STejun Heo 		return 0;
3575672b2d65STejun Heo 
3576672b2d65STejun Heo 	/* reset at least once if reset is requested */
3577672b2d65STejun Heo 	if ((ehc->i.action & ATA_EH_RESET) &&
3578672b2d65STejun Heo 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3579c6fd2807SJeff Garzik 		return 0;
3580c6fd2807SJeff Garzik 
3581c6fd2807SJeff Garzik 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
35821eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
3583c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_UNKNOWN &&
3584c6fd2807SJeff Garzik 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3585c6fd2807SJeff Garzik 			return 0;
3586c6fd2807SJeff Garzik 	}
3587c6fd2807SJeff Garzik 
3588c6fd2807SJeff Garzik 	return 1;
3589c6fd2807SJeff Garzik }
3590c6fd2807SJeff Garzik 
3591c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3592c2c7a89cSTejun Heo {
3593c2c7a89cSTejun Heo 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3594c2c7a89cSTejun Heo 	u64 now = get_jiffies_64();
3595c2c7a89cSTejun Heo 	int *trials = void_arg;
3596c2c7a89cSTejun Heo 
35976868225eSLin Ming 	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
35986868225eSLin Ming 	    (ent->timestamp < now - min(now, interval)))
3599c2c7a89cSTejun Heo 		return -1;
3600c2c7a89cSTejun Heo 
3601c2c7a89cSTejun Heo 	(*trials)++;
3602c2c7a89cSTejun Heo 	return 0;
3603c2c7a89cSTejun Heo }
3604c2c7a89cSTejun Heo 
360502c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
360602c05a27STejun Heo {
360702c05a27STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3608c2c7a89cSTejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
3609c2c7a89cSTejun Heo 	int trials = 0;
361002c05a27STejun Heo 
361102c05a27STejun Heo 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
361202c05a27STejun Heo 	    (ehc->did_probe_mask & (1 << dev->devno)))
361302c05a27STejun Heo 		return 0;
361402c05a27STejun Heo 
361502c05a27STejun Heo 	ata_eh_detach_dev(dev);
361602c05a27STejun Heo 	ata_dev_init(dev);
361702c05a27STejun Heo 	ehc->did_probe_mask |= (1 << dev->devno);
3618cf480626STejun Heo 	ehc->i.action |= ATA_EH_RESET;
361900115e0fSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
362000115e0fSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
362102c05a27STejun Heo 
36226b7ae954STejun Heo 	/* the link maybe in a deep sleep, wake it up */
36236c8ea89cSTejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
36246c8ea89cSTejun Heo 		if (ata_is_host_link(link))
36256c8ea89cSTejun Heo 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
36266c8ea89cSTejun Heo 					       ATA_LPM_EMPTY);
36276c8ea89cSTejun Heo 		else
36286c8ea89cSTejun Heo 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
36296c8ea89cSTejun Heo 					 ATA_LPM_EMPTY);
36306c8ea89cSTejun Heo 	}
36316b7ae954STejun Heo 
3632c2c7a89cSTejun Heo 	/* Record and count probe trials on the ering.  The specific
3633c2c7a89cSTejun Heo 	 * error mask used is irrelevant.  Because a successful device
3634c2c7a89cSTejun Heo 	 * detection clears the ering, this count accumulates only if
3635c2c7a89cSTejun Heo 	 * there are consecutive failed probes.
3636c2c7a89cSTejun Heo 	 *
3637c2c7a89cSTejun Heo 	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3638c2c7a89cSTejun Heo 	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3639c2c7a89cSTejun Heo 	 * forced to 1.5Gbps.
3640c2c7a89cSTejun Heo 	 *
3641c2c7a89cSTejun Heo 	 * This is to work around cases where failed link speed
3642c2c7a89cSTejun Heo 	 * negotiation results in device misdetection leading to
3643c2c7a89cSTejun Heo 	 * infinite DEVXCHG or PHRDY CHG events.
3644c2c7a89cSTejun Heo 	 */
3645c2c7a89cSTejun Heo 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3646c2c7a89cSTejun Heo 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3647c2c7a89cSTejun Heo 
3648c2c7a89cSTejun Heo 	if (trials > ATA_EH_PROBE_TRIALS)
3649c2c7a89cSTejun Heo 		sata_down_spd_limit(link, 1);
3650c2c7a89cSTejun Heo 
365102c05a27STejun Heo 	return 1;
365202c05a27STejun Heo }
365302c05a27STejun Heo 
36549b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3655fee7ca72STejun Heo {
36569af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3657fee7ca72STejun Heo 
3658cf9a590aSTejun Heo 	/* -EAGAIN from EH routine indicates retry without prejudice.
3659cf9a590aSTejun Heo 	 * The requester is responsible for ensuring forward progress.
3660cf9a590aSTejun Heo 	 */
3661cf9a590aSTejun Heo 	if (err != -EAGAIN)
3662fee7ca72STejun Heo 		ehc->tries[dev->devno]--;
3663fee7ca72STejun Heo 
3664fee7ca72STejun Heo 	switch (err) {
3665fee7ca72STejun Heo 	case -ENODEV:
3666fee7ca72STejun Heo 		/* device missing or wrong IDENTIFY data, schedule probing */
3667fee7ca72STejun Heo 		ehc->i.probe_mask |= (1 << dev->devno);
3668df561f66SGustavo A. R. Silva 		fallthrough;
3669fee7ca72STejun Heo 	case -EINVAL:
3670fee7ca72STejun Heo 		/* give it just one more chance */
3671fee7ca72STejun Heo 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3672df561f66SGustavo A. R. Silva 		fallthrough;
3673fee7ca72STejun Heo 	case -EIO:
3674d89293abSTejun Heo 		if (ehc->tries[dev->devno] == 1) {
3675fee7ca72STejun Heo 			/* This is the last chance, better to slow
3676fee7ca72STejun Heo 			 * down than lose it.
3677fee7ca72STejun Heo 			 */
3678a07d499bSTejun Heo 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3679d89293abSTejun Heo 			if (dev->pio_mode > XFER_PIO_0)
3680fee7ca72STejun Heo 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3681fee7ca72STejun Heo 		}
3682fee7ca72STejun Heo 	}
3683fee7ca72STejun Heo 
3684fee7ca72STejun Heo 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3685fee7ca72STejun Heo 		/* disable device if it has used up all its chances */
3686fee7ca72STejun Heo 		ata_dev_disable(dev);
3687fee7ca72STejun Heo 
3688fee7ca72STejun Heo 		/* detach if offline */
3689b1c72916STejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3690fee7ca72STejun Heo 			ata_eh_detach_dev(dev);
3691fee7ca72STejun Heo 
369202c05a27STejun Heo 		/* schedule probe if necessary */
369387fbc5a0STejun Heo 		if (ata_eh_schedule_probe(dev)) {
3694fee7ca72STejun Heo 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
369587fbc5a0STejun Heo 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
369687fbc5a0STejun Heo 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
369787fbc5a0STejun Heo 		}
36989b1e2658STejun Heo 
36999b1e2658STejun Heo 		return 1;
3700fee7ca72STejun Heo 	} else {
3701cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
37029b1e2658STejun Heo 		return 0;
3703fee7ca72STejun Heo 	}
3704fee7ca72STejun Heo }
3705fee7ca72STejun Heo 
3706c6fd2807SJeff Garzik /**
3707c6fd2807SJeff Garzik  *	ata_eh_recover - recover host port after error
3708c6fd2807SJeff Garzik  *	@ap: host port to recover
3709c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3710c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3711c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3712c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
37139b1e2658STejun Heo  *	@r_failed_link: out parameter for failed link
3714c6fd2807SJeff Garzik  *
3715c6fd2807SJeff Garzik  *	This is the alpha and omega, eum and yang, heart and soul of
3716c6fd2807SJeff Garzik  *	libata exception handling.  On entry, actions required to
37179b1e2658STejun Heo  *	recover each link and hotplug requests are recorded in the
37189b1e2658STejun Heo  *	link's eh_context.  This function executes all the operations
37199b1e2658STejun Heo  *	with appropriate retrials and fallbacks to resurrect failed
3720c6fd2807SJeff Garzik  *	devices, detach goners and greet newcomers.
3721c6fd2807SJeff Garzik  *
3722c6fd2807SJeff Garzik  *	LOCKING:
3723c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3724c6fd2807SJeff Garzik  *
3725c6fd2807SJeff Garzik  *	RETURNS:
3726c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3727c6fd2807SJeff Garzik  */
3728fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3729c6fd2807SJeff Garzik 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
37309b1e2658STejun Heo 		   ata_postreset_fn_t postreset,
37319b1e2658STejun Heo 		   struct ata_link **r_failed_link)
3732c6fd2807SJeff Garzik {
37339b1e2658STejun Heo 	struct ata_link *link;
3734c6fd2807SJeff Garzik 	struct ata_device *dev;
37356b7ae954STejun Heo 	int rc, nr_fails;
373645fabbb7SElias Oltmanns 	unsigned long flags, deadline;
3737c6fd2807SJeff Garzik 
3738c6fd2807SJeff Garzik 	/* prep for recovery */
37391eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37409b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37419b1e2658STejun Heo 
3742f9df58cbSTejun Heo 		/* re-enable link? */
3743f9df58cbSTejun Heo 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3744f9df58cbSTejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3745f9df58cbSTejun Heo 			spin_lock_irqsave(ap->lock, flags);
3746f9df58cbSTejun Heo 			link->flags &= ~ATA_LFLAG_DISABLED;
3747f9df58cbSTejun Heo 			spin_unlock_irqrestore(ap->lock, flags);
3748f9df58cbSTejun Heo 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3749f9df58cbSTejun Heo 		}
3750f9df58cbSTejun Heo 
37511eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
3752fd995f70STejun Heo 			if (link->flags & ATA_LFLAG_NO_RETRY)
3753fd995f70STejun Heo 				ehc->tries[dev->devno] = 1;
3754fd995f70STejun Heo 			else
3755c6fd2807SJeff Garzik 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3756c6fd2807SJeff Garzik 
375779a55b72STejun Heo 			/* collect port action mask recorded in dev actions */
37589b1e2658STejun Heo 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
37599b1e2658STejun Heo 					 ~ATA_EH_PERDEV_MASK;
3760f58229f8STejun Heo 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
376179a55b72STejun Heo 
3762c6fd2807SJeff Garzik 			/* process hotplug request */
3763c6fd2807SJeff Garzik 			if (dev->flags & ATA_DFLAG_DETACH)
3764c6fd2807SJeff Garzik 				ata_eh_detach_dev(dev);
3765c6fd2807SJeff Garzik 
376602c05a27STejun Heo 			/* schedule probe if necessary */
376702c05a27STejun Heo 			if (!ata_dev_enabled(dev))
376802c05a27STejun Heo 				ata_eh_schedule_probe(dev);
3769c6fd2807SJeff Garzik 		}
37709b1e2658STejun Heo 	}
3771c6fd2807SJeff Garzik 
3772c6fd2807SJeff Garzik  retry:
3773c6fd2807SJeff Garzik 	rc = 0;
3774c6fd2807SJeff Garzik 
3775c6fd2807SJeff Garzik 	/* if UNLOADING, finish immediately */
3776c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3777c6fd2807SJeff Garzik 		goto out;
3778c6fd2807SJeff Garzik 
37799b1e2658STejun Heo 	/* prep for EH */
37801eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37819b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37829b1e2658STejun Heo 
3783c6fd2807SJeff Garzik 		/* skip EH if possible. */
37840260731fSTejun Heo 		if (ata_eh_skip_recovery(link))
3785c6fd2807SJeff Garzik 			ehc->i.action = 0;
3786c6fd2807SJeff Garzik 
37871eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
3788f58229f8STejun Heo 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
37899b1e2658STejun Heo 	}
3790c6fd2807SJeff Garzik 
3791c6fd2807SJeff Garzik 	/* reset */
37921eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37939b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37949b1e2658STejun Heo 
3795cf480626STejun Heo 		if (!(ehc->i.action & ATA_EH_RESET))
37969b1e2658STejun Heo 			continue;
37979b1e2658STejun Heo 
37989b1e2658STejun Heo 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3799dc98c32cSTejun Heo 				  prereset, softreset, hardreset, postreset);
3800c6fd2807SJeff Garzik 		if (rc) {
3801a9a79dfeSJoe Perches 			ata_link_err(link, "reset failed, giving up\n");
3802c6fd2807SJeff Garzik 			goto out;
3803c6fd2807SJeff Garzik 		}
38049b1e2658STejun Heo 	}
3805c6fd2807SJeff Garzik 
380645fabbb7SElias Oltmanns 	do {
380745fabbb7SElias Oltmanns 		unsigned long now;
380845fabbb7SElias Oltmanns 
380945fabbb7SElias Oltmanns 		/*
381045fabbb7SElias Oltmanns 		 * clears ATA_EH_PARK in eh_info and resets
381145fabbb7SElias Oltmanns 		 * ap->park_req_pending
381245fabbb7SElias Oltmanns 		 */
381345fabbb7SElias Oltmanns 		ata_eh_pull_park_action(ap);
381445fabbb7SElias Oltmanns 
381545fabbb7SElias Oltmanns 		deadline = jiffies;
38161eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
38171eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
381845fabbb7SElias Oltmanns 				struct ata_eh_context *ehc = &link->eh_context;
381945fabbb7SElias Oltmanns 				unsigned long tmp;
382045fabbb7SElias Oltmanns 
38219162c657SHannes Reinecke 				if (dev->class != ATA_DEV_ATA &&
38229162c657SHannes Reinecke 				    dev->class != ATA_DEV_ZAC)
382345fabbb7SElias Oltmanns 					continue;
382445fabbb7SElias Oltmanns 				if (!(ehc->i.dev_action[dev->devno] &
382545fabbb7SElias Oltmanns 				      ATA_EH_PARK))
382645fabbb7SElias Oltmanns 					continue;
382745fabbb7SElias Oltmanns 				tmp = dev->unpark_deadline;
382845fabbb7SElias Oltmanns 				if (time_before(deadline, tmp))
382945fabbb7SElias Oltmanns 					deadline = tmp;
383045fabbb7SElias Oltmanns 				else if (time_before_eq(tmp, jiffies))
383145fabbb7SElias Oltmanns 					continue;
383245fabbb7SElias Oltmanns 				if (ehc->unloaded_mask & (1 << dev->devno))
383345fabbb7SElias Oltmanns 					continue;
383445fabbb7SElias Oltmanns 
383545fabbb7SElias Oltmanns 				ata_eh_park_issue_cmd(dev, 1);
383645fabbb7SElias Oltmanns 			}
383745fabbb7SElias Oltmanns 		}
383845fabbb7SElias Oltmanns 
383945fabbb7SElias Oltmanns 		now = jiffies;
384045fabbb7SElias Oltmanns 		if (time_before_eq(deadline, now))
384145fabbb7SElias Oltmanns 			break;
384245fabbb7SElias Oltmanns 
3843c0c362b6STejun Heo 		ata_eh_release(ap);
384445fabbb7SElias Oltmanns 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
384545fabbb7SElias Oltmanns 						       deadline - now);
3846c0c362b6STejun Heo 		ata_eh_acquire(ap);
384745fabbb7SElias Oltmanns 	} while (deadline);
38481eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
38491eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
385045fabbb7SElias Oltmanns 			if (!(link->eh_context.unloaded_mask &
385145fabbb7SElias Oltmanns 			      (1 << dev->devno)))
385245fabbb7SElias Oltmanns 				continue;
385345fabbb7SElias Oltmanns 
385445fabbb7SElias Oltmanns 			ata_eh_park_issue_cmd(dev, 0);
385545fabbb7SElias Oltmanns 			ata_eh_done(link, dev, ATA_EH_PARK);
385645fabbb7SElias Oltmanns 		}
385745fabbb7SElias Oltmanns 	}
385845fabbb7SElias Oltmanns 
38599b1e2658STejun Heo 	/* the rest */
38606b7ae954STejun Heo 	nr_fails = 0;
38616b7ae954STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
38629b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
38639b1e2658STejun Heo 
38646b7ae954STejun Heo 		if (sata_pmp_attached(ap) && ata_is_host_link(link))
38656b7ae954STejun Heo 			goto config_lpm;
38666b7ae954STejun Heo 
3867c6fd2807SJeff Garzik 		/* revalidate existing devices and attach new ones */
38680260731fSTejun Heo 		rc = ata_eh_revalidate_and_attach(link, &dev);
3869c6fd2807SJeff Garzik 		if (rc)
38706b7ae954STejun Heo 			goto rest_fail;
3871c6fd2807SJeff Garzik 
3872633273a3STejun Heo 		/* if PMP got attached, return, pmp EH will take care of it */
3873633273a3STejun Heo 		if (link->device->class == ATA_DEV_PMP) {
3874633273a3STejun Heo 			ehc->i.action = 0;
3875633273a3STejun Heo 			return 0;
3876633273a3STejun Heo 		}
3877633273a3STejun Heo 
3878baa1e78aSTejun Heo 		/* configure transfer mode if necessary */
3879baa1e78aSTejun Heo 		if (ehc->i.flags & ATA_EHI_SETMODE) {
38800260731fSTejun Heo 			rc = ata_set_mode(link, &dev);
38814ae72a1eSTejun Heo 			if (rc)
38826b7ae954STejun Heo 				goto rest_fail;
3883baa1e78aSTejun Heo 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3884c6fd2807SJeff Garzik 		}
3885c6fd2807SJeff Garzik 
388611fc33daSTejun Heo 		/* If reset has been issued, clear UA to avoid
388711fc33daSTejun Heo 		 * disrupting the current users of the device.
388811fc33daSTejun Heo 		 */
388911fc33daSTejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
38901eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
389111fc33daSTejun Heo 				if (dev->class != ATA_DEV_ATAPI)
389211fc33daSTejun Heo 					continue;
389311fc33daSTejun Heo 				rc = atapi_eh_clear_ua(dev);
389411fc33daSTejun Heo 				if (rc)
38956b7ae954STejun Heo 					goto rest_fail;
389621334205SAaron Lu 				if (zpodd_dev_enabled(dev))
389721334205SAaron Lu 					zpodd_post_poweron(dev);
389811fc33daSTejun Heo 			}
389911fc33daSTejun Heo 		}
390011fc33daSTejun Heo 
390154d7211dSDamien Le Moal 		/*
390254d7211dSDamien Le Moal 		 * Make sure to transition devices to the active power mode
390354d7211dSDamien Le Moal 		 * if needed (e.g. if we were scheduled on system resume).
390454d7211dSDamien Le Moal 		 */
390554d7211dSDamien Le Moal 		ata_for_each_dev(dev, link, ENABLED) {
390654d7211dSDamien Le Moal 			if (ehc->i.dev_action[dev->devno] & ATA_EH_SET_ACTIVE) {
390754d7211dSDamien Le Moal 				ata_dev_power_set_active(dev);
390854d7211dSDamien Le Moal 				ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
390954d7211dSDamien Le Moal 			}
391054d7211dSDamien Le Moal 		}
391154d7211dSDamien Le Moal 
39126013efd8STejun Heo 		/* retry flush if necessary */
39136013efd8STejun Heo 		ata_for_each_dev(dev, link, ALL) {
39149162c657SHannes Reinecke 			if (dev->class != ATA_DEV_ATA &&
39159162c657SHannes Reinecke 			    dev->class != ATA_DEV_ZAC)
39166013efd8STejun Heo 				continue;
39176013efd8STejun Heo 			rc = ata_eh_maybe_retry_flush(dev);
39186013efd8STejun Heo 			if (rc)
39196b7ae954STejun Heo 				goto rest_fail;
39206013efd8STejun Heo 		}
39216013efd8STejun Heo 
39226b7ae954STejun Heo 	config_lpm:
392311fc33daSTejun Heo 		/* configure link power saving */
39246b7ae954STejun Heo 		if (link->lpm_policy != ap->target_lpm_policy) {
39256b7ae954STejun Heo 			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
39266b7ae954STejun Heo 			if (rc)
39276b7ae954STejun Heo 				goto rest_fail;
39286b7ae954STejun Heo 		}
3929ca77329fSKristen Carlson Accardi 
39309b1e2658STejun Heo 		/* this link is okay now */
39319b1e2658STejun Heo 		ehc->i.flags = 0;
39329b1e2658STejun Heo 		continue;
3933c6fd2807SJeff Garzik 
39346b7ae954STejun Heo 	rest_fail:
39356b7ae954STejun Heo 		nr_fails++;
39366b7ae954STejun Heo 		if (dev)
39370a2c0f56STejun Heo 			ata_eh_handle_dev_fail(dev, rc);
3938c6fd2807SJeff Garzik 
39394cb7c6f1SNiklas Cassel 		if (ata_port_is_frozen(ap)) {
3940b06ce3e5STejun Heo 			/* PMP reset requires working host port.
3941b06ce3e5STejun Heo 			 * Can't retry if it's frozen.
3942b06ce3e5STejun Heo 			 */
3943071f44b1STejun Heo 			if (sata_pmp_attached(ap))
3944b06ce3e5STejun Heo 				goto out;
39459b1e2658STejun Heo 			break;
39469b1e2658STejun Heo 		}
3947b06ce3e5STejun Heo 	}
39489b1e2658STejun Heo 
39496b7ae954STejun Heo 	if (nr_fails)
3950c6fd2807SJeff Garzik 		goto retry;
3951c6fd2807SJeff Garzik 
3952c6fd2807SJeff Garzik  out:
39539b1e2658STejun Heo 	if (rc && r_failed_link)
39549b1e2658STejun Heo 		*r_failed_link = link;
3955c6fd2807SJeff Garzik 
3956c6fd2807SJeff Garzik 	return rc;
3957c6fd2807SJeff Garzik }
3958c6fd2807SJeff Garzik 
3959c6fd2807SJeff Garzik /**
3960c6fd2807SJeff Garzik  *	ata_eh_finish - finish up EH
3961c6fd2807SJeff Garzik  *	@ap: host port to finish EH for
3962c6fd2807SJeff Garzik  *
3963c6fd2807SJeff Garzik  *	Recovery is complete.  Clean up EH states and retry or finish
3964c6fd2807SJeff Garzik  *	failed qcs.
3965c6fd2807SJeff Garzik  *
3966c6fd2807SJeff Garzik  *	LOCKING:
3967c6fd2807SJeff Garzik  *	None.
3968c6fd2807SJeff Garzik  */
3969fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap)
3970c6fd2807SJeff Garzik {
3971258c4e5cSJens Axboe 	struct ata_queued_cmd *qc;
3972c6fd2807SJeff Garzik 	int tag;
3973c6fd2807SJeff Garzik 
3974c6fd2807SJeff Garzik 	/* retry or finish qcs */
3975258c4e5cSJens Axboe 	ata_qc_for_each_raw(ap, qc, tag) {
397687629312SNiklas Cassel 		if (!(qc->flags & ATA_QCFLAG_EH))
3977c6fd2807SJeff Garzik 			continue;
3978c6fd2807SJeff Garzik 
3979c6fd2807SJeff Garzik 		if (qc->err_mask) {
3980c6fd2807SJeff Garzik 			/* FIXME: Once EH migration is complete,
3981c6fd2807SJeff Garzik 			 * generate sense data in this function,
3982c6fd2807SJeff Garzik 			 * considering both err_mask and tf.
3983c6fd2807SJeff Garzik 			 */
3984e4c26a1bSNiklas Cassel 			if (qc->flags & ATA_QCFLAG_RETRY) {
3985e4c26a1bSNiklas Cassel 				/*
3986e4c26a1bSNiklas Cassel 				 * Since qc->err_mask is set, ata_eh_qc_retry()
3987e4c26a1bSNiklas Cassel 				 * will not increment scmd->allowed, so upper
3988e4c26a1bSNiklas Cassel 				 * layer will only retry the command if it has
3989e4c26a1bSNiklas Cassel 				 * not already been retried too many times.
3990e4c26a1bSNiklas Cassel 				 */
3991c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
3992e4c26a1bSNiklas Cassel 			} else {
399303faab78STejun Heo 				ata_eh_qc_complete(qc);
3994e4c26a1bSNiklas Cassel 			}
3995c6fd2807SJeff Garzik 		} else {
399618bd7718SNiklas Cassel 			if (qc->flags & ATA_QCFLAG_SENSE_VALID ||
399718bd7718SNiklas Cassel 			    qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) {
3998c6fd2807SJeff Garzik 				ata_eh_qc_complete(qc);
3999c6fd2807SJeff Garzik 			} else {
4000c6fd2807SJeff Garzik 				/* feed zero TF to sense generation */
4001c6fd2807SJeff Garzik 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
4002e4c26a1bSNiklas Cassel 				/*
4003e4c26a1bSNiklas Cassel 				 * Since qc->err_mask is not set,
4004e4c26a1bSNiklas Cassel 				 * ata_eh_qc_retry() will increment
4005e4c26a1bSNiklas Cassel 				 * scmd->allowed, so upper layer is guaranteed
4006e4c26a1bSNiklas Cassel 				 * to retry the command.
4007e4c26a1bSNiklas Cassel 				 */
4008c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
4009c6fd2807SJeff Garzik 			}
4010c6fd2807SJeff Garzik 		}
4011c6fd2807SJeff Garzik 	}
4012da917d69STejun Heo 
4013da917d69STejun Heo 	/* make sure nr_active_links is zero after EH */
4014da917d69STejun Heo 	WARN_ON(ap->nr_active_links);
4015da917d69STejun Heo 	ap->nr_active_links = 0;
4016c6fd2807SJeff Garzik }
4017c6fd2807SJeff Garzik 
4018c6fd2807SJeff Garzik /**
4019c6fd2807SJeff Garzik  *	ata_do_eh - do standard error handling
4020c6fd2807SJeff Garzik  *	@ap: host port to handle error for
4021a1efdabaSTejun Heo  *
4022c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
4023c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
4024c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
4025c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
4026c6fd2807SJeff Garzik  *
4027c6fd2807SJeff Garzik  *	Perform standard error handling sequence.
4028c6fd2807SJeff Garzik  *
4029c6fd2807SJeff Garzik  *	LOCKING:
4030c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
4031c6fd2807SJeff Garzik  */
4032c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
4033c6fd2807SJeff Garzik 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
4034c6fd2807SJeff Garzik 	       ata_postreset_fn_t postreset)
4035c6fd2807SJeff Garzik {
40369b1e2658STejun Heo 	struct ata_device *dev;
40379b1e2658STejun Heo 	int rc;
40389b1e2658STejun Heo 
40399b1e2658STejun Heo 	ata_eh_autopsy(ap);
40409b1e2658STejun Heo 	ata_eh_report(ap);
40419b1e2658STejun Heo 
40429b1e2658STejun Heo 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
40439b1e2658STejun Heo 			    NULL);
40449b1e2658STejun Heo 	if (rc) {
40451eca4365STejun Heo 		ata_for_each_dev(dev, &ap->link, ALL)
40469b1e2658STejun Heo 			ata_dev_disable(dev);
40479b1e2658STejun Heo 	}
40489b1e2658STejun Heo 
4049c6fd2807SJeff Garzik 	ata_eh_finish(ap);
4050c6fd2807SJeff Garzik }
4051c6fd2807SJeff Garzik 
4052a1efdabaSTejun Heo /**
4053a1efdabaSTejun Heo  *	ata_std_error_handler - standard error handler
4054a1efdabaSTejun Heo  *	@ap: host port to handle error for
4055a1efdabaSTejun Heo  *
4056a1efdabaSTejun Heo  *	Standard error handler
4057a1efdabaSTejun Heo  *
4058a1efdabaSTejun Heo  *	LOCKING:
4059a1efdabaSTejun Heo  *	Kernel thread context (may sleep).
4060a1efdabaSTejun Heo  */
4061a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap)
4062a1efdabaSTejun Heo {
4063a1efdabaSTejun Heo 	struct ata_port_operations *ops = ap->ops;
4064a1efdabaSTejun Heo 	ata_reset_fn_t hardreset = ops->hardreset;
4065a1efdabaSTejun Heo 
406657c9efdfSTejun Heo 	/* ignore built-in hardreset if SCR access is not available */
4067fe06e5f9STejun Heo 	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4068a1efdabaSTejun Heo 		hardreset = NULL;
4069a1efdabaSTejun Heo 
4070a1efdabaSTejun Heo 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4071a1efdabaSTejun Heo }
4072a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_std_error_handler);
4073a1efdabaSTejun Heo 
40746ffa01d8STejun Heo #ifdef CONFIG_PM
4075c6fd2807SJeff Garzik /**
4076c6fd2807SJeff Garzik  *	ata_eh_handle_port_suspend - perform port suspend operation
4077c6fd2807SJeff Garzik  *	@ap: port to suspend
4078c6fd2807SJeff Garzik  *
4079c6fd2807SJeff Garzik  *	Suspend @ap.
4080c6fd2807SJeff Garzik  *
4081c6fd2807SJeff Garzik  *	LOCKING:
4082c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
4083c6fd2807SJeff Garzik  */
4084c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap)
4085c6fd2807SJeff Garzik {
4086c6fd2807SJeff Garzik 	unsigned long flags;
4087c6fd2807SJeff Garzik 	int rc = 0;
40883dc67440SAaron Lu 	struct ata_device *dev;
4089aa3998dbSDamien Le Moal 	struct ata_link *link;
4090c6fd2807SJeff Garzik 
4091c6fd2807SJeff Garzik 	/* are we suspending? */
4092c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4093c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4094a7ff60dbSAaron Lu 	    ap->pm_mesg.event & PM_EVENT_RESUME) {
4095c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
4096c6fd2807SJeff Garzik 		return;
4097c6fd2807SJeff Garzik 	}
4098c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4099c6fd2807SJeff Garzik 
4100c6fd2807SJeff Garzik 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4101c6fd2807SJeff Garzik 
4102aa3998dbSDamien Le Moal 	/* Set all devices attached to the port in standby mode */
4103aa3998dbSDamien Le Moal 	ata_for_each_link(link, ap, HOST_FIRST) {
4104aa3998dbSDamien Le Moal 		ata_for_each_dev(dev, link, ENABLED)
4105aa3998dbSDamien Le Moal 			ata_dev_power_set_standby(dev);
4106aa3998dbSDamien Le Moal 	}
4107aa3998dbSDamien Le Moal 
41083dc67440SAaron Lu 	/*
41093dc67440SAaron Lu 	 * If we have a ZPODD attached, check its zero
41103dc67440SAaron Lu 	 * power ready status before the port is frozen.
4111a7ff60dbSAaron Lu 	 * Only needed for runtime suspend.
41123dc67440SAaron Lu 	 */
4113a7ff60dbSAaron Lu 	if (PMSG_IS_AUTO(ap->pm_mesg)) {
41143dc67440SAaron Lu 		ata_for_each_dev(dev, &ap->link, ENABLED) {
41153dc67440SAaron Lu 			if (zpodd_dev_enabled(dev))
41163dc67440SAaron Lu 				zpodd_on_suspend(dev);
41173dc67440SAaron Lu 		}
4118a7ff60dbSAaron Lu 	}
41193dc67440SAaron Lu 
4120c6fd2807SJeff Garzik 	/* suspend */
4121c6fd2807SJeff Garzik 	ata_eh_freeze_port(ap);
4122c6fd2807SJeff Garzik 
4123c6fd2807SJeff Garzik 	if (ap->ops->port_suspend)
4124c6fd2807SJeff Garzik 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4125c6fd2807SJeff Garzik 
4126a7ff60dbSAaron Lu 	ata_acpi_set_state(ap, ap->pm_mesg);
41272a7b02eaSSergey Shtylyov 
4128bc6e7c4bSDan Williams 	/* update the flags */
4129c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4130c6fd2807SJeff Garzik 
4131c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4132c6fd2807SJeff Garzik 	if (rc == 0)
4133c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SUSPENDED;
41344cb7c6f1SNiklas Cassel 	else if (ata_port_is_frozen(ap))
4135c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
4136c6fd2807SJeff Garzik 
4137c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4138c6fd2807SJeff Garzik 
4139c6fd2807SJeff Garzik 	return;
4140c6fd2807SJeff Garzik }
4141c6fd2807SJeff Garzik 
4142c6fd2807SJeff Garzik /**
4143c6fd2807SJeff Garzik  *	ata_eh_handle_port_resume - perform port resume operation
4144c6fd2807SJeff Garzik  *	@ap: port to resume
4145c6fd2807SJeff Garzik  *
4146c6fd2807SJeff Garzik  *	Resume @ap.
4147c6fd2807SJeff Garzik  *
4148c6fd2807SJeff Garzik  *	LOCKING:
4149c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
4150c6fd2807SJeff Garzik  */
4151c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
4152c6fd2807SJeff Garzik {
41536f9c1ea2STejun Heo 	struct ata_link *link;
41546f9c1ea2STejun Heo 	struct ata_device *dev;
4155c6fd2807SJeff Garzik 	unsigned long flags;
4156c6fd2807SJeff Garzik 
4157c6fd2807SJeff Garzik 	/* are we resuming? */
4158c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4159c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4160a7ff60dbSAaron Lu 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4161c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
4162c6fd2807SJeff Garzik 		return;
4163c6fd2807SJeff Garzik 	}
4164c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4165c6fd2807SJeff Garzik 
41669666f400STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4167c6fd2807SJeff Garzik 
41686f9c1ea2STejun Heo 	/*
41696f9c1ea2STejun Heo 	 * Error timestamps are in jiffies which doesn't run while
41706f9c1ea2STejun Heo 	 * suspended and PHY events during resume isn't too uncommon.
41716f9c1ea2STejun Heo 	 * When the two are combined, it can lead to unnecessary speed
41726f9c1ea2STejun Heo 	 * downs if the machine is suspended and resumed repeatedly.
41736f9c1ea2STejun Heo 	 * Clear error history.
41746f9c1ea2STejun Heo 	 */
41756f9c1ea2STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
41766f9c1ea2STejun Heo 		ata_for_each_dev(dev, link, ALL)
41776f9c1ea2STejun Heo 			ata_ering_clear(&dev->ering);
41786f9c1ea2STejun Heo 
4179a7ff60dbSAaron Lu 	ata_acpi_set_state(ap, ap->pm_mesg);
4180bd3adca5SShaohua Li 
4181c6fd2807SJeff Garzik 	if (ap->ops->port_resume)
4182ae867937SKefeng Wang 		ap->ops->port_resume(ap);
4183c6fd2807SJeff Garzik 
41846746544cSTejun Heo 	/* tell ACPI that we're resuming */
41856746544cSTejun Heo 	ata_acpi_on_resume(ap);
41866746544cSTejun Heo 
4187bc6e7c4bSDan Williams 	/* update the flags */
4188c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4189c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4190aa3998dbSDamien Le Moal 	ap->pflags |= ATA_PFLAG_RESUMING;
4191c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4192c6fd2807SJeff Garzik }
41936ffa01d8STejun Heo #endif /* CONFIG_PM */
4194