// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-core.c - helper library for ATA
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <asm/setup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned int ata_dev_quirks(const struct ata_device *dev);

static DEFINE_IDA(ata_ida);

#ifdef CONFIG_ATA_FORCE
struct ata_force_param {
	const char	*name;
	u8		cbl;
	u8		spd_limit;
	unsigned int	xfer_mask;
	unsigned int	quirk_on;
	unsigned int	quirk_off;
	unsigned int	pflags_on;
	u16		lflags_on;
	u16		lflags_off;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
#endif

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static inline bool ata_dev_print_info(const struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.flags & ATA_EHI_PRINTINFO;
}

/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			fallthrough;
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_link_next);
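
/*
 * Usage sketch (illustrative): callers normally reach ata_link_next()
 * through the ata_for_each_link() wrapper from <linux/libata.h> rather
 * than calling it directly:
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE) {
 *		// in ATA_LITER_EDGE mode the host link is skipped
 *		// whenever a PMP is attached
 *	}
 */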

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
EXPORT_SYMBOL_GPL(ata_dev_next);
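
/*
 * Usage sketch (illustrative): as with links, the ata_for_each_dev()
 * wrapper is the usual entry point:
 *
 *	struct ata_device *dev;
 *
 *	ata_for_each_dev(dev, link, ENABLED) {
 *		// only devices for which ata_dev_enabled() is true
 *		// are visited in this mode
 *	}
 */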

/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on slave
 *	link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

#ifdef CONFIG_ATA_FORCE
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_pflags - force port flags according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force port flags according to libata.force and whine about it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_pflags(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		/* let pflags stack */
		if (fe->param.pflags_on) {
			ap->pflags |= fe->param.pflags_on;
			ata_port_notice(ap,
					"FORCE: port flag 0x%x forced -> 0x%x\n",
					fe->param.pflags_on, ap->pflags);
		}
	}
}

/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags_on) {
			link->flags |= fe->param.lflags_on;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags_on, link->flags);
		}
		if (fe->param.lflags_off) {
			link->flags &= ~fe->param.lflags_off;
			ata_link_notice(link,
				"FORCE: link flag 0x%x cleared -> 0x%x\n",
				fe->param.lflags_off, link->flags);
		}
	}
}

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned int pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 *	ata_force_quirks - force quirks according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force quirks according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_quirks(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->quirks & fe->param.quirk_on) &&
		    !(dev->quirks & fe->param.quirk_off))
			continue;

		dev->quirks |= fe->param.quirk_on;
		dev->quirks &= ~fe->param.quirk_off;

		ata_dev_notice(dev, "FORCE: modified (%s)\n",
			       fe->param.name);
	}
}
#else
static inline void ata_force_pflags(struct ata_port *ap) { }
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_quirks(struct ata_device *dev) { }
#endif

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		fallthrough;
	default:
		return ATAPI_MISC;
	}
}
EXPORT_SYMBOL_GPL(atapi_cmd_type);
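
/*
 * Example (illustrative): with the default atapi_passthru16=1, ATA_16 is
 * classified as pass-thru instead of a generic packet command:
 *
 *	atapi_cmd_type(GPCMD_READ_10);		// ATAPI_READ
 *	atapi_cmd_type(ATA_16);			// ATAPI_PASS_THRU
 *	atapi_cmd_type(GPCMD_TEST_UNIT_READY);	// ATAPI_MISC
 */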

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	0,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_set_rwcmd_protocol - set taskfile r/w command and protocol
 *	@dev: target device for the taskfile
 *	@tf: taskfile to examine and configure
 *
 *	Examine the device configuration and tf->flags to determine
 *	the proper read/write command and protocol to use for @tf.
 *
 *	LOCKING:
 *	caller.
 */
static bool ata_set_rwcmd_protocol(struct ata_device *dev,
				   struct ata_taskfile *tf)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (!cmd)
		return false;

	tf->command = cmd;

	return true;
}

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

/*
 * Set a taskfile command duration limit index.
 */
static inline void ata_set_tf_cdl(struct ata_queued_cmd *qc, int cdl)
{
	struct ata_taskfile *tf = &qc->tf;

	if (tf->protocol == ATA_PROT_NCQ)
		tf->auxiliary |= cdl;
	else
		tf->feature |= cdl;

	/*
	 * Mark this command as having a CDL and request the result
	 * task file so that we can inspect the sense data available
	 * bit on completion.
	 */
	qc->flags |= ATA_QCFLAG_HAS_CDL | ATA_QCFLAG_RESULT_TF;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@qc: Metadata associated with the taskfile to build
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@cdl: Command duration limit index
 *	@class: IO priority class
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile for the command @qc for read/write request described
 *	by @block, @n_block, @tf_flags and @class.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
		    unsigned int tf_flags, int cdl, int class)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = qc->hw_tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
		    class == IOPRIO_CLASS_RT)
			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;

		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
			ata_set_tf_cdl(qc, cdl);

	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
			ata_set_tf_cdl(qc, cdl);

		/* Both FUA writes and a CDL index require 48-bit commands */
		if (!(tf->flags & ATA_TFLAG_FUA) &&
		    !(qc->flags & ATA_QCFLAG_HAS_CDL) &&
		    lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else {
			/* request too large even for LBA48 */
			return -ERANGE;
		}

		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		/*
		 * Check whether the converted CHS can fit.
		 * Cylinder: 0-65535
		 * Head: 0-15
		 * Sector: 1-255
		 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
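
/*
 * Worked example (illustrative): an NCQ write of 16 blocks at LBA
 * 0x12345678 with hardware tag 5 leaves ata_build_rw_tf() with:
 *
 *	tf->command     = ATA_CMD_FPDMA_WRITE;
 *	tf->nsect       = 5 << 3;	// tag in bits 7:3
 *	tf->feature     = 16;		// block count 7:0
 *	tf->hob_feature = 0;		// block count 15:8
 *	tf->lbal = 0x78; tf->lbam = 0x56; tf->lbah = 0x34;
 *	tf->hob_lbal = 0x12; tf->hob_lbam = 0; tf->hob_lbah = 0;
 *	tf->device      = ATA_LBA;	// bit 7 also set for FUA writes
 */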

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned int ata_pack_xfermask(unsigned int pio_mask,
			       unsigned int mwdma_mask,
			       unsigned int udma_mask)
{
	return	((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
EXPORT_SYMBOL_GPL(ata_pack_xfermask);

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
			 unsigned int *mwdma_mask, unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
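
/*
 * Example (illustrative): the three per-type masks round-trip through a
 * single packed value; the exact bit positions come from the ATA_SHIFT_*
 * constants in <linux/ata.h>:
 *
 *	unsigned int xfer_mask = ata_pack_xfermask(0x7f, 0x07, 0x3f);
 *	unsigned int pio, mwdma, udma;
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio == 0x7f, mwdma == 0x07, udma == 0x3f again
 */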

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
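
/*
 * Example (illustrative): converting between masks and XFER_* values.
 * Only the highest set bit matters for ata_xfer_mask2mode(), while
 * ata_xfer_mode2mask() returns every mode of that type up to and
 * including the given one:
 *
 *	u8 mode = ata_xfer_mask2mode(ata_xfer_mode2mask(XFER_UDMA_5));
 *	// mode == XFER_UDMA_5: mode2mask() set the UDMA0-5 bits and
 *	// mask2mode() picked the highest one
 */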

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
EXPORT_SYMBOL_GPL(ata_mode_string);

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_SEMB, %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event
 *	of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if (tf->lbam == 0 && tf->lbah == 0)
		return ATA_DEV_ATA;

	if (tf->lbam == 0x14 && tf->lbah == 0xeb)
		return ATA_DEV_ATAPI;

	if (tf->lbam == 0x69 && tf->lbah == 0x96)
		return ATA_DEV_PMP;

	if (tf->lbam == 0x3c && tf->lbah == 0xc3)
		return ATA_DEV_SEMB;

	if (tf->lbam == 0xcd && tf->lbah == 0xab)
		return ATA_DEV_ZAC;

	return ATA_DEV_UNKNOWN;
}
EXPORT_SYMBOL_GPL(ata_dev_classify);
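
/*
 * Signature reference (summary of the checks above):
 *
 *	lbam  lbah	class
 *	0x00  0x00	ATA_DEV_ATA
 *	0x14  0xeb	ATA_DEV_ATAPI
 *	0x69  0x96	ATA_DEV_PMP
 *	0x3c  0xc3	ATA_DEV_SEMB
 *	0xcd  0xab	ATA_DEV_ZAC
 *	other		ATA_DEV_UNKNOWN
 */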

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
EXPORT_SYMBOL_GPL(ata_id_string);

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
EXPORT_SYMBOL_GPL(ata_id_c_string);
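
/*
 * Usage sketch (illustrative): extracting the model string from IDENTIFY
 * data; the +1 leaves room for the terminating NUL:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */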

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);

		return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	}

	if (ata_id_current_chs_valid(id))
		return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
		       (u32)id[ATA_ID_CUR_SECTORS];

	return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
	       (u32)id[ATA_ID_SECTORS];
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}
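
/*
 * Example (illustrative): a taskfile whose hob_lbah..lbal bytes read
 * 0x00 0x00 0x01 0x00 0x00 0x00 encodes LBA 0x01000000, so
 * ata_tf_to_lba48() returns 16777216; the READ/SET NATIVE MAX callers
 * below add one to turn that max address into a sector count.
 */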

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->quirks & ATA_QUIRK_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.error & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	bool print_info = ata_dev_print_info(dev);
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->quirks & ATA_QUIRK_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->quirks |= ATA_QUIRK_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->quirks |= ATA_QUIRK_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: device from which the information is fetched
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
{
	ata_dev_dbg(dev,
		"49==0x%04x  53==0x%04x  63==0x%04x  64==0x%04x  75==0x%04x\n"
		"80==0x%04x  81==0x%04x  82==0x%04x  83==0x%04x  84==0x%04x\n"
		"88==0x%04x  93==0x%04x\n",
		id[49], id[53], id[63], id[64], id[75], id[80],
		id[81], id[82], id[83], id[84], id[88], id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it is the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
EXPORT_SYMBOL_GPL(ata_id_xfermask);
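
/*
 * Usage sketch (illustrative): the packed mask is typically compared
 * against controller capabilities or split back apart:
 *
 *	unsigned int xfer_mask = ata_id_xfermask(dev->id);
 *
 *	if (xfer_mask & ATA_MASK_UDMA)
 *		; // the drive advertises at least one UDMA mode
 */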

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout. @tf contains
 *	the command on entry and the result on return. Timeout and error
 *	conditions are reported via the return value. No recovery action
 *	is taken after a command times out. It is the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf,
			       const u8 *cdb, enum dma_data_direction dma_dir,
			       void *buf, unsigned int buflen,
			       unsigned int timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	struct scatterlist sgl;
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	bool auto_timeout = false;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	if (WARN_ON(dma_dir != DMA_NONE && !buf))
		return AC_ERR_INVALID;

	spin_lock_irqsave(ap->lock, flags);

	/* No internal command while frozen */
	if (ata_port_is_frozen(ap)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* Initialize internal qc */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* Prepare and issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* Some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		sg_init_one(&sgl, buf, buflen);
		ata_sg_init(qc, &sgl, 1);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout) {
			timeout = ata_probe_timeout * 1000;
		} else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = true;
		}
	}

	ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		/*
		 * We are racing with irq here. If we lose, the following test
		 * prevents us from completing the qc twice. If we win, the port
		 * is frozen and will be cleaned up by ->post_internal_cmd().
		 */
		spin_lock_irqsave(ap->lock, flags);
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;
			ata_port_freeze(ap);
			ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
				     timeout, command);
		}
		spin_unlock_irqrestore(ap->lock, flags);
	}

	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* Perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_EH) {
		if (qc->result_tf.status & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.status |= ATA_SENSE;
	}

	/* Finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
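
/*
 * Usage sketch (illustrative): issuing a simple non-data command, much
 * like the READ/SET MAX helpers above do:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command  = ATA_CMD_FLUSH_EXT;	// any NODATA command
 *	tf.flags   |= ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_NODATA;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 */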

/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* For PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
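
/*
 * Example (illustrative): a CF card running PIO6 never needs IORDY per
 * the CF spec, while a drive in PIO4 always does:
 *
 *	ata_pio_need_iordy(cf_dev);	// 0 when pio_mode == XFER_PIO_6
 *	ata_pio_need_iordy(hdd_dev);	// 1 when pio_mode == XFER_PIO_4
 *
 * (cf_dev and hdd_dev are hypothetical devices named for illustration.)
 */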

/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_do_dev_read_id		-	default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, __le16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1741  
1742  /**
1743   *	ata_dev_read_id - Read ID data from the specified device
1744   *	@dev: target device
1745   *	@p_class: pointer to class of the target device (may be changed)
1746   *	@flags: ATA_READID_* flags
1747   *	@id: buffer to read IDENTIFY data into
1748   *
1749   *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1750   *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1751   *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1752   *	for pre-ATA4 drives.
1753   *
1754   *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1755   *	now we abort if we hit that case.
1756   *
1757   *	LOCKING:
1758   *	Kernel thread context (may sleep)
1759   *
1760   *	RETURNS:
1761   *	0 on success, -errno otherwise.
1762   */
1763  int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1764  		    unsigned int flags, u16 *id)
1765  {
1766  	struct ata_port *ap = dev->link->ap;
1767  	unsigned int class = *p_class;
1768  	struct ata_taskfile tf;
1769  	unsigned int err_mask = 0;
1770  	const char *reason;
1771  	bool is_semb = class == ATA_DEV_SEMB;
1772  	int may_fallback = 1, tried_spinup = 0;
1773  	int rc;
1774  
1775  retry:
1776  	ata_tf_init(dev, &tf);
1777  
1778  	switch (class) {
1779  	case ATA_DEV_SEMB:
1780  		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1781  		fallthrough;
1782  	case ATA_DEV_ATA:
1783  	case ATA_DEV_ZAC:
1784  		tf.command = ATA_CMD_ID_ATA;
1785  		break;
1786  	case ATA_DEV_ATAPI:
1787  		tf.command = ATA_CMD_ID_ATAPI;
1788  		break;
1789  	default:
1790  		rc = -ENODEV;
1791  		reason = "unsupported class";
1792  		goto err_out;
1793  	}
1794  
1795  	tf.protocol = ATA_PROT_PIO;
1796  
1797  	/* Some devices choke if TF registers contain garbage.  Make
1798  	 * sure those are properly initialized.
1799  	 */
1800  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1801  
1802  	/* Device presence detection is unreliable on some
1803  	 * controllers.  Always poll IDENTIFY if available.
1804  	 */
1805  	tf.flags |= ATA_TFLAG_POLLING;
1806  
1807  	if (ap->ops->read_id)
1808  		err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
1809  	else
1810  		err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
1811  
1812  	if (err_mask) {
1813  		if (err_mask & AC_ERR_NODEV_HINT) {
1814  			ata_dev_dbg(dev, "NODEV after polling detection\n");
1815  			return -ENOENT;
1816  		}
1817  
1818  		if (is_semb) {
1819  			ata_dev_info(dev,
1820  		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1821  			/* SEMB is not supported yet */
1822  			*p_class = ATA_DEV_SEMB_UNSUP;
1823  			return 0;
1824  		}
1825  
1826  		if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
1827  			/* Device or controller might have reported
1828  			 * the wrong device class.  Give a shot at the
1829  			 * other IDENTIFY if the current one is
1830  			 * aborted by the device.
1831  			 */
1832  			if (may_fallback) {
1833  				may_fallback = 0;
1834  
1835  				if (class == ATA_DEV_ATA)
1836  					class = ATA_DEV_ATAPI;
1837  				else
1838  					class = ATA_DEV_ATA;
1839  				goto retry;
1840  			}
1841  
1842  			/* Control reaches here iff the device aborted
1843  			 * both flavors of IDENTIFYs which happens
1844  			 * sometimes with phantom devices.
1845  			 */
1846  			ata_dev_dbg(dev,
1847  				    "both IDENTIFYs aborted, assuming NODEV\n");
1848  			return -ENOENT;
1849  		}
1850  
1851  		rc = -EIO;
1852  		reason = "I/O error";
1853  		goto err_out;
1854  	}
1855  
1856  	if (dev->quirks & ATA_QUIRK_DUMP_ID) {
1857  		ata_dev_info(dev, "dumping IDENTIFY data, "
1858  			    "class=%d may_fallback=%d tried_spinup=%d\n",
1859  			    class, may_fallback, tried_spinup);
1860  		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
1861  			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1862  	}
1863  
1864  	/* Falling back doesn't make sense if ID data was read
1865  	 * successfully at least once.
1866  	 */
1867  	may_fallback = 0;
1868  
1869  	swap_buf_le16(id, ATA_ID_WORDS);
1870  
1871  	/* sanity check */
1872  	rc = -EINVAL;
1873  	reason = "device reports invalid type";
1874  
1875  	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1876  		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1877  			goto err_out;
1878  		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1879  							ata_id_is_ata(id)) {
1880  			ata_dev_dbg(dev,
1881  				"host indicates ignore ATA devices, ignored\n");
1882  			return -ENOENT;
1883  		}
1884  	} else {
1885  		if (ata_id_is_ata(id))
1886  			goto err_out;
1887  	}
1888  
1889  	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1890  		tried_spinup = 1;
1891  		/*
1892  		 * Drive powered-up in standby mode, and requires a specific
1893  		 * SET_FEATURES spin-up subcommand before it will accept
1894  		 * anything other than the original IDENTIFY command.
1895  		 */
1896  		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1897  		if (err_mask && id[2] != 0x738c) {
1898  			rc = -EIO;
1899  			reason = "SPINUP failed";
1900  			goto err_out;
1901  		}
1902  		/*
1903  		 * If the drive initially returned incomplete IDENTIFY info,
1904  		 * we now must reissue the IDENTIFY command.
1905  		 */
1906  		if (id[2] == 0x37c8)
1907  			goto retry;
1908  	}
1909  
1910  	if ((flags & ATA_READID_POSTRESET) &&
1911  	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
1912  		/*
1913  		 * The exact sequence expected by certain pre-ATA4 drives is:
1914  		 * SRST RESET
1915  		 * IDENTIFY (optional in early ATA)
1916  		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1917  		 * anything else..
1918  		 * Some drives were very specific about that exact sequence.
1919  		 *
1920  		 * Note that ATA4 says lba is mandatory so the second check
1921  		 * should never trigger.
1922  		 */
1923  		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1924  			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1925  			if (err_mask) {
1926  				rc = -EIO;
1927  				reason = "INIT_DEV_PARAMS failed";
1928  				goto err_out;
1929  			}
1930  
1931  			/* current CHS translation info (id[53-58]) might be
1932  			 * changed. reread the identify device info.
1933  			 */
1934  			flags &= ~ATA_READID_POSTRESET;
1935  			goto retry;
1936  		}
1937  	}
1938  
1939  	*p_class = class;
1940  
1941  	return 0;
1942  
1943   err_out:
1944  	ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
1945  		     reason, err_mask);
1946  	return rc;
1947  }
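
/*
 * Illustrative sketch of a caller, similar in shape to the EH-time
 * revalidation path (simplified, not a verbatim quote of that code):
 *
 *	unsigned int class = dev->class;
 *	u16 id[ATA_ID_WORDS];
 *	int rc;
 *
 *	rc = ata_dev_read_id(dev, &class, ATA_READID_POSTRESET, id);
 *	if (rc)
 *		return rc;	// -ENOENT: no device; other -errno: I/O error
 *	// on success, @class holds the (possibly corrected) device class
 */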
1948  
1949  bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
1950  			   bool set_active)
1951  {
1952  	/* Only applies to ATA and ZAC devices */
1953  	if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
1954  		return false;
1955  
1956  	ata_tf_init(dev, tf);
1957  	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1958  	tf->protocol = ATA_PROT_NODATA;
1959  
1960  	if (set_active) {
1961  		/* VERIFY for 1 sector at lba=0 */
1962  		tf->command = ATA_CMD_VERIFY;
1963  		tf->nsect = 1;
1964  		if (dev->flags & ATA_DFLAG_LBA) {
1965  			tf->flags |= ATA_TFLAG_LBA;
1966  			tf->device |= ATA_LBA;
1967  		} else {
1968  			/* CHS */
1969  			tf->lbal = 0x1; /* sect */
1970  		}
1971  	} else {
1972  		tf->command = ATA_CMD_STANDBYNOW1;
1973  	}
1974  
1975  	return true;
1976  }
1977  
1978  static bool ata_dev_power_is_active(struct ata_device *dev)
1979  {
1980  	struct ata_taskfile tf;
1981  	unsigned int err_mask;
1982  
1983  	ata_tf_init(dev, &tf);
1984  	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1985  	tf.protocol = ATA_PROT_NODATA;
1986  	tf.command = ATA_CMD_CHK_POWER;
1987  
1988  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1989  	if (err_mask) {
1990  		ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
1991  			    err_mask);
1992  		/*
1993  		 * Assume we are in standby mode so that we always force a
1994  		 * spinup in ata_dev_power_set_active().
1995  		 */
1996  		return false;
1997  	}
1998  
1999  	ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
2000  
2001  	/* Active or idle */
2002  	return tf.nsect == 0xff;
2003  }
2004  
2005  /**
2006   *	ata_dev_power_set_standby - Set a device power mode to standby
2007   *	@dev: target device
2008   *
2009   *	Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
2010   *	For an HDD device, this spins down the disks.
2011   *
2012   *	LOCKING:
2013   *	Kernel thread context (may sleep).
2014   */
2015  void ata_dev_power_set_standby(struct ata_device *dev)
2016  {
2017  	unsigned long ap_flags = dev->link->ap->flags;
2018  	struct ata_taskfile tf;
2019  	unsigned int err_mask;
2020  
2021  	/* If the device is already sleeping or in standby, do nothing. */
2022  	if ((dev->flags & ATA_DFLAG_SLEEPING) ||
2023  	    !ata_dev_power_is_active(dev))
2024  		return;
2025  
2026  	/*
2027  	 * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
2028  	 * causing some drives to spin up and down again. For these, do nothing
2029  	 * if we are being called on shutdown.
2030  	 */
2031  	if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
2032  	    system_state == SYSTEM_POWER_OFF)
2033  		return;
2034  
2035  	if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
2036  	    system_entering_hibernation())
2037  		return;
2038  
2039  	/* Issue STANDBY IMMEDIATE command only if supported by the device */
2040  	if (!ata_dev_power_init_tf(dev, &tf, false))
2041  		return;
2042  
2043  	ata_dev_notice(dev, "Entering standby power mode\n");
2044  
2045  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2046  	if (err_mask)
2047  		ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
2048  			    err_mask);
2049  }
2050  
2051  /**
2052   *	ata_dev_power_set_active -  Set a device power mode to active
2053   *	@dev: target device
2054   *
2055   *	Issue a VERIFY command to ensure that the device is in the
2056   *	active power mode. For a spun-down HDD (standby or idle power mode),
2057   *	the VERIFY command will complete after the disk spins up.
2058   *
2059   *	LOCKING:
2060   *	Kernel thread context (may sleep).
2061   */
2062  void ata_dev_power_set_active(struct ata_device *dev)
2063  {
2064  	struct ata_taskfile tf;
2065  	unsigned int err_mask;
2066  
2067  	/*
2068  	 * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
2069  	 * if supported by the device.
2070  	 */
2071  	if (!ata_dev_power_init_tf(dev, &tf, true))
2072  		return;
2073  
2074  	/*
2075  	 * Check the device power state & condition and force a spinup with
2076  	 * VERIFY command only if the drive is not already ACTIVE or IDLE.
2077  	 */
2078  	if (ata_dev_power_is_active(dev))
2079  		return;
2080  
2081  	ata_dev_notice(dev, "Entering active power mode\n");
2082  
2083  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2084  	if (err_mask)
2085  		ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
2086  			    err_mask);
2087  }
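
/*
 * Taken together, ata_dev_power_set_standby() and ata_dev_power_set_active()
 * bracket a power transition. A rough sketch of the sequence (simplified
 * from the EH/PM paths):
 *
 *	// entering system suspend or shutting down:
 *	ata_dev_power_set_standby(dev);		// spin the disk down
 *	// ...
 *	// resuming:
 *	ata_dev_power_set_active(dev);		// VERIFY forces a spin-up
 */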
2088  
2089  /**
2090   *	ata_read_log_page - read a specific log page
2091   *	@dev: target device
2092   *	@log: log to read
2093   *	@page: page to read
2094   *	@buf: buffer to store read page
2095   *	@sectors: number of sectors to read
2096   *
2097   *	Read log page using READ_LOG_EXT command.
2098   *
2099   *	LOCKING:
2100   *	Kernel thread context (may sleep).
2101   *
2102   *	RETURNS:
2103   *	0 on success, AC_ERR_* mask otherwise.
2104   */
2105  unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
2106  			       u8 page, void *buf, unsigned int sectors)
2107  {
2108  	unsigned long ap_flags = dev->link->ap->flags;
2109  	struct ata_taskfile tf;
2110  	unsigned int err_mask;
2111  	bool dma = false;
2112  
2113  	ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
2114  
2115  	/*
2116  	 * Return error without actually issuing the command on controllers
2117  	 * which, e.g., lock up on a read log page.
2118  	 */
2119  	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2120  		return AC_ERR_DEV;
2121  
2122  retry:
2123  	ata_tf_init(dev, &tf);
2124  	if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
2125  	    !(dev->quirks & ATA_QUIRK_NO_DMA_LOG)) {
2126  		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2127  		tf.protocol = ATA_PROT_DMA;
2128  		dma = true;
2129  	} else {
2130  		tf.command = ATA_CMD_READ_LOG_EXT;
2131  		tf.protocol = ATA_PROT_PIO;
2132  		dma = false;
2133  	}
2134  	tf.lbal = log;
2135  	tf.lbam = page;
2136  	tf.nsect = sectors;
2137  	tf.hob_nsect = sectors >> 8;
2138  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2139  
2140  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2141  				     buf, sectors * ATA_SECT_SIZE, 0);
2142  
2143  	if (err_mask) {
2144  		if (dma) {
2145  			dev->quirks |= ATA_QUIRK_NO_DMA_LOG;
2146  			if (!ata_port_is_frozen(dev->link->ap))
2147  				goto retry;
2148  		}
2149  		ata_dev_err(dev,
2150  			    "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
2151  			    (unsigned int)log, (unsigned int)page, err_mask);
2152  	}
2153  
2154  	return err_mask;
2155  }
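
/*
 * Illustrative sketch (hypothetical helper): probing one sector of a GPL
 * log into the device's scratch buffer. The log/page numbers come from the
 * ATA_LOG_* constants and @sectors counts 512-byte sectors.
 *
 *	static bool my_read_sata_settings(struct ata_device *dev)
 *	{
 *		unsigned int err_mask;
 *
 *		err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
 *					     ATA_LOG_SATA_SETTINGS,
 *					     dev->sector_buf, 1);
 *		return err_mask == 0;
 *	}
 */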
2156  
2157  static int ata_log_supported(struct ata_device *dev, u8 log)
2158  {
2159  	if (dev->quirks & ATA_QUIRK_NO_LOG_DIR)
2160  		return 0;
2161  
2162  	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->sector_buf, 1))
2163  		return 0;
2164  	return get_unaligned_le16(&dev->sector_buf[log * 2]);
2165  }
2166  
2167  static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2168  {
2169  	unsigned int err, i;
2170  
2171  	if (dev->quirks & ATA_QUIRK_NO_ID_DEV_LOG)
2172  		return false;
2173  
2174  	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2175  		/*
2176  		 * IDENTIFY DEVICE data log is defined as mandatory starting
2177  		 * with ACS-3 (ATA version 10). Warn about the missing log
2178  		 * for drives which implement this ATA level or above.
2179  		 */
2180  		if (ata_id_major_version(dev->id) >= 10)
2181  			ata_dev_warn(dev,
2182  				"ATA Identify Device Log not supported\n");
2183  		dev->quirks |= ATA_QUIRK_NO_ID_DEV_LOG;
2184  		return false;
2185  	}
2186  
2187  	/*
2188  	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2189  	 * supported.
2190  	 */
2191  	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0,
2192  				dev->sector_buf, 1);
2193  	if (err)
2194  		return false;
2195  
2196  	for (i = 0; i < dev->sector_buf[8]; i++) {
2197  		if (dev->sector_buf[9 + i] == page)
2198  			return true;
2199  	}
2200  
2201  	return false;
2202  }
2203  
2204  static int ata_do_link_spd_quirk(struct ata_device *dev)
2205  {
2206  	struct ata_link *plink = ata_dev_phys_link(dev);
2207  	u32 target, target_limit;
2208  
2209  	if (!sata_scr_valid(plink))
2210  		return 0;
2211  
2212  	if (dev->quirks & ATA_QUIRK_1_5_GBPS)
2213  		target = 1;
2214  	else
2215  		return 0;
2216  
2217  	target_limit = (1 << target) - 1;
2218  
2219  	/* if already on stricter limit, no need to push further */
2220  	if (plink->sata_spd_limit <= target_limit)
2221  		return 0;
2222  
2223  	plink->sata_spd_limit = target_limit;
2224  
2225  	/* Request another EH round by returning -EAGAIN if link is
2226  	 * going faster than the target speed.  Forward progress is
2227  	 * guaranteed by setting sata_spd_limit to target_limit above.
2228  	 */
2229  	if (plink->sata_spd > target) {
2230  		ata_dev_info(dev, "applying link speed limit quirk to %s\n",
2231  			     sata_spd_string(target));
2232  		return -EAGAIN;
2233  	}
2234  	return 0;
2235  }
2236  
2237  static inline bool ata_dev_knobble(struct ata_device *dev)
2238  {
2239  	struct ata_port *ap = dev->link->ap;
2240  
2241  	if (ata_dev_quirks(dev) & ATA_QUIRK_BRIDGE_OK)
2242  		return false;
2243  
2244  	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2245  }
2246  
2247  static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2248  {
2249  	unsigned int err_mask;
2250  
2251  	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2252  		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2253  		return;
2254  	}
2255  	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2256  				     0, dev->sector_buf, 1);
2257  	if (!err_mask) {
2258  		u8 *cmds = dev->ncq_send_recv_cmds;
2259  
2260  		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2261  		memcpy(cmds, dev->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2262  
2263  		if (dev->quirks & ATA_QUIRK_NO_NCQ_TRIM) {
2264  			ata_dev_dbg(dev, "disabling queued TRIM support\n");
2265  			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2266  				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2267  		}
2268  	}
2269  }
2270  
2271  static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2272  {
2273  	unsigned int err_mask;
2274  
2275  	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2276  		ata_dev_warn(dev,
2277  			     "NCQ Non-Data Log not supported\n");
2278  		return;
2279  	}
2280  	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2281  				     0, dev->sector_buf, 1);
2282  	if (!err_mask)
2283  		memcpy(dev->ncq_non_data_cmds, dev->sector_buf,
2284  		       ATA_LOG_NCQ_NON_DATA_SIZE);
2285  }
2286  
2287  static void ata_dev_config_ncq_prio(struct ata_device *dev)
2288  {
2289  	unsigned int err_mask;
2290  
2291  	if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2292  		return;
2293  
2294  	err_mask = ata_read_log_page(dev,
2295  				     ATA_LOG_IDENTIFY_DEVICE,
2296  				     ATA_LOG_SATA_SETTINGS,
2297  				     dev->sector_buf, 1);
2298  	if (err_mask)
2299  		goto not_supported;
2300  
2301  	if (!(dev->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
2302  		goto not_supported;
2303  
2304  	dev->flags |= ATA_DFLAG_NCQ_PRIO;
2305  
2306  	return;
2307  
2308  not_supported:
2309  	dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
2310  	dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2311  }
2312  
2313  static bool ata_dev_check_adapter(struct ata_device *dev,
2314  				  unsigned short vendor_id)
2315  {
2316  	struct pci_dev *pcidev = NULL;
2317  	struct device *parent_dev = NULL;
2318  
2319  	for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2320  	     parent_dev = parent_dev->parent) {
2321  		if (dev_is_pci(parent_dev)) {
2322  			pcidev = to_pci_dev(parent_dev);
2323  			if (pcidev->vendor == vendor_id)
2324  				return true;
2325  			break;
2326  		}
2327  	}
2328  
2329  	return false;
2330  }
2331  
2332  static int ata_dev_config_ncq(struct ata_device *dev,
2333  			       char *desc, size_t desc_sz)
2334  {
2335  	struct ata_port *ap = dev->link->ap;
2336  	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2337  	unsigned int err_mask;
2338  	char *aa_desc = "";
2339  
2340  	if (!ata_id_has_ncq(dev->id)) {
2341  		desc[0] = '\0';
2342  		return 0;
2343  	}
2344  	if (!IS_ENABLED(CONFIG_SATA_HOST))
2345  		return 0;
2346  	if (dev->quirks & ATA_QUIRK_NONCQ) {
2347  		snprintf(desc, desc_sz, "NCQ (not used)");
2348  		return 0;
2349  	}
2350  
2351  	if (dev->quirks & ATA_QUIRK_NO_NCQ_ON_ATI &&
2352  	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
2353  		snprintf(desc, desc_sz, "NCQ (not used)");
2354  		return 0;
2355  	}
2356  
2357  	if (ap->flags & ATA_FLAG_NCQ) {
2358  		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2359  		dev->flags |= ATA_DFLAG_NCQ;
2360  	}
2361  
2362  	if (!(dev->quirks & ATA_QUIRK_BROKEN_FPDMA_AA) &&
2363  		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2364  		ata_id_has_fpdma_aa(dev->id)) {
2365  		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2366  			SATA_FPDMA_AA);
2367  		if (err_mask) {
2368  			ata_dev_err(dev,
2369  				    "failed to enable AA (error_mask=0x%x)\n",
2370  				    err_mask);
2371  			if (err_mask != AC_ERR_DEV) {
2372  				dev->quirks |= ATA_QUIRK_BROKEN_FPDMA_AA;
2373  				return -EIO;
2374  			}
2375  		} else
2376  			aa_desc = ", AA";
2377  	}
2378  
2379  	if (hdepth >= ddepth)
2380  		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2381  	else
2382  		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2383  			ddepth, aa_desc);
2384  
2385  	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2386  		if (ata_id_has_ncq_send_and_recv(dev->id))
2387  			ata_dev_config_ncq_send_recv(dev);
2388  		if (ata_id_has_ncq_non_data(dev->id))
2389  			ata_dev_config_ncq_non_data(dev);
2390  		if (ata_id_has_ncq_prio(dev->id))
2391  			ata_dev_config_ncq_prio(dev);
2392  	}
2393  
2394  	return 0;
2395  }
2396  
2397  static void ata_dev_config_sense_reporting(struct ata_device *dev)
2398  {
2399  	unsigned int err_mask;
2400  
2401  	if (!ata_id_has_sense_reporting(dev->id))
2402  		return;
2403  
2404  	if (ata_id_sense_reporting_enabled(dev->id))
2405  		return;
2406  
2407  	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2408  	if (err_mask) {
2409  		ata_dev_dbg(dev,
2410  			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
2411  			    err_mask);
2412  	}
2413  }
2414  
2415  static void ata_dev_config_zac(struct ata_device *dev)
2416  {
2417  	unsigned int err_mask;
2418  	u8 *identify_buf = dev->sector_buf;
2419  
2420  	dev->zac_zones_optimal_open = U32_MAX;
2421  	dev->zac_zones_optimal_nonseq = U32_MAX;
2422  	dev->zac_zones_max_open = U32_MAX;
2423  
2424  	/*
2425  	 * Always set the 'ZAC' flag for Host-managed devices.
2426  	 */
2427  	if (dev->class == ATA_DEV_ZAC)
2428  		dev->flags |= ATA_DFLAG_ZAC;
2429  	else if (ata_id_zoned_cap(dev->id) == 0x01)
2430  		/*
2431  		 * Check for host-aware devices.
2432  		 */
2433  		dev->flags |= ATA_DFLAG_ZAC;
2434  
2435  	if (!(dev->flags & ATA_DFLAG_ZAC))
2436  		return;
2437  
2438  	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2439  		ata_dev_warn(dev,
2440  			     "ATA Zoned Information Log not supported\n");
2441  		return;
2442  	}
2443  
2444  	/*
2445  	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2446  	 */
2447  	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2448  				     ATA_LOG_ZONED_INFORMATION,
2449  				     identify_buf, 1);
2450  	if (!err_mask) {
2451  		u64 zoned_cap, opt_open, opt_nonseq, max_open;
2452  
2453  		zoned_cap = get_unaligned_le64(&identify_buf[8]);
2454  		if ((zoned_cap >> 63))
2455  			dev->zac_zoned_cap = (zoned_cap & 1);
2456  		opt_open = get_unaligned_le64(&identify_buf[24]);
2457  		if ((opt_open >> 63))
2458  			dev->zac_zones_optimal_open = (u32)opt_open;
2459  		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2460  		if ((opt_nonseq >> 63))
2461  			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2462  		max_open = get_unaligned_le64(&identify_buf[40]);
2463  		if ((max_open >> 63))
2464  			dev->zac_zones_max_open = (u32)max_open;
2465  	}
2466  }
2467  
2468  static void ata_dev_config_trusted(struct ata_device *dev)
2469  {
2470  	u64 trusted_cap;
2471  	unsigned int err;
2472  
2473  	if (!ata_id_has_trusted(dev->id))
2474  		return;
2475  
2476  	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2477  		ata_dev_warn(dev,
2478  			     "Security Log not supported\n");
2479  		return;
2480  	}
2481  
2482  	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2483  				dev->sector_buf, 1);
2484  	if (err)
2485  		return;
2486  
2487  	trusted_cap = get_unaligned_le64(&dev->sector_buf[40]);
2488  	if (!(trusted_cap & (1ULL << 63))) {
2489  		ata_dev_dbg(dev,
2490  			    "Trusted Computing capability qword not valid!\n");
2491  		return;
2492  	}
2493  
2494  	if (trusted_cap & (1 << 0))
2495  		dev->flags |= ATA_DFLAG_TRUSTED;
2496  }
2497  
2498  void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
2499  {
2500  	kfree(dev->cdl);
2501  	dev->cdl = NULL;
2502  }
2503  
2504  static int ata_dev_init_cdl_resources(struct ata_device *dev)
2505  {
2506  	struct ata_cdl *cdl = dev->cdl;
2507  	unsigned int err_mask;
2508  
2509  	if (!cdl) {
2510  		cdl = kzalloc(sizeof(*cdl), GFP_KERNEL);
2511  		if (!cdl)
2512  			return -ENOMEM;
2513  		dev->cdl = cdl;
2514  	}
2515  
2516  	err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, cdl->desc_log_buf,
2517  				     ATA_LOG_CDL_SIZE / ATA_SECT_SIZE);
2518  	if (err_mask) {
2519  		ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
2520  		ata_dev_cleanup_cdl_resources(dev);
2521  		return -EIO;
2522  	}
2523  
2524  	return 0;
2525  }
2526  
2527  static void ata_dev_config_cdl(struct ata_device *dev)
2528  {
2529  	unsigned int err_mask;
2530  	bool cdl_enabled;
2531  	u64 val;
2532  	int ret;
2533  
2534  	if (ata_id_major_version(dev->id) < 11)
2535  		goto not_supported;
2536  
2537  	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
2538  	    !ata_identify_page_supported(dev, ATA_LOG_SUPPORTED_CAPABILITIES) ||
2539  	    !ata_identify_page_supported(dev, ATA_LOG_CURRENT_SETTINGS))
2540  		goto not_supported;
2541  
2542  	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2543  				     ATA_LOG_SUPPORTED_CAPABILITIES,
2544  				     dev->sector_buf, 1);
2545  	if (err_mask)
2546  		goto not_supported;
2547  
2548  	/* Check Command Duration Limit Supported bits */
2549  	val = get_unaligned_le64(&dev->sector_buf[168]);
2550  	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
2551  		goto not_supported;
2552  
2553  	/* Warn the user if command duration guideline is not supported */
2554  	if (!(val & BIT_ULL(1)))
2555  		ata_dev_warn(dev,
2556  			"Command duration guideline is not supported\n");
2557  
2558  	/*
2559  	 * We must have support for the sense data for successful NCQ commands
2560  	 * log indicated by the successful NCQ command sense data supported bit.
2561  	 */
2562  	val = get_unaligned_le64(&dev->sector_buf[8]);
2563  	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) {
2564  		ata_dev_warn(dev,
2565  			"CDL supported but Successful NCQ Command Sense Data is not supported\n");
2566  		goto not_supported;
2567  	}
2568  
2569  	/* Without NCQ autosense, the successful NCQ commands log is useless. */
2570  	if (!ata_id_has_ncq_autosense(dev->id)) {
2571  		ata_dev_warn(dev,
2572  			"CDL supported but NCQ autosense is not supported\n");
2573  		goto not_supported;
2574  	}
2575  
2576  	/*
2577  	 * If CDL is marked as enabled, make sure the feature is enabled too.
2578  	 * Conversely, if CDL is disabled, make sure the feature is turned off.
2579  	 */
2580  	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2581  				     ATA_LOG_CURRENT_SETTINGS,
2582  				     dev->sector_buf, 1);
2583  	if (err_mask)
2584  		goto not_supported;
2585  
2586  	val = get_unaligned_le64(&dev->sector_buf[8]);
2587  	cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21);
2588  	if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
2589  		if (!cdl_enabled) {
2590  			/* Enable CDL on the device */
2591  			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 1);
2592  			if (err_mask) {
2593  				ata_dev_err(dev,
2594  					    "Enable CDL feature failed\n");
2595  				goto not_supported;
2596  			}
2597  		}
2598  	} else {
2599  		if (cdl_enabled) {
2600  			/* Disable CDL on the device */
2601  			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 0);
2602  			if (err_mask) {
2603  				ata_dev_err(dev,
2604  					    "Disable CDL feature failed\n");
2605  				goto not_supported;
2606  			}
2607  		}
2608  	}
2609  
2610  	/*
2611  	 * While CDL itself has to be enabled using sysfs, CDL requires that
2612  	 * sense data for successful NCQ commands is enabled to work properly.
2613  	 * Just like ata_dev_config_sense_reporting(), enable it unconditionally
2614  	 * if supported.
2615  	 */
2616  	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(18))) {
2617  		err_mask = ata_dev_set_feature(dev,
2618  					SETFEATURE_SENSE_DATA_SUCC_NCQ, 0x1);
2619  		if (err_mask) {
2620  			ata_dev_warn(dev,
2621  				     "failed to enable Sense Data for successful NCQ commands, Emask 0x%x\n",
2622  				     err_mask);
2623  			goto not_supported;
2624  		}
2625  	}
2626  
2627  	/* CDL is supported: allocate and initialize needed resources. */
2628  	ret = ata_dev_init_cdl_resources(dev);
2629  	if (ret) {
2630  		ata_dev_warn(dev, "Initialize CDL resources failed\n");
2631  		goto not_supported;
2632  	}
2633  
2634  	dev->flags |= ATA_DFLAG_CDL;
2635  
2636  	return;
2637  
2638  not_supported:
2639  	dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
2640  	ata_dev_cleanup_cdl_resources(dev);
2641  }
2642  
2643  static int ata_dev_config_lba(struct ata_device *dev)
2644  {
2645  	const u16 *id = dev->id;
2646  	const char *lba_desc;
2647  	char ncq_desc[32];
2648  	int ret;
2649  
2650  	dev->flags |= ATA_DFLAG_LBA;
2651  
2652  	if (ata_id_has_lba48(id)) {
2653  		lba_desc = "LBA48";
2654  		dev->flags |= ATA_DFLAG_LBA48;
2655  		if (dev->n_sectors >= (1UL << 28) &&
2656  		    ata_id_has_flush_ext(id))
2657  			dev->flags |= ATA_DFLAG_FLUSH_EXT;
2658  	} else {
2659  		lba_desc = "LBA";
2660  	}
2661  
2662  	/* config NCQ */
2663  	ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2664  
2665  	/* print device info to dmesg */
2666  	if (ata_dev_print_info(dev))
2667  		ata_dev_info(dev,
2668  			     "%llu sectors, multi %u: %s %s\n",
2669  			     (unsigned long long)dev->n_sectors,
2670  			     dev->multi_count, lba_desc, ncq_desc);
2671  
2672  	return ret;
2673  }
2674  
2675  static void ata_dev_config_chs(struct ata_device *dev)
2676  {
2677  	const u16 *id = dev->id;
2678  
2679  	if (ata_id_current_chs_valid(id)) {
2680  		/* Current CHS translation is valid. */
2681  		dev->cylinders = id[54];
2682  		dev->heads     = id[55];
2683  		dev->sectors   = id[56];
2684  	} else {
2685  		/* Default translation */
2686  		dev->cylinders	= id[1];
2687  		dev->heads	= id[3];
2688  		dev->sectors	= id[6];
2689  	}
2690  
2691  	/* print device info to dmesg */
2692  	if (ata_dev_print_info(dev))
2693  		ata_dev_info(dev,
2694  			     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2695  			     (unsigned long long)dev->n_sectors,
2696  			     dev->multi_count, dev->cylinders,
2697  			     dev->heads, dev->sectors);
2698  }
2699  
2700  static void ata_dev_config_fua(struct ata_device *dev)
2701  {
2702  	/* Ignore FUA support if its use is disabled globally */
2703  	if (!libata_fua)
2704  		goto nofua;
2705  
2706  	/* Ignore devices without support for WRITE DMA FUA EXT */
2707  	if (!(dev->flags & ATA_DFLAG_LBA48) || !ata_id_has_fua(dev->id))
2708  		goto nofua;
2709  
2710  	/* Ignore known bad devices and devices that lack NCQ support */
2711  	if (!ata_ncq_supported(dev) || (dev->quirks & ATA_QUIRK_NO_FUA))
2712  		goto nofua;
2713  
2714  	dev->flags |= ATA_DFLAG_FUA;
2715  
2716  	return;
2717  
2718  nofua:
2719  	dev->flags &= ~ATA_DFLAG_FUA;
2720  }
2721  
2722  static void ata_dev_config_devslp(struct ata_device *dev)
2723  {
2724  	u8 *sata_setting = dev->sector_buf;
2725  	unsigned int err_mask;
2726  	int i, j;
2727  
2728  	/*
2729  	 * Check device sleep capability. Get DevSlp timing variables
2730  	 * from SATA Settings page of Identify Device Data Log.
2731  	 */
2732  	if (!ata_id_has_devslp(dev->id) ||
2733  	    !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2734  		return;
2735  
2736  	err_mask = ata_read_log_page(dev,
2737  				     ATA_LOG_IDENTIFY_DEVICE,
2738  				     ATA_LOG_SATA_SETTINGS,
2739  				     sata_setting, 1);
2740  	if (err_mask)
2741  		return;
2742  
2743  	dev->flags |= ATA_DFLAG_DEVSLP;
2744  	for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2745  		j = ATA_LOG_DEVSLP_OFFSET + i;
2746  		dev->devslp_timing[i] = sata_setting[j];
2747  	}
2748  }
2749  
2750  static void ata_dev_config_cpr(struct ata_device *dev)
2751  {
2752  	unsigned int err_mask;
2753  	size_t buf_len;
2754  	int i, nr_cpr = 0;
2755  	struct ata_cpr_log *cpr_log = NULL;
2756  	u8 *desc, *buf = NULL;
2757  
2758  	if (ata_id_major_version(dev->id) < 11)
2759  		goto out;
2760  
2761  	buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
2762  	if (buf_len == 0)
2763  		goto out;
2764  
2765  	/*
2766  	 * Read the concurrent positioning ranges log (0x47). We can have at
2767  	 * most 255 32B range descriptors plus a 64B header. This log varies in
2768  	 * size, so use the size reported in the GPL directory. Reading beyond
2769  	 * the supported length will result in an error.
2770  	 */
2771  	buf_len <<= 9;
2772  	buf = kzalloc(buf_len, GFP_KERNEL);
2773  	if (!buf)
2774  		goto out;
2775  
2776  	err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
2777  				     0, buf, buf_len >> 9);
2778  	if (err_mask)
2779  		goto out;
2780  
2781  	nr_cpr = buf[0];
2782  	if (!nr_cpr)
2783  		goto out;
2784  
2785  	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
2786  	if (!cpr_log)
2787  		goto out;
2788  
2789  	cpr_log->nr_cpr = nr_cpr;
2790  	desc = &buf[64];
2791  	for (i = 0; i < nr_cpr; i++, desc += 32) {
2792  		cpr_log->cpr[i].num = desc[0];
2793  		cpr_log->cpr[i].num_storage_elements = desc[1];
2794  		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
2795  		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
2796  	}
2797  
2798  out:
2799  	swap(dev->cpr_log, cpr_log);
2800  	kfree(cpr_log);
2801  	kfree(buf);
2802  }
2803  
2804  static void ata_dev_print_features(struct ata_device *dev)
2805  {
2806  	if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
2807  		return;
2808  
2809  	ata_dev_info(dev,
2810  		     "Features:%s%s%s%s%s%s%s%s\n",
2811  		     dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
2812  		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
2813  		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
2814  		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
2815  		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
2816  		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
2817  		     dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
2818  		     dev->cpr_log ? " CPR" : "");
2819  }
2820  
2821  /**
2822   *	ata_dev_configure - Configure the specified ATA/ATAPI device
2823   *	@dev: Target device to configure
2824   *
2825   *	Configure @dev according to @dev->id.  Generic and low-level
2826   *	driver specific fixups are also applied.
2827   *
2828   *	LOCKING:
2829   *	Kernel thread context (may sleep)
2830   *
2831   *	RETURNS:
2832   *	0 on success, -errno otherwise
2833   */
2834  int ata_dev_configure(struct ata_device *dev)
2835  {
2836  	struct ata_port *ap = dev->link->ap;
2837  	bool print_info = ata_dev_print_info(dev);
2838  	const u16 *id = dev->id;
2839  	unsigned int xfer_mask;
2840  	unsigned int err_mask;
2841  	char revbuf[7];		/* XYZ-99\0 */
2842  	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2843  	char modelbuf[ATA_ID_PROD_LEN+1];
2844  	int rc;
2845  
2846  	if (!ata_dev_enabled(dev)) {
2847  		ata_dev_dbg(dev, "no device\n");
2848  		return 0;
2849  	}
2850  
2851  	/* Set quirks */
2852  	dev->quirks |= ata_dev_quirks(dev);
2853  	ata_force_quirks(dev);
2854  
2855  	if (dev->quirks & ATA_QUIRK_DISABLE) {
2856  		ata_dev_info(dev, "unsupported device, disabling\n");
2857  		ata_dev_disable(dev);
2858  		return 0;
2859  	}
2860  
2861  	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2862  	    dev->class == ATA_DEV_ATAPI) {
2863  		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2864  			     atapi_enabled ? "not supported with this driver"
2865  			     : "disabled");
2866  		ata_dev_disable(dev);
2867  		return 0;
2868  	}
2869  
2870  	rc = ata_do_link_spd_quirk(dev);
2871  	if (rc)
2872  		return rc;
2873  
2874  	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2875  	if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
2876  	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2877  		dev->quirks |= ATA_QUIRK_NOLPM;
2878  
2879  	if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI &&
2880  	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
2881  		dev->quirks |= ATA_QUIRK_NOLPM;
2882  
2883  	if (ap->flags & ATA_FLAG_NO_LPM)
2884  		dev->quirks |= ATA_QUIRK_NOLPM;
2885  
2886  	if (dev->quirks & ATA_QUIRK_NOLPM) {
2887  		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2888  		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2889  	}
2890  
2891  	/* let ACPI work its magic */
2892  	rc = ata_acpi_on_devcfg(dev);
2893  	if (rc)
2894  		return rc;
2895  
2896  	/* massage HPA, do it early as it might change IDENTIFY data */
2897  	rc = ata_hpa_resize(dev);
2898  	if (rc)
2899  		return rc;
2900  
2901  	/* print device capabilities */
2902  	ata_dev_dbg(dev,
2903  		    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2904  		    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2905  		    __func__,
2906  		    id[49], id[82], id[83], id[84],
2907  		    id[85], id[86], id[87], id[88]);
2908  
2909  	/* initialize to-be-configured parameters */
2910  	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2911  	dev->max_sectors = 0;
2912  	dev->cdb_len = 0;
2913  	dev->n_sectors = 0;
2914  	dev->cylinders = 0;
2915  	dev->heads = 0;
2916  	dev->sectors = 0;
2917  	dev->multi_count = 0;
2918  
2919  	/*
2920  	 * common ATA, ATAPI feature tests
2921  	 */
2922  
2923  	/* find max transfer mode; for printk only */
2924  	xfer_mask = ata_id_xfermask(id);
2925  
2926  	ata_dump_id(dev, id);
2927  
2928  	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2929  	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2930  			sizeof(fwrevbuf));
2931  
2932  	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2933  			sizeof(modelbuf));
2934  
2935  	/* ATA-specific feature tests */
2936  	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2937  		if (ata_id_is_cfa(id)) {
2938  			/* CPRM may make this media unusable */
2939  			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2940  				ata_dev_warn(dev,
2941  	"supports DRM functions and may not be fully accessible\n");
2942  			snprintf(revbuf, 7, "CFA");
2943  		} else {
2944  			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2945  			/* Warn the user if the device has TPM extensions */
2946  			if (ata_id_has_tpm(id))
2947  				ata_dev_warn(dev,
2948  	"supports DRM functions and may not be fully accessible\n");
2949  		}
2950  
2951  		dev->n_sectors = ata_id_n_sectors(id);
2952  
2953  		/* get current R/W Multiple count setting */
2954  		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2955  			unsigned int max = dev->id[47] & 0xff;
2956  			unsigned int cnt = dev->id[59] & 0xff;
2957  			/* only recognize/allow powers of two here */
2958  			if (is_power_of_2(max) && is_power_of_2(cnt))
2959  				if (cnt <= max)
2960  					dev->multi_count = cnt;
2961  		}
2962  
2963  		/* print device info to dmesg */
2964  		if (print_info)
2965  			ata_dev_info(dev, "%s: %s, %s, max %s\n",
2966  				     revbuf, modelbuf, fwrevbuf,
2967  				     ata_mode_string(xfer_mask));
2968  
2969  		if (ata_id_has_lba(id)) {
2970  			rc = ata_dev_config_lba(dev);
2971  			if (rc)
2972  				return rc;
2973  		} else {
2974  			ata_dev_config_chs(dev);
2975  		}
2976  
2977  		ata_dev_config_fua(dev);
2978  		ata_dev_config_devslp(dev);
2979  		ata_dev_config_sense_reporting(dev);
2980  		ata_dev_config_zac(dev);
2981  		ata_dev_config_trusted(dev);
2982  		ata_dev_config_cpr(dev);
2983  		ata_dev_config_cdl(dev);
2984  		dev->cdb_len = 32;
2985  
2986  		if (print_info)
2987  			ata_dev_print_features(dev);
2988  	}
2989  
2990  	/* ATAPI-specific feature tests */
2991  	else if (dev->class == ATA_DEV_ATAPI) {
2992  		const char *cdb_intr_string = "";
2993  		const char *atapi_an_string = "";
2994  		const char *dma_dir_string = "";
2995  		u32 sntf;
2996  
2997  		rc = atapi_cdb_len(id);
2998  		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2999  			ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
3000  			rc = -EINVAL;
3001  			goto err_out_nosup;
3002  		}
3003  		dev->cdb_len = (unsigned int) rc;
3004  
3005  		/* Enable ATAPI AN if both the host and device have
3006  		 * the support.  If PMP is attached, SNTF is required
3007  		 * to enable ATAPI AN to discern between PHY status
3008  		 * changed notifications and ATAPI ANs.
3009  		 */
3010  		if (atapi_an &&
3011  		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
3012  		    (!sata_pmp_attached(ap) ||
3013  		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
3014  			/* issue SET feature command to turn this on */
3015  			err_mask = ata_dev_set_feature(dev,
3016  					SETFEATURES_SATA_ENABLE, SATA_AN);
3017  			if (err_mask)
3018  				ata_dev_err(dev,
3019  					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
3020  					    err_mask);
3021  			else {
3022  				dev->flags |= ATA_DFLAG_AN;
3023  				atapi_an_string = ", ATAPI AN";
3024  			}
3025  		}
3026  
3027  		if (ata_id_cdb_intr(dev->id)) {
3028  			dev->flags |= ATA_DFLAG_CDB_INTR;
3029  			cdb_intr_string = ", CDB intr";
3030  		}
3031  
3032  		if (atapi_dmadir || (dev->quirks & ATA_QUIRK_ATAPI_DMADIR) ||
3033  		    atapi_id_dmadir(dev->id)) {
3034  			dev->flags |= ATA_DFLAG_DMADIR;
3035  			dma_dir_string = ", DMADIR";
3036  		}
3037  
3038  		if (ata_id_has_da(dev->id)) {
3039  			dev->flags |= ATA_DFLAG_DA;
3040  			zpodd_init(dev);
3041  		}
3042  
3043  		/* print device info to dmesg */
3044  		if (print_info)
3045  			ata_dev_info(dev,
3046  				     "ATAPI: %s, %s, max %s%s%s%s\n",
3047  				     modelbuf, fwrevbuf,
3048  				     ata_mode_string(xfer_mask),
3049  				     cdb_intr_string, atapi_an_string,
3050  				     dma_dir_string);
3051  	}
3052  
3053  	/* determine max_sectors */
3054  	dev->max_sectors = ATA_MAX_SECTORS;
3055  	if (dev->flags & ATA_DFLAG_LBA48)
3056  		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
3057  
3058  	/* Limit PATA drive on SATA cable bridge transfers to udma5,
3059  	   200 sectors */
3060  	if (ata_dev_knobble(dev)) {
3061  		if (print_info)
3062  			ata_dev_info(dev, "applying bridge limits\n");
3063  		dev->udma_mask &= ATA_UDMA5;
3064  		dev->max_sectors = ATA_MAX_SECTORS;
3065  	}
3066  
3067  	if ((dev->class == ATA_DEV_ATAPI) &&
3068  	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
3069  		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
3070  		dev->quirks |= ATA_QUIRK_STUCK_ERR;
3071  	}
3072  
3073  	if (dev->quirks & ATA_QUIRK_MAX_SEC_128)
3074  		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
3075  					 dev->max_sectors);
3076  
3077  	if (dev->quirks & ATA_QUIRK_MAX_SEC_1024)
3078  		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
3079  					 dev->max_sectors);
3080  
3081  	if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48)
3082  		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
3083  
3084  	if (ap->ops->dev_config)
3085  		ap->ops->dev_config(dev);
3086  
3087  	if (dev->quirks & ATA_QUIRK_DIAGNOSTIC) {
3088  		/* Let the user know. We don't want to disallow opens for
3089  		   rescue purposes, or in case the vendor is just a blithering
3090  		   idiot. Do this after the dev_config call as some controllers
3091  		   with buggy firmware may want to avoid reporting false device
3092  		   bugs */
3093  
3094  		if (print_info) {
3095  			ata_dev_warn(dev,
3096  "Drive reports diagnostics failure. This may indicate a drive\n");
3097  			ata_dev_warn(dev,
3098  "fault or invalid emulation. Contact drive vendor for information.\n");
3099  		}
3100  	}
3101  
3102  	if ((dev->quirks & ATA_QUIRK_FIRMWARE_WARN) && print_info) {
3103  		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
3104  		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
3105  	}
3106  
3107  	return 0;
3108  
3109  err_out_nosup:
3110  	return rc;
3111  }
3112  
3113  /**
3114   *	ata_cable_40wire	-	return 40 wire cable type
3115   *	@ap: port
3116   *
3117   *	Helper method for drivers which want to hardwire 40 wire cable
3118   *	detection.
3119   */
3120  
3121  int ata_cable_40wire(struct ata_port *ap)
3122  {
3123  	return ATA_CBL_PATA40;
3124  }
3125  EXPORT_SYMBOL_GPL(ata_cable_40wire);
3126  
3127  /**
3128   *	ata_cable_80wire	-	return 80 wire cable type
3129   *	@ap: port
3130   *
3131   *	Helper method for drivers which want to hardwire 80 wire cable
3132   *	detection.
3133   */
3134  
3135  int ata_cable_80wire(struct ata_port *ap)
3136  {
3137  	return ATA_CBL_PATA80;
3138  }
3139  EXPORT_SYMBOL_GPL(ata_cable_80wire);
3140  
3141  /**
3142   *	ata_cable_unknown	-	return unknown PATA cable.
3143   *	@ap: port
3144   *
3145   *	Helper method for drivers which have no PATA cable detection.
3146   */
3147  
3148  int ata_cable_unknown(struct ata_port *ap)
3149  {
3150  	return ATA_CBL_PATA_UNK;
3151  }
3152  EXPORT_SYMBOL_GPL(ata_cable_unknown);
3153  
3154  /**
3155   *	ata_cable_ignore	-	return ignored PATA cable.
3156   *	@ap: port
3157   *
3158   *	Helper method for drivers which don't use cable type to limit
3159   *	transfer mode.
3160   */
3161  int ata_cable_ignore(struct ata_port *ap)
3162  {
3163  	return ATA_CBL_PATA_IGN;
3164  }
3165  EXPORT_SYMBOL_GPL(ata_cable_ignore);
3166  
3167  /**
3168   *	ata_cable_sata	-	return SATA cable type
3169   *	@ap: port
3170   *
3171   *	Helper method for drivers which have SATA cables
3172   */
3173  
3174  int ata_cable_sata(struct ata_port *ap)
3175  {
3176  	return ATA_CBL_SATA;
3177  }
3178  EXPORT_SYMBOL_GPL(ata_cable_sata);
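
/*
 * The cable helpers above are meant to be plugged directly into a driver's
 * port operations as the ->cable_detect() method. A minimal sketch for a
 * hypothetical controller with hardwired 40-wire cabling (my_set_piomode()
 * is assumed to exist elsewhere in that driver):
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= my_set_piomode,
 *	};
 */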
3179  
3180  /**
3181   *	sata_print_link_status - Print SATA link status
3182   *	@link: SATA link to printk link status about
3183   *
3184   *	This function prints link speed and status of a SATA link.
3185   *
3186   *	LOCKING:
3187   *	None.
3188   */
3189  static void sata_print_link_status(struct ata_link *link)
3190  {
3191  	u32 sstatus, scontrol, tmp;
3192  
3193  	if (sata_scr_read(link, SCR_STATUS, &sstatus))
3194  		return;
3195  	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3196  		return;
3197  
3198  	if (ata_phys_link_online(link)) {
3199  		tmp = (sstatus >> 4) & 0xf;
3200  		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3201  			      sata_spd_string(tmp), sstatus, scontrol);
3202  	} else {
3203  		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3204  			      sstatus, scontrol);
3205  	}
3206  }
3207  
3208  /**
3209   *	ata_dev_pair		-	return other device on cable
3210   *	@adev: device
3211   *
3212   *	Obtain the other device on the same cable; returns NULL if
3213   *	no such device is present.
3214   */
3215  
3216  struct ata_device *ata_dev_pair(struct ata_device *adev)
3217  {
3218  	struct ata_link *link = adev->link;
3219  	struct ata_device *pair = &link->device[1 - adev->devno];
3220  	if (!ata_dev_enabled(pair))
3221  		return NULL;
3222  	return pair;
3223  }
3224  EXPORT_SYMBOL_GPL(ata_dev_pair);
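
/*
 * Illustrative use (hypothetical driver code): PATA controllers with one
 * timing register per channel often have to honour the slower of the two
 * devices sharing the cable:
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *	u8 mode = adev->pio_mode;
 *
 *	if (pair && pair->pio_mode < mode)
 *		mode = pair->pio_mode;	// clamp to the slower device
 */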
3225  
3226  #ifdef CONFIG_ATA_ACPI
3227  /**
3228   *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3229   *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3230   *	@cycle: cycle duration in ns
3231   *
3232   *	Return matching xfer mode for @cycle.  The returned mode is of
3233   *	the transfer type specified by @xfer_shift.  If @cycle is too
3234   *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3235   *	than the fastest known mode, the fastest mode is returned.
3236   *
3237   *	LOCKING:
3238   *	None.
3239   *
3240   *	RETURNS:
3241   *	Matching xfer_mode, 0xff if no match found.
3242   */
3243  u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3244  {
3245  	u8 base_mode = 0xff, last_mode = 0xff;
3246  	const struct ata_xfer_ent *ent;
3247  	const struct ata_timing *t;
3248  
3249  	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3250  		if (ent->shift == xfer_shift)
3251  			base_mode = ent->base;
3252  
3253  	for (t = ata_timing_find_mode(base_mode);
3254  	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3255  		unsigned short this_cycle;
3256  
3257  		switch (xfer_shift) {
3258  		case ATA_SHIFT_PIO:
3259  		case ATA_SHIFT_MWDMA:
3260  			this_cycle = t->cycle;
3261  			break;
3262  		case ATA_SHIFT_UDMA:
3263  			this_cycle = t->udma;
3264  			break;
3265  		default:
3266  			return 0xff;
3267  		}
3268  
3269  		if (cycle > this_cycle)
3270  			break;
3271  
3272  		last_mode = t->mode;
3273  	}
3274  
3275  	return last_mode;
3276  }
3277  #endif
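
/*
 * Illustrative call (a sketch along the lines of the ACPI glue): mapping a
 * firmware-reported PIO cycle time in nanoseconds back to a transfer mode.
 * The cycle_ns variable is assumed to hold that reported value.
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_PIO, cycle_ns);
 *
 *	if (mode == 0xff)
 *		// too slow even for the slowest mode of this type
 */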
3278  
3279  /**
3280   *	ata_down_xfermask_limit - adjust dev xfer masks downward
3281   *	@dev: Device to adjust xfer masks
3282   *	@sel: ATA_DNXFER_* selector
3283   *
3284   *	Adjust xfer masks of @dev downward.  Note that this function
3285   *	does not apply the change.  Invoking ata_set_mode() afterwards
3286   *	will apply the limit.
3287   *
3288   *	LOCKING:
3289   *	Inherited from caller.
3290   *
3291   *	RETURNS:
3292   *	0 on success, negative errno on failure
3293   */
3294  int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3295  {
3296  	char buf[32];
3297  	unsigned int orig_mask, xfer_mask;
3298  	unsigned int pio_mask, mwdma_mask, udma_mask;
3299  	int quiet, highbit;
3300  
3301  	quiet = !!(sel & ATA_DNXFER_QUIET);
3302  	sel &= ~ATA_DNXFER_QUIET;
3303  
3304  	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3305  						  dev->mwdma_mask,
3306  						  dev->udma_mask);
3307  	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3308  
3309  	switch (sel) {
3310  	case ATA_DNXFER_PIO:
3311  		highbit = fls(pio_mask) - 1;
3312  		pio_mask &= ~(1 << highbit);
3313  		break;
3314  
3315  	case ATA_DNXFER_DMA:
3316  		if (udma_mask) {
3317  			highbit = fls(udma_mask) - 1;
3318  			udma_mask &= ~(1 << highbit);
3319  			if (!udma_mask)
3320  				return -ENOENT;
3321  		} else if (mwdma_mask) {
3322  			highbit = fls(mwdma_mask) - 1;
3323  			mwdma_mask &= ~(1 << highbit);
3324  			if (!mwdma_mask)
3325  				return -ENOENT;
3326  		}
3327  		break;
3328  
3329  	case ATA_DNXFER_40C:
3330  		udma_mask &= ATA_UDMA_MASK_40C;
3331  		break;
3332  
3333  	case ATA_DNXFER_FORCE_PIO0:
3334  		pio_mask &= 1;
3335  		fallthrough;
3336  	case ATA_DNXFER_FORCE_PIO:
3337  		mwdma_mask = 0;
3338  		udma_mask = 0;
3339  		break;
3340  
3341  	default:
3342  		BUG();
3343  	}
3344  
3345  	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3346  
3347  	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3348  		return -ENOENT;
3349  
3350  	if (!quiet) {
3351  		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3352  			snprintf(buf, sizeof(buf), "%s:%s",
3353  				 ata_mode_string(xfer_mask),
3354  				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3355  		else
3356  			snprintf(buf, sizeof(buf), "%s",
3357  				 ata_mode_string(xfer_mask));
3358  
3359  		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3360  	}
3361  
3362  	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3363  			    &dev->udma_mask);
3364  
3365  	return 0;
3366  }
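
/*
 * Illustrative sketch: an error-handling path stepping a misbehaving
 * device down one PIO mode. Note that the new limit only takes effect
 * once the set-mode machinery is re-run:
 *
 *	if (ata_down_xfermask_limit(dev, ATA_DNXFER_PIO))
 *		return -EIO;	// already at the lowest usable mode
 *	// ... EH later re-applies modes, e.g. via ata_do_set_mode()
 */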
3367  
3368  static int ata_dev_set_mode(struct ata_device *dev)
3369  {
3370  	struct ata_port *ap = dev->link->ap;
3371  	struct ata_eh_context *ehc = &dev->link->eh_context;
3372  	const bool nosetxfer = dev->quirks & ATA_QUIRK_NOSETXFER;
3373  	const char *dev_err_whine = "";
3374  	int ign_dev_err = 0;
3375  	unsigned int err_mask = 0;
3376  	int rc;
3377  
3378  	dev->flags &= ~ATA_DFLAG_PIO;
3379  	if (dev->xfer_shift == ATA_SHIFT_PIO)
3380  		dev->flags |= ATA_DFLAG_PIO;
3381  
3382  	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3383  		dev_err_whine = " (SET_XFERMODE skipped)";
3384  	else {
3385  		if (nosetxfer)
3386  			ata_dev_warn(dev,
3387  				     "NOSETXFER but PATA detected - can't "
3388  				     "skip SETXFER, might malfunction\n");
3389  		err_mask = ata_dev_set_xfermode(dev);
3390  	}
3391  
3392  	if (err_mask & ~AC_ERR_DEV)
3393  		goto fail;
3394  
3395  	/* revalidate */
3396  	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3397  	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3398  	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3399  	if (rc)
3400  		return rc;
3401  
3402  	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3403  		/* Old CFA may refuse this command, which is just fine */
3404  		if (ata_id_is_cfa(dev->id))
3405  			ign_dev_err = 1;
3406  		/* Catch several broken garbage emulations plus some pre
3407  		   ATA devices */
3408  		if (ata_id_major_version(dev->id) == 0 &&
3409  					dev->pio_mode <= XFER_PIO_2)
3410  			ign_dev_err = 1;
3411  		/* Some very old devices and some bad newer ones fail
3412  		   any kind of SET_XFERMODE request but support PIO0-2
3413  		   timings and no IORDY */
3414  		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3415  			ign_dev_err = 1;
3416  	}
3417  	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3418  	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3419  	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3420  	    dev->dma_mode == XFER_MW_DMA_0 &&
3421  	    (dev->id[63] >> 8) & 1)
3422  		ign_dev_err = 1;
3423  
3424  	/* if the device is actually configured correctly, ignore dev err */
3425  	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3426  		ign_dev_err = 1;
3427  
3428  	if (err_mask & AC_ERR_DEV) {
3429  		if (!ign_dev_err)
3430  			goto fail;
3431  		else
3432  			dev_err_whine = " (device error ignored)";
3433  	}
3434  
3435  	ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
3436  		    dev->xfer_shift, (int)dev->xfer_mode);
3437  
3438  	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3439  	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
3440  		ata_dev_info(dev, "configured for %s%s\n",
3441  			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3442  			     dev_err_whine);
3443  
3444  	return 0;
3445  
3446   fail:
3447  	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3448  	return -EIO;
3449  }
3450  
3451  /**
3452   *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3453   *	@link: link on which timings will be programmed
3454   *	@r_failed_dev: out parameter for failed device
3455   *
3456   *	Standard implementation of the function used to tune and set
3457   *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3458   *	ata_dev_set_mode() fails, pointer to the failing device is
3459   *	returned in @r_failed_dev.
3460   *
3461   *	LOCKING:
3462   *	PCI/etc. bus probe sem.
3463   *
3464   *	RETURNS:
3465   *	0 on success, negative errno otherwise
3466   */
3467  
3468  int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3469  {
3470  	struct ata_port *ap = link->ap;
3471  	struct ata_device *dev;
3472  	int rc = 0, used_dma = 0, found = 0;
3473  
3474  	/* step 1: calculate xfer_mask */
3475  	ata_for_each_dev(dev, link, ENABLED) {
3476  		unsigned int pio_mask, dma_mask;
3477  		unsigned int mode_mask;
3478  
3479  		mode_mask = ATA_DMA_MASK_ATA;
3480  		if (dev->class == ATA_DEV_ATAPI)
3481  			mode_mask = ATA_DMA_MASK_ATAPI;
3482  		else if (ata_id_is_cfa(dev->id))
3483  			mode_mask = ATA_DMA_MASK_CFA;
3484  
3485  		ata_dev_xfermask(dev);
3486  		ata_force_xfermask(dev);
3487  
3488  		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3489  
3490  		if (libata_dma_mask & mode_mask)
3491  			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3492  						     dev->udma_mask);
3493  		else
3494  			dma_mask = 0;
3495  
3496  		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3497  		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3498  
3499  		found = 1;
3500  		if (ata_dma_enabled(dev))
3501  			used_dma = 1;
3502  	}
3503  	if (!found)
3504  		goto out;
3505  
3506  	/* step 2: always set host PIO timings */
3507  	ata_for_each_dev(dev, link, ENABLED) {
3508  		if (dev->pio_mode == 0xff) {
3509  			ata_dev_warn(dev, "no PIO support\n");
3510  			rc = -EINVAL;
3511  			goto out;
3512  		}
3513  
3514  		dev->xfer_mode = dev->pio_mode;
3515  		dev->xfer_shift = ATA_SHIFT_PIO;
3516  		if (ap->ops->set_piomode)
3517  			ap->ops->set_piomode(ap, dev);
3518  	}
3519  
3520  	/* step 3: set host DMA timings */
3521  	ata_for_each_dev(dev, link, ENABLED) {
3522  		if (!ata_dma_enabled(dev))
3523  			continue;
3524  
3525  		dev->xfer_mode = dev->dma_mode;
3526  		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3527  		if (ap->ops->set_dmamode)
3528  			ap->ops->set_dmamode(ap, dev);
3529  	}
3530  
3531  	/* step 4: update devices' xfer mode */
3532  	ata_for_each_dev(dev, link, ENABLED) {
3533  		rc = ata_dev_set_mode(dev);
3534  		if (rc)
3535  			goto out;
3536  	}
3537  
3538  	/* Record simplex status. If we selected DMA then the other
3539  	 * host channels are not permitted to do so.
3540  	 */
3541  	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3542  		ap->host->simplex_claimed = ap;
3543  
3544   out:
3545  	if (rc)
3546  		*r_failed_dev = dev;
3547  	return rc;
3548  }
3549  EXPORT_SYMBOL_GPL(ata_do_set_mode);
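/*
 * Example (illustrative sketch, not part of this file): ata_do_set_mode()
 * is the default implementation behind the ->set_mode hook.  A hypothetical
 * LLD that must do controller-specific work around mode programming could
 * wrap it like this ("my_set_mode" is a made-up name):
 */
#if 0	/* sketch only, not compiled */
static int my_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	/* apply controller-specific pre-tuning here, then defer to libata */
	return ata_do_set_mode(link, r_failed_dev);
}
#endif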
3550  
3551  /**
3552   *	ata_wait_ready - wait for link to become ready
3553   *	@link: link to be waited on
3554   *	@deadline: deadline jiffies for the operation
3555   *	@check_ready: callback to check link readiness
3556   *
3557   *	Wait for @link to become ready.  @check_ready should return a
3558   *	positive number if @link is ready, 0 if it isn't, -ENODEV if the
3559   *	link doesn't seem to be occupied, or another errno for other error
3560   *	conditions.
3561   *
3562   *	Transient -ENODEV conditions are allowed for
3563   *	ATA_TMOUT_FF_WAIT.
3564   *
3565   *	LOCKING:
3566   *	EH context.
3567   *
3568   *	RETURNS:
3569   *	0 if @link is ready before @deadline; otherwise, -errno.
3570   */
3571  int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3572  		   int (*check_ready)(struct ata_link *link))
3573  {
3574  	unsigned long start = jiffies;
3575  	unsigned long nodev_deadline;
3576  	int warned = 0;
3577  
3578  	/* choose which 0xff timeout to use, read comment in libata.h */
3579  	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3580  		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3581  	else
3582  		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3583  
3584  	/* Slave readiness can't be tested separately from the master.  On
3585  	 * M/S emulation configurations, this function should be called
3586  	 * only on the master, and it will handle both master and slave.
3587  	 */
3588  	WARN_ON(link == link->ap->slave_link);
3589  
3590  	if (time_after(nodev_deadline, deadline))
3591  		nodev_deadline = deadline;
3592  
3593  	while (1) {
3594  		unsigned long now = jiffies;
3595  		int ready, tmp;
3596  
3597  		ready = tmp = check_ready(link);
3598  		if (ready > 0)
3599  			return 0;
3600  
3601  		/*
3602  		 * -ENODEV could be transient.  Ignore -ENODEV if link
3603  		 * is online.  Also, some SATA devices take a long
3604  		 * time to clear 0xff after reset.  Wait for
3605  		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3606  		 * offline.
3607  		 *
3608  		 * Note that some PATA controllers (pata_ali) explode
3609  		 * if status register is read more than once when
3610  		 * there's no device attached.
3611  		 */
3612  		if (ready == -ENODEV) {
3613  			if (ata_link_online(link))
3614  				ready = 0;
3615  			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3616  				 !ata_link_offline(link) &&
3617  				 time_before(now, nodev_deadline))
3618  				ready = 0;
3619  		}
3620  
3621  		if (ready)
3622  			return ready;
3623  		if (time_after(now, deadline))
3624  			return -EBUSY;
3625  
3626  		if (!warned && time_after(now, start + 5 * HZ) &&
3627  		    (deadline - now > 3 * HZ)) {
3628  			ata_link_warn(link,
3629  				"link is slow to respond, please be patient "
3630  				"(ready=%d)\n", tmp);
3631  			warned = 1;
3632  		}
3633  
3634  		ata_msleep(link->ap, 50);
3635  	}
3636  }
3637  
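/*
 * Example (illustrative sketch): a typical @check_ready callback samples the
 * device status and feeds it to ata_check_ready(), as AHCI-style drivers do.
 * "my_read_status" below is a hypothetical MMIO helper.
 */
#if 0	/* sketch only, not compiled */
static int my_check_ready(struct ata_link *link)
{
	u8 status = my_read_status(link->ap);	/* hypothetical register read */

	/* >0 ready, 0 still busy, -ENODEV when status reads 0xff */
	return ata_check_ready(status);
}
#endif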
3638  /**
3639   *	ata_wait_after_reset - wait for link to become ready after reset
3640   *	@link: link to be waited on
3641   *	@deadline: deadline jiffies for the operation
3642   *	@check_ready: callback to check link readiness
3643   *
3644   *	Wait for @link to become ready after reset.
3645   *
3646   *	LOCKING:
3647   *	EH context.
3648   *
3649   *	RETURNS:
3650   *	0 if @link is ready before @deadline; otherwise, -errno.
3651   */
3652  int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3653  				int (*check_ready)(struct ata_link *link))
3654  {
3655  	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3656  
3657  	return ata_wait_ready(link, deadline, check_ready);
3658  }
3659  EXPORT_SYMBOL_GPL(ata_wait_after_reset);
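/*
 * Example (illustrative sketch): reset methods typically finish with this
 * helper.  Reusing the hypothetical my_check_ready() above, the tail of a
 * softreset method might read:
 */
#if 0	/* sketch only, not compiled */
	/* ... issue the reset sequence, then wait for the link: */
	rc = ata_wait_after_reset(link, deadline, my_check_ready);
	if (rc)
		return rc;
#endif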
3660  
3661  /**
3662   *	ata_std_prereset - prepare for reset
3663   *	@link: ATA link to be reset
3664   *	@deadline: deadline jiffies for the operation
3665   *
3666   *	@link is about to be reset.  Initialize it.  Failure from
3667   *	prereset makes libata abort the whole reset sequence and give up
3668   *	on that port, so prereset should be best-effort.  It does its
3669   *	best to prepare for the reset sequence but if things go wrong, it
3670   *	should just whine, not fail.
3671   *
3672   *	LOCKING:
3673   *	Kernel thread context (may sleep)
3674   *
3675   *	RETURNS:
3676   *	Always 0.
3677   */
3678  int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3679  {
3680  	struct ata_port *ap = link->ap;
3681  	struct ata_eh_context *ehc = &link->eh_context;
3682  	const unsigned int *timing = sata_ehc_deb_timing(ehc);
3683  	int rc;
3684  
3685  	/* if we're about to do hardreset, nothing more to do */
3686  	if (ehc->i.action & ATA_EH_HARDRESET)
3687  		return 0;
3688  
3689  	/* if SATA, resume link */
3690  	if (ap->flags & ATA_FLAG_SATA) {
3691  		rc = sata_link_resume(link, timing, deadline);
3692  		/* whine about phy resume failure but proceed */
3693  		if (rc && rc != -EOPNOTSUPP)
3694  			ata_link_warn(link,
3695  				      "failed to resume link for reset (errno=%d)\n",
3696  				      rc);
3697  	}
3698  
3699  	/* no point in trying softreset on offline link */
3700  	if (ata_phys_link_offline(link))
3701  		ehc->i.action &= ~ATA_EH_SOFTRESET;
3702  
3703  	return 0;
3704  }
3705  EXPORT_SYMBOL_GPL(ata_std_prereset);
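/*
 * Example (illustrative sketch): drivers that need extra pre-reset work
 * usually wrap this helper rather than replace it ("my_prereset" is a
 * made-up name):
 */
#if 0	/* sketch only, not compiled */
static int my_prereset(struct ata_link *link, unsigned long deadline)
{
	/* quiesce controller-specific state here, best-effort ... */
	return ata_std_prereset(link, deadline);
}
#endif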
3706  
3707  /**
3708   *	ata_std_postreset - standard postreset callback
3709   *	@link: the target ata_link
3710   *	@classes: classes of attached devices
3711   *
3712   *	This function is invoked after a successful reset.  Note that
3713   *	the device might have been reset more than once using
3714   *	different reset methods before postreset is invoked.
3715   *
3716   *	LOCKING:
3717   *	Kernel thread context (may sleep)
3718   */
3719  void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3720  {
3721  	u32 serror;
3722  
3723  	/* reset complete, clear SError */
3724  	if (!sata_scr_read(link, SCR_ERROR, &serror))
3725  		sata_scr_write(link, SCR_ERROR, serror);
3726  
3727  	/* print link status */
3728  	sata_print_link_status(link);
3729  }
3730  EXPORT_SYMBOL_GPL(ata_std_postreset);
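/*
 * Example (illustrative sketch): prereset/postreset are inherited from
 * ata_base_port_ops by default, but a driver may also wire them up
 * explicitly in its ata_port_operations ("my_port_ops" is a made-up name):
 */
#if 0	/* sketch only, not compiled */
static struct ata_port_operations my_port_ops = {
	.inherits	= &ata_base_port_ops,
	.prereset	= ata_std_prereset,
	.postreset	= ata_std_postreset,
};
#endif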
3731  
3732  /**
3733   *	ata_dev_same_device - Determine whether new ID matches configured device
3734   *	@dev: device to compare against
3735   *	@new_class: class of the new device
3736   *	@new_id: IDENTIFY page of the new device
3737   *
3738   *	Compare @new_class and @new_id against @dev and determine
3739   *	whether @dev is the device indicated by @new_class and
3740   *	@new_id.
3741   *
3742   *	LOCKING:
3743   *	None.
3744   *
3745   *	RETURNS:
3746   *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3747   */
3748  static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3749  			       const u16 *new_id)
3750  {
3751  	const u16 *old_id = dev->id;
3752  	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3753  	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3754  
3755  	if (dev->class != new_class) {
3756  		ata_dev_info(dev, "class mismatch %d != %d\n",
3757  			     dev->class, new_class);
3758  		return 0;
3759  	}
3760  
3761  	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3762  	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3763  	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3764  	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3765  
3766  	if (strcmp(model[0], model[1])) {
3767  		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3768  			     model[0], model[1]);
3769  		return 0;
3770  	}
3771  
3772  	if (strcmp(serial[0], serial[1])) {
3773  		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3774  			     serial[0], serial[1]);
3775  		return 0;
3776  	}
3777  
3778  	return 1;
3779  }
3780  
3781  /**
3782   *	ata_dev_reread_id - Re-read IDENTIFY data
3783   *	@dev: target ATA device
3784   *	@readid_flags: read ID flags
3785   *
3786   *	Re-read IDENTIFY page and make sure @dev is still attached to
3787   *	the port.
3788   *
3789   *	LOCKING:
3790   *	Kernel thread context (may sleep)
3791   *
3792   *	RETURNS:
3793   *	0 on success, negative errno otherwise
3794   */
3795  int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3796  {
3797  	unsigned int class = dev->class;
3798  	u16 *id = (void *)dev->sector_buf;
3799  	int rc;
3800  
3801  	/* read ID data */
3802  	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3803  	if (rc)
3804  		return rc;
3805  
3806  	/* is the device still there? */
3807  	if (!ata_dev_same_device(dev, class, id))
3808  		return -ENODEV;
3809  
3810  	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3811  	return 0;
3812  }
3813  
3814  /**
3815   *	ata_dev_revalidate - Revalidate ATA device
3816   *	@dev: device to revalidate
3817   *	@new_class: new class code
3818   *	@readid_flags: read ID flags
3819   *
3820   *	Re-read IDENTIFY page, make sure @dev is still attached to the
3821   *	port and reconfigure it according to the new IDENTIFY page.
3822   *
3823   *	LOCKING:
3824   *	Kernel thread context (may sleep)
3825   *
3826   *	RETURNS:
3827   *	0 on success, negative errno otherwise
3828   */
3829  int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3830  		       unsigned int readid_flags)
3831  {
3832  	u64 n_sectors = dev->n_sectors;
3833  	u64 n_native_sectors = dev->n_native_sectors;
3834  	int rc;
3835  
3836  	if (!ata_dev_enabled(dev))
3837  		return -ENODEV;
3838  
3839  	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3840  	if (ata_class_enabled(new_class) && new_class == ATA_DEV_PMP) {
3841  		ata_dev_info(dev, "class mismatch %u != %u\n",
3842  			     dev->class, new_class);
3843  		rc = -ENODEV;
3844  		goto fail;
3845  	}
3846  
3847  	/* re-read ID */
3848  	rc = ata_dev_reread_id(dev, readid_flags);
3849  	if (rc)
3850  		goto fail;
3851  
3852  	/* configure device according to the new ID */
3853  	rc = ata_dev_configure(dev);
3854  	if (rc)
3855  		goto fail;
3856  
3857  	/* verify n_sectors hasn't changed */
3858  	if (dev->class != ATA_DEV_ATA || !n_sectors ||
3859  	    dev->n_sectors == n_sectors)
3860  		return 0;
3861  
3862  	/* n_sectors has changed */
3863  	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3864  		     (unsigned long long)n_sectors,
3865  		     (unsigned long long)dev->n_sectors);
3866  
3867  	/*
3868  	 * Something could have caused HPA to be unlocked
3869  	 * involuntarily.  If n_native_sectors hasn't changed and the
3870  	 * new size matches it, keep the device.
3871  	 */
3872  	if (dev->n_native_sectors == n_native_sectors &&
3873  	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
3874  		ata_dev_warn(dev,
3875  			     "new n_sectors matches native, probably "
3876  			     "late HPA unlock, n_sectors updated\n");
3877  		/* use the larger n_sectors */
3878  		return 0;
3879  	}
3880  
3881  	/*
3882  	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
3883  	 * unlocking HPA in those cases.
3884  	 *
3885  	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3886  	 */
3887  	if (dev->n_native_sectors == n_native_sectors &&
3888  	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3889  	    !(dev->quirks & ATA_QUIRK_BROKEN_HPA)) {
3890  		ata_dev_warn(dev,
3891  			     "old n_sectors matches native, probably "
3892  			     "late HPA lock, will try to unlock HPA\n");
3893  		/* try unlocking HPA */
3894  		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
3895  		rc = -EIO;
3896  	} else
3897  		rc = -ENODEV;
3898  
3899  	/* restore original n_[native_]sectors and fail */
3900  	dev->n_native_sectors = n_native_sectors;
3901  	dev->n_sectors = n_sectors;
3902   fail:
3903  	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
3904  	return rc;
3905  }
3906  
3907  static const char * const ata_quirk_names[] = {
3908  	[__ATA_QUIRK_DIAGNOSTIC]	= "diagnostic",
3909  	[__ATA_QUIRK_NODMA]		= "nodma",
3910  	[__ATA_QUIRK_NONCQ]		= "noncq",
3911  	[__ATA_QUIRK_MAX_SEC_128]	= "maxsec128",
3912  	[__ATA_QUIRK_BROKEN_HPA]	= "brokenhpa",
3913  	[__ATA_QUIRK_DISABLE]		= "disable",
3914  	[__ATA_QUIRK_HPA_SIZE]		= "hpasize",
3915  	[__ATA_QUIRK_IVB]		= "ivb",
3916  	[__ATA_QUIRK_STUCK_ERR]		= "stuckerr",
3917  	[__ATA_QUIRK_BRIDGE_OK]		= "bridgeok",
3918  	[__ATA_QUIRK_ATAPI_MOD16_DMA]	= "atapimod16dma",
3919  	[__ATA_QUIRK_FIRMWARE_WARN]	= "firmwarewarn",
3920  	[__ATA_QUIRK_1_5_GBPS]		= "1.5gbps",
3921  	[__ATA_QUIRK_NOSETXFER]		= "nosetxfer",
3922  	[__ATA_QUIRK_BROKEN_FPDMA_AA]	= "brokenfpdmaaa",
3923  	[__ATA_QUIRK_DUMP_ID]		= "dumpid",
3924  	[__ATA_QUIRK_MAX_SEC_LBA48]	= "maxseclba48",
3925  	[__ATA_QUIRK_ATAPI_DMADIR]	= "atapidmadir",
3926  	[__ATA_QUIRK_NO_NCQ_TRIM]	= "noncqtrim",
3927  	[__ATA_QUIRK_NOLPM]		= "nolpm",
3928  	[__ATA_QUIRK_WD_BROKEN_LPM]	= "wdbrokenlpm",
3929  	[__ATA_QUIRK_ZERO_AFTER_TRIM]	= "zeroaftertrim",
3930  	[__ATA_QUIRK_NO_DMA_LOG]	= "nodmalog",
3931  	[__ATA_QUIRK_NOTRIM]		= "notrim",
3932  	[__ATA_QUIRK_MAX_SEC_1024]	= "maxsec1024",
3933  	[__ATA_QUIRK_MAX_TRIM_128M]	= "maxtrim128m",
3934  	[__ATA_QUIRK_NO_NCQ_ON_ATI]	= "noncqonati",
3935  	[__ATA_QUIRK_NO_LPM_ON_ATI]	= "nolpmonati",
3936  	[__ATA_QUIRK_NO_ID_DEV_LOG]	= "noiddevlog",
3937  	[__ATA_QUIRK_NO_LOG_DIR]	= "nologdir",
3938  	[__ATA_QUIRK_NO_FUA]		= "nofua",
3939  };
3940  
3941  static void ata_dev_print_quirks(const struct ata_device *dev,
3942  				 const char *model, const char *rev,
3943  				 unsigned int quirks)
3944  {
3945  	struct ata_eh_context *ehc = &dev->link->eh_context;
3946  	int n = 0, i;
3947  	size_t sz;
3948  	char *str;
3949  
3950  	if (!ata_dev_print_info(dev) || ehc->i.flags & ATA_EHI_DID_PRINT_QUIRKS)
3951  		return;
3952  
3953  	ehc->i.flags |= ATA_EHI_DID_PRINT_QUIRKS;
3954  
3955  	if (!quirks)
3956  		return;
3957  
3958  	sz = 64 + ARRAY_SIZE(ata_quirk_names) * 16;
3959  	str = kmalloc(sz, GFP_KERNEL);
3960  	if (!str)
3961  		return;
3962  
3963  	n = snprintf(str, sz, "Model '%s', rev '%s', applying quirks:",
3964  		     model, rev);
3965  
3966  	for (i = 0; i < ARRAY_SIZE(ata_quirk_names); i++) {
3967  		if (quirks & (1U << i))
3968  			n += snprintf(str + n, sz - n,
3969  				      " %s", ata_quirk_names[i]);
3970  	}
3971  
3972  	ata_dev_warn(dev, "%s\n", str);
3973  
3974  	kfree(str);
3975  }
3976  
3977  struct ata_dev_quirks_entry {
3978  	const char *model_num;
3979  	const char *model_rev;
3980  	unsigned int quirks;
3981  };
3982  
3983  static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
3984  	/* Devices with DMA related problems under Linux */
3985  	{ "WDC AC11000H",	NULL,		ATA_QUIRK_NODMA },
3986  	{ "WDC AC22100H",	NULL,		ATA_QUIRK_NODMA },
3987  	{ "WDC AC32500H",	NULL,		ATA_QUIRK_NODMA },
3988  	{ "WDC AC33100H",	NULL,		ATA_QUIRK_NODMA },
3989  	{ "WDC AC31600H",	NULL,		ATA_QUIRK_NODMA },
3990  	{ "WDC AC32100H",	"24.09P07",	ATA_QUIRK_NODMA },
3991  	{ "WDC AC23200L",	"21.10N21",	ATA_QUIRK_NODMA },
3992  	{ "Compaq CRD-8241B",	NULL,		ATA_QUIRK_NODMA },
3993  	{ "CRD-8400B",		NULL,		ATA_QUIRK_NODMA },
3994  	{ "CRD-848[02]B",	NULL,		ATA_QUIRK_NODMA },
3995  	{ "CRD-84",		NULL,		ATA_QUIRK_NODMA },
3996  	{ "SanDisk SDP3B",	NULL,		ATA_QUIRK_NODMA },
3997  	{ "SanDisk SDP3B-64",	NULL,		ATA_QUIRK_NODMA },
3998  	{ "SANYO CD-ROM CRD",	NULL,		ATA_QUIRK_NODMA },
3999  	{ "HITACHI CDR-8",	NULL,		ATA_QUIRK_NODMA },
4000  	{ "HITACHI CDR-8[34]35", NULL,		ATA_QUIRK_NODMA },
4001  	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_QUIRK_NODMA },
4002  	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_QUIRK_NODMA },
4003  	{ "CD-532E-A",		NULL,		ATA_QUIRK_NODMA },
4004  	{ "E-IDE CD-ROM CR-840", NULL,		ATA_QUIRK_NODMA },
4005  	{ "CD-ROM Drive/F5A",	NULL,		ATA_QUIRK_NODMA },
4006  	{ "WPI CDD-820",	NULL,		ATA_QUIRK_NODMA },
4007  	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_QUIRK_NODMA },
4008  	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_QUIRK_NODMA },
4009  	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_QUIRK_NODMA },
4010  	{ "_NEC DV5800A",	NULL,		ATA_QUIRK_NODMA },
4011  	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_QUIRK_NODMA },
4012  	{ "Seagate STT20000A", NULL,		ATA_QUIRK_NODMA },
4013  	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_QUIRK_NODMA },
4014  	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_QUIRK_NODMA },
4015  	/* Odd clown on sil3726/4726 PMPs */
4016  	{ "Config  Disk",	NULL,		ATA_QUIRK_DISABLE },
4017  	/* Similar story with ASMedia 1092 */
4018  	{ "ASMT109x- Config",	NULL,		ATA_QUIRK_DISABLE },
4019  
4020  	/* Weird ATAPI devices */
4021  	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_QUIRK_MAX_SEC_128 },
4022  	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_QUIRK_ATAPI_MOD16_DMA },
4023  	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_QUIRK_MAX_SEC_LBA48 },
4024  	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_QUIRK_MAX_SEC_LBA48 },
4025  
4026  	/*
4027  	 * Causes silent data corruption with higher max sects.
4028  	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4029  	 */
4030  	{ "ST380013AS",		"3.20",		ATA_QUIRK_MAX_SEC_1024 },
4031  
4032  	/*
4033  	 * These devices time out with higher max sects.
4034  	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4035  	 */
4036  	{ "LITEON CX1-JB*-HP",	NULL,		ATA_QUIRK_MAX_SEC_1024 },
4037  	{ "LITEON EP1-*",	NULL,		ATA_QUIRK_MAX_SEC_1024 },
4038  
4039  	/* Devices we expect to fail diagnostics */
4040  
4041  	/* Devices where NCQ should be avoided */
4042  	/* NCQ is slow */
4043  	{ "WDC WD740ADFD-00",	NULL,		ATA_QUIRK_NONCQ },
4044  	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_QUIRK_NONCQ },
4045  	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4046  	{ "FUJITSU MHT2060BH",	NULL,		ATA_QUIRK_NONCQ },
4047  	/* NCQ is broken */
4048  	{ "Maxtor *",		"BANC*",	ATA_QUIRK_NONCQ },
4049  	{ "Maxtor 7V300F0",	"VA111630",	ATA_QUIRK_NONCQ },
4050  	{ "ST380817AS",		"3.42",		ATA_QUIRK_NONCQ },
4051  	{ "ST3160023AS",	"3.42",		ATA_QUIRK_NONCQ },
4052  	{ "OCZ CORE_SSD",	"02.10104",	ATA_QUIRK_NONCQ },
4053  
4054  	/* Seagate NCQ + FLUSH CACHE firmware bug */
4055  	{ "ST31500341AS",	"SD1[5-9]",	ATA_QUIRK_NONCQ |
4056  						ATA_QUIRK_FIRMWARE_WARN },
4057  
4058  	{ "ST31000333AS",	"SD1[5-9]",	ATA_QUIRK_NONCQ |
4059  						ATA_QUIRK_FIRMWARE_WARN },
4060  
4061  	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_QUIRK_NONCQ |
4062  						ATA_QUIRK_FIRMWARE_WARN },
4063  
4064  	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_QUIRK_NONCQ |
4065  						ATA_QUIRK_FIRMWARE_WARN },
4066  
4067  	/* Drives which fail FPDMA_AA activation (some may freeze afterwards);
4068  	   the ST disks also have LPM issues */
4069  	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_QUIRK_BROKEN_FPDMA_AA |
4070  						ATA_QUIRK_NOLPM },
4071  	{ "VB0250EAVER",	"HPG7",		ATA_QUIRK_BROKEN_FPDMA_AA },
4072  
4073  	/* Blacklist entries taken from Silicon Image 3124/3132
4074  	   Windows driver .inf file - also several Linux problem reports */
4075  	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_QUIRK_NONCQ },
4076  	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_QUIRK_NONCQ },
4077  	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_QUIRK_NONCQ },
4078  
4079  	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4080  	{ "C300-CTFDDAC128MAG",	"0001",		ATA_QUIRK_NONCQ },
4081  
4082  	/* Sandisk SD7/8/9s lock up hard on large trims */
4083  	{ "SanDisk SD[789]*",	NULL,		ATA_QUIRK_MAX_TRIM_128M },
4084  
4085  	/* devices which puke on READ_NATIVE_MAX */
4086  	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_QUIRK_BROKEN_HPA },
4087  	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_QUIRK_BROKEN_HPA },
4088  	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_QUIRK_BROKEN_HPA },
4089  	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_QUIRK_BROKEN_HPA },
4090  
4091  	/* this one allows HPA unlocking but fails IOs on the area */
4092  	{ "OCZ-VERTEX",		    "1.30",	ATA_QUIRK_BROKEN_HPA },
4093  
4094  	/* Devices which report 1 sector over size HPA */
4095  	{ "ST340823A",		NULL,		ATA_QUIRK_HPA_SIZE },
4096  	{ "ST320413A",		NULL,		ATA_QUIRK_HPA_SIZE },
4097  	{ "ST310211A",		NULL,		ATA_QUIRK_HPA_SIZE },
4098  
4099  	/* Devices which get the IVB wrong */
4100  	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_QUIRK_IVB },
4101  	/* Maybe we should just add all TSSTcorp devices... */
4102  	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_QUIRK_IVB },
4103  
4104  	/* Devices that do not need bridging limits applied */
4105  	{ "MTRON MSP-SATA*",		NULL,	ATA_QUIRK_BRIDGE_OK },
4106  	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_QUIRK_BRIDGE_OK },
4107  
4108  	/* Devices which aren't very happy with higher link speeds */
4109  	{ "WD My Book",			NULL,	ATA_QUIRK_1_5_GBPS },
4110  	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_QUIRK_1_5_GBPS },
4111  
4112  	/*
4113  	 * Devices which choke on SETXFER.  Applies only if both the
4114  	 * device and controller are SATA.
4115  	 */
4116  	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_QUIRK_NOSETXFER },
4117  	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_QUIRK_NOSETXFER },
4118  	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_QUIRK_NOSETXFER },
4119  	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_QUIRK_NOSETXFER },
4120  	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_QUIRK_NOSETXFER },
4121  
4122  	/* These specific Pioneer models have LPM issues */
4123  	{ "PIONEER BD-RW   BDR-207M",	NULL,	ATA_QUIRK_NOLPM },
4124  	{ "PIONEER BD-RW   BDR-205",	NULL,	ATA_QUIRK_NOLPM },
4125  
4126  	/* Crucial devices with broken LPM support */
4127  	{ "CT*0BX*00SSD1",		NULL,	ATA_QUIRK_NOLPM },
4128  
4129  	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4130  	{ "Crucial_CT512MX100*",	"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
4131  						ATA_QUIRK_ZERO_AFTER_TRIM |
4132  						ATA_QUIRK_NOLPM },
4133  	/* 512GB MX100 with newer firmware has only LPM issues */
4134  	{ "Crucial_CT512MX100*",	NULL,	ATA_QUIRK_ZERO_AFTER_TRIM |
4135  						ATA_QUIRK_NOLPM },
4136  
4137  	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4138  	{ "Crucial_CT480M500*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4139  						ATA_QUIRK_ZERO_AFTER_TRIM |
4140  						ATA_QUIRK_NOLPM },
4141  	{ "Crucial_CT960M500*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4142  						ATA_QUIRK_ZERO_AFTER_TRIM |
4143  						ATA_QUIRK_NOLPM },
4144  
4145  	/* AMD Radeon devices with broken LPM support */
4146  	{ "R3SL240G",			NULL,	ATA_QUIRK_NOLPM },
4147  
4148  	/* Apacer models with LPM issues */
4149  	{ "Apacer AS340*",		NULL,	ATA_QUIRK_NOLPM },
4150  
4151  	/* These specific Samsung models/firmware-revs do not handle LPM well */
4152  	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_QUIRK_NOLPM },
4153  	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_QUIRK_NOLPM },
4154  	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_QUIRK_NOLPM },
4155  	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_QUIRK_NOLPM },
4156  
4157  	/* devices that don't properly handle queued TRIM commands */
4158  	{ "Micron_M500IT_*",		"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
4159  						ATA_QUIRK_ZERO_AFTER_TRIM },
4160  	{ "Micron_M500_*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4161  						ATA_QUIRK_ZERO_AFTER_TRIM },
4162  	{ "Micron_M5[15]0_*",		"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
4163  						ATA_QUIRK_ZERO_AFTER_TRIM },
4164  	{ "Micron_1100_*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4165  						ATA_QUIRK_ZERO_AFTER_TRIM, },
4166  	{ "Crucial_CT*M500*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4167  						ATA_QUIRK_ZERO_AFTER_TRIM },
4168  	{ "Crucial_CT*M550*",		"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
4169  						ATA_QUIRK_ZERO_AFTER_TRIM },
4170  	{ "Crucial_CT*MX100*",		"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
4171  						ATA_QUIRK_ZERO_AFTER_TRIM },
4172  	{ "Samsung SSD 840 EVO*",	NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4173  						ATA_QUIRK_NO_DMA_LOG |
4174  						ATA_QUIRK_ZERO_AFTER_TRIM },
4175  	{ "Samsung SSD 840*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4176  						ATA_QUIRK_ZERO_AFTER_TRIM },
4177  	{ "Samsung SSD 850*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4178  						ATA_QUIRK_ZERO_AFTER_TRIM },
4179  	{ "Samsung SSD 860*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4180  						ATA_QUIRK_ZERO_AFTER_TRIM |
4181  						ATA_QUIRK_NO_NCQ_ON_ATI |
4182  						ATA_QUIRK_NO_LPM_ON_ATI },
4183  	{ "Samsung SSD 870*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4184  						ATA_QUIRK_ZERO_AFTER_TRIM |
4185  						ATA_QUIRK_NO_NCQ_ON_ATI |
4186  						ATA_QUIRK_NO_LPM_ON_ATI },
4187  	{ "SAMSUNG*MZ7LH*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4188  						ATA_QUIRK_ZERO_AFTER_TRIM |
4189  						ATA_QUIRK_NO_NCQ_ON_ATI |
4190  						ATA_QUIRK_NO_LPM_ON_ATI },
4191  	{ "FCCT*M500*",			NULL,	ATA_QUIRK_NO_NCQ_TRIM |
4192  						ATA_QUIRK_ZERO_AFTER_TRIM },
4193  
4194  	/* devices that don't properly handle TRIM commands */
4195  	{ "SuperSSpeed S238*",		NULL,	ATA_QUIRK_NOTRIM },
4196  	{ "M88V29*",			NULL,	ATA_QUIRK_NOTRIM },
4197  
4198  	/*
4199  	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4200  	 * (Return Zero After Trim) flags in the ATA Command Set are
4201  	 * unreliable in the sense that they only define what happens if
4202  	 * the device successfully executed the DSM TRIM command. TRIM
4203  	 * is only advisory, however, and the device is free to silently
4204  	 * ignore all or parts of the request.
4205  	 *
4206  	 * Whitelist drives that are known to reliably return zeroes
4207  	 * after TRIM.
4208  	 */
4209  
4210  	/*
4211  	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4212  	 * that model before whitelisting all other intel SSDs.
4213  	 */
4214  	{ "INTEL*SSDSC2MH*",		NULL,	0 },
4215  
4216  	{ "Micron*",			NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
4217  	{ "Crucial*",			NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
4218  	{ "INTEL*SSD*",			NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
4219  	{ "SSD*INTEL*",			NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
4220  	{ "Samsung*SSD*",		NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
4221  	{ "SAMSUNG*SSD*",		NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
4222  	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
4223  	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
4224  
4225  	/*
4226  	 * Some WD SATA-I drives spin up and down erratically when the link
4227  	 * is put into the slumber mode.  We don't have full list of the
4228  	 * affected devices.  Disable LPM if the device matches one of the
4229  	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4230  	 * lost too.
4231  	 *
4232  	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4233  	 */
4234  	{ "WDC WD800JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
4235  	{ "WDC WD1200JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
4236  	{ "WDC WD1600JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
4237  	{ "WDC WD2000JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
4238  	{ "WDC WD2500JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
4239  	{ "WDC WD3000JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
4240  	{ "WDC WD3200JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
4241  
4242  	/*
4243  	 * This SATA DOM device goes on a walkabout when the ATA_LOG_DIRECTORY
4244  	 * log page is accessed. Ensure we never ask for this log page with
4245  	 * these devices.
4246  	 */
4247  	{ "SATADOM-ML 3ME",		NULL,	ATA_QUIRK_NO_LOG_DIR },
4248  
4249  	/* Buggy FUA */
4250  	{ "Maxtor",		"BANC1G10",	ATA_QUIRK_NO_FUA },
4251  	{ "WDC*WD2500J*",	NULL,		ATA_QUIRK_NO_FUA },
4252  	{ "OCZ-VERTEX*",	NULL,		ATA_QUIRK_NO_FUA },
4253  	{ "INTEL*SSDSC2CT*",	NULL,		ATA_QUIRK_NO_FUA },
4254  
4255  	/* End Marker */
4256  	{ }
4257  };
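/*
 * Example (illustrative only): a new entry for a hypothetical device with
 * broken queued TRIM would be added above the end marker as:
 *
 *	{ "VENDOR MODEL*",	"FW1*",		ATA_QUIRK_NO_NCQ_TRIM },
 *
 * model_num and model_rev are glob patterns matched against the IDENTIFY
 * strings with glob_match(); a NULL model_rev matches any firmware revision.
 */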
4258  
4259  static unsigned int ata_dev_quirks(const struct ata_device *dev)
4260  {
4261  	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4262  	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4263  	const struct ata_dev_quirks_entry *ad = __ata_dev_quirks;
4264  
4265  	/* dev->quirks is an unsigned int. */
4266  	BUILD_BUG_ON(__ATA_QUIRK_MAX > 32);
4267  
4268  	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4269  	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4270  
4271  	while (ad->model_num) {
4272  		if (glob_match(ad->model_num, model_num) &&
4273  		    (!ad->model_rev || glob_match(ad->model_rev, model_rev))) {
4274  			ata_dev_print_quirks(dev, model_num, model_rev,
4275  					     ad->quirks);
4276  			return ad->quirks;
4277  		}
4278  		ad++;
4279  	}
4280  	return 0;
4281  }
4282  
4283  static bool ata_dev_nodma(const struct ata_device *dev)
4284  {
4285  	/*
4286  	 * We do not support polling DMA. Deny DMA for those ATAPI devices
4287  	 * with CDB-intr (and use PIO) if the LLDD handles only interrupts in
4288  	 * the HSM_ST_LAST state.
4289  	 */
4290  	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4291  	    (dev->flags & ATA_DFLAG_CDB_INTR))
4292  		return true;
4293  	return dev->quirks & ATA_QUIRK_NODMA;
4294  }
4295  
4296  /**
4297   *	ata_is_40wire		-	check drive side detection
4298   *	@dev: device
4299   *
4300   *	Perform drive side detection decoding, allowing for device vendors
4301   *	who can't follow the documentation.
4302   */
4303  
4304  static int ata_is_40wire(struct ata_device *dev)
4305  {
4306  	if (dev->quirks & ATA_QUIRK_IVB)
4307  		return ata_drive_40wire_relaxed(dev->id);
4308  	return ata_drive_40wire(dev->id);
4309  }
4310  
4311  /**
4312   *	cable_is_40wire		-	40/80/SATA decider
4313   *	@ap: port to consider
4314   *
4315   *	This function encapsulates the policy for speed management
4316   *	in one place. At the moment we don't cache the result but
4317   *	there is a good case for setting ap->cbl to the result when
4318   *	we are called with unknown cables (and figuring out if it
4319   *	impacts hotplug at all).
4320   *
4321   *	Return 1 if the cable appears to be 40 wire.
4322   */
4323  
4324  static int cable_is_40wire(struct ata_port *ap)
4325  {
4326  	struct ata_link *link;
4327  	struct ata_device *dev;
4328  
4329  	/* If the controller thinks we are 40 wire, we are. */
4330  	if (ap->cbl == ATA_CBL_PATA40)
4331  		return 1;
4332  
4333  	/* If the controller thinks we are 80 wire, we are. */
4334  	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4335  		return 0;
4336  
4337  	/* If the system is known to be 40 wire short cable (e.g.
4338  	 * laptop), then we allow 80 wire modes even if the drive
4339  	 * isn't sure.
4340  	 */
4341  	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4342  		return 0;
4343  
4344  	/* If the controller doesn't know, we scan.
4345  	 *
4346  	 * Note: We look for all 40 wire detects at this point.  Any
4347  	 *       80 wire detect is taken to be 80 wire cable because
4348  	 * - in many setups only the one drive (slave if present) will
4349  	 *   give a valid detect
4350  	 * - if you have a non detect capable drive you don't want it
4351  	 *   to colour the choice
4352  	 */
4353  	ata_for_each_link(link, ap, EDGE) {
4354  		ata_for_each_dev(dev, link, ENABLED) {
4355  			if (!ata_is_40wire(dev))
4356  				return 0;
4357  		}
4358  	}
4359  	return 1;
4360  }
4361  
4362  /**
4363   *	ata_dev_xfermask - Compute supported xfermask of the given device
4364   *	@dev: Device to compute xfermask for
4365   *
4366   *	Compute supported xfermask of @dev and store it in
4367   *	dev->*_mask.  This function is responsible for applying all
4368   *	known limits including host controller limits, device quirks, etc...
4369   *
4370   *	LOCKING:
4371   *	None.
4372   */
4373  static void ata_dev_xfermask(struct ata_device *dev)
4374  {
4375  	struct ata_link *link = dev->link;
4376  	struct ata_port *ap = link->ap;
4377  	struct ata_host *host = ap->host;
4378  	unsigned int xfer_mask;
4379  
4380  	/* controller modes available */
4381  	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4382  				      ap->mwdma_mask, ap->udma_mask);
4383  
4384  	/* drive modes available */
4385  	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4386  				       dev->mwdma_mask, dev->udma_mask);
4387  	xfer_mask &= ata_id_xfermask(dev->id);
4388  
4389  	/*
4390  	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4391  	 *	cable
4392  	 */
4393  	if (ata_dev_pair(dev)) {
4394  		/* No PIO5 or PIO6 */
4395  		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4396  		/* No MWDMA3 or MWDMA 4 */
4397  		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4398  	}
4399  
4400  	if (ata_dev_nodma(dev)) {
4401  		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4402  		ata_dev_warn(dev,
4403  			     "device does not support DMA, disabling DMA\n");
4404  	}
4405  
4406  	if ((host->flags & ATA_HOST_SIMPLEX) &&
4407  	    host->simplex_claimed && host->simplex_claimed != ap) {
4408  		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4409  		ata_dev_warn(dev,
4410  			     "simplex DMA is claimed by other device, disabling DMA\n");
4411  	}
4412  
4413  	if (ap->flags & ATA_FLAG_NO_IORDY)
4414  		xfer_mask &= ata_pio_mask_no_iordy(dev);
4415  
4416  	if (ap->ops->mode_filter)
4417  		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4418  
4419  	/* Apply cable rule here.  Don't apply it early because when
4420  	 * we handle hot plug the cable type can itself change.
4421  	 * Check this last so that we know if the transfer rate was
4422  	 * solely limited by the cable.
4423  	 * Unknown or 80 wire cables reported host side are checked
4424  	 * drive side as well. Cases where we know a 40wire cable
4425  	 * is used safely for 80 are not checked here.
4426  	 */
4427  	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4428  		/* UDMA/44 or higher would be available */
4429  		if (cable_is_40wire(ap)) {
4430  			ata_dev_warn(dev,
4431  				     "limited to UDMA/33 due to 40-wire cable\n");
4432  			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4433  		}
4434  
4435  	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4436  			    &dev->mwdma_mask, &dev->udma_mask);
4437  }
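/*
 * Example (illustrative sketch): the ->mode_filter hook used above lets an
 * LLD veto modes its controller cannot handle.  A hypothetical filter that
 * drops everything above UDMA/33, mirroring the cable rule:
 */
#if 0	/* sketch only, not compiled */
static unsigned int my_mode_filter(struct ata_device *dev,
				   unsigned int xfer_mask)
{
	/* clear UDMA/44 and up, keeping UDMA/33 and below */
	return xfer_mask & ~(0xF8 << ATA_SHIFT_UDMA);
}
#endif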
4438  
4439  /**
4440   *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4441   *	@dev: Device to which command will be sent
4442   *
4443   *	Issue SET FEATURES - XFER MODE command to device @dev
4444   *	on port @ap.
4445   *
4446   *	LOCKING:
4447   *	PCI/etc. bus probe sem.
4448   *
4449   *	RETURNS:
4450   *	0 on success, AC_ERR_* mask otherwise.
4451   */
4452  
4453  static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4454  {
4455  	struct ata_taskfile tf;
4456  
4457  	/* set up set-features taskfile */
4458  	ata_dev_dbg(dev, "set features - xfer mode\n");
4459  
4460  	/* Some controllers and ATAPI devices show flaky interrupt
4461  	 * behavior after setting xfer mode.  Use polling instead.
4462  	 */
4463  	ata_tf_init(dev, &tf);
4464  	tf.command = ATA_CMD_SET_FEATURES;
4465  	tf.feature = SETFEATURES_XFER;
4466  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4467  	tf.protocol = ATA_PROT_NODATA;
4468  	/* If we are using IORDY we must send the mode setting command */
4469  	if (ata_pio_need_iordy(dev))
4470  		tf.nsect = dev->xfer_mode;
4471  	/* If the device has IORDY and the controller does not - turn it off */
4472  	else if (ata_id_has_iordy(dev->id))
4473  		tf.nsect = 0x01;
4474  	else /* In the ancient relic department - skip all of this */
4475  		return 0;
4476  
4477  	/*
4478  	 * On some disks, this command causes spin-up, so we need longer
4479  	 * timeout.
4480  	 */
4481  	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4482  }
4483  
4484  /**
4485   *	ata_dev_set_feature - Issue SET FEATURES
4486   *	@dev: Device to which command will be sent
4487   *	@subcmd: The SET FEATURES subcommand to be sent
4488   *	@action: Subcommand-specific action, passed in the sector count field
4489   *
4490   *	Issue SET FEATURES command to device @dev on port @ap with the sector count set to @action.
4491   *
4492   *	LOCKING:
4493   *	PCI/etc. bus probe sem.
4494   *
4495   *	RETURNS:
4496   *	0 on success, AC_ERR_* mask otherwise.
4497   */
4498  unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action)
4499  {
4500  	struct ata_taskfile tf;
4501  	unsigned int timeout = 0;
4502  
4503  	/* set up set-features taskfile */
4504  	ata_dev_dbg(dev, "set features\n");
4505  
4506  	ata_tf_init(dev, &tf);
4507  	tf.command = ATA_CMD_SET_FEATURES;
4508  	tf.feature = subcmd;
4509  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4510  	tf.protocol = ATA_PROT_NODATA;
4511  	tf.nsect = action;
4512  
4513  	if (subcmd == SETFEATURES_SPINUP)
4514  		timeout = ata_probe_timeout ?
4515  			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4516  
4517  	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4518  }
4519  EXPORT_SYMBOL_GPL(ata_dev_set_feature);
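/*
 * Example (illustrative sketch): this mirrors how libata itself calls the
 * helper elsewhere, e.g. to enable SATA FPDMA AutoActivate:
 */
#if 0	/* sketch only, not compiled */
	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
				       SATA_FPDMA_AA);
	if (err_mask && err_mask != AC_ERR_DEV)
		ata_dev_warn(dev, "failed to enable AA (err_mask=0x%x)\n",
			     err_mask);
#endif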
4520  
4521  /**
4522   *	ata_dev_init_params - Issue INIT DEV PARAMS command
4523   *	@dev: Device to which command will be sent
4524   *	@heads: Number of heads (taskfile parameter)
4525   *	@sectors: Number of sectors (taskfile parameter)
4526   *
4527   *	LOCKING:
4528   *	Kernel thread context (may sleep)
4529   *
4530   *	RETURNS:
4531   *	0 on success, AC_ERR_* mask otherwise.
4532   */
4533  static unsigned int ata_dev_init_params(struct ata_device *dev,
4534  					u16 heads, u16 sectors)
4535  {
4536  	struct ata_taskfile tf;
4537  	unsigned int err_mask;
4538  
4539  	/* Number of sectors per track 1-255. Number of heads 1-16 */
4540  	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4541  		return AC_ERR_INVALID;
4542  
4543  	/* set up init dev params taskfile */
4544  	ata_dev_dbg(dev, "init dev params\n");
4545  
4546  	ata_tf_init(dev, &tf);
4547  	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4548  	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4549  	tf.protocol = ATA_PROT_NODATA;
4550  	tf.nsect = sectors;
4551  	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4552  
4553  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4554  	/* A clean abort indicates an original or just out-of-spec drive,
4555  	   and we should continue as we issue the setup based on the
4556  	   drive's reported working geometry */
4557  	if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
4558  		err_mask = 0;
4559  
4560  	return err_mask;
4561  }
4562  
4563  /**
4564   *	atapi_check_dma - Check whether ATAPI DMA can be supported
4565   *	@qc: Metadata associated with taskfile to check
4566   *
4567   *	Allow low-level driver to filter ATA PACKET commands, returning
4568   *	a status indicating whether or not it is OK to use DMA for the
4569   *	supplied PACKET command.
4570   *
4571   *	LOCKING:
4572   *	spin_lock_irqsave(host lock)
4573   *
4574   *	RETURNS:
4575   *	0 when ATAPI DMA can be used, nonzero otherwise
4576   */
4577  int atapi_check_dma(struct ata_queued_cmd *qc)
4578  {
4579  	struct ata_port *ap = qc->ap;
4580  
4581  	/* Don't allow DMA if the transfer length isn't a multiple of 16
4582  	 * bytes.  Quite a few ATAPI devices choke on such DMA requests.
4583  	 */
4584  	if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) &&
4585  	    unlikely(qc->nbytes & 15))
4586  		return -EOPNOTSUPP;
4587  
4588  	if (ap->ops->check_atapi_dma)
4589  		return ap->ops->check_atapi_dma(qc);
4590  
4591  	return 0;
4592  }
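/*
 * Example (illustrative sketch): a ->check_atapi_dma hook returns nonzero
 * to force a PACKET command to PIO.  A hypothetical controller that cannot
 * DMA transfers which aren't a multiple of 4 bytes might use:
 */
#if 0	/* sketch only, not compiled */
static int my_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return (qc->nbytes & 3) ? 1 : 0;	/* nonzero disallows DMA */
}
#endif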
4593  
4594  /**
4595   *	ata_std_qc_defer - Check whether a qc needs to be deferred
4596   *	@qc: ATA command in question
4597   *
4598   *	Non-NCQ commands cannot run with any other command, NCQ or
4599   *	not.  As the upper layer only knows the queue depth, we are
4600   *	responsible for maintaining exclusion.  This function checks
4601   *	whether a new command @qc can be issued.
4602   *
4603   *	LOCKING:
4604   *	spin_lock_irqsave(host lock)
4605   *
4606   *	RETURNS:
4607   *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4608   */
4609  int ata_std_qc_defer(struct ata_queued_cmd *qc)
4610  {
4611  	struct ata_link *link = qc->dev->link;
4612  
4613  	if (ata_is_ncq(qc->tf.protocol)) {
4614  		if (!ata_tag_valid(link->active_tag))
4615  			return 0;
4616  	} else {
4617  		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4618  			return 0;
4619  	}
4620  
4621  	return ATA_DEFER_LINK;
4622  }
4623  EXPORT_SYMBOL_GPL(ata_std_qc_defer);
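/*
 * Example (illustrative only): NCQ-capable drivers without a custom
 * deferral policy typically just point their port ops at this helper:
 *
 *	.qc_defer	= ata_std_qc_defer,
 */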
4624  
4625  /**
4626   *	ata_sg_init - Associate command with scatter-gather table.
4627   *	@qc: Command to be associated
4628   *	@sg: Scatter-gather table.
4629   *	@n_elem: Number of elements in s/g table.
4630   *
4631   *	Initialize the data-related elements of queued_cmd @qc
4632   *	to point to a scatter-gather table @sg, containing @n_elem
4633   *	elements.
4634   *
4635   *	LOCKING:
4636   *	spin_lock_irqsave(host lock)
4637   */
4638  void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4639  		 unsigned int n_elem)
4640  {
4641  	qc->sg = sg;
4642  	qc->n_elem = n_elem;
4643  	qc->cursg = qc->sg;
4644  }
4645  
4646  #ifdef CONFIG_HAS_DMA
4647  
4648  /**
4649   *	ata_sg_clean - Unmap DMA memory associated with command
4650   *	@qc: Command containing DMA memory to be released
4651   *
4652   *	Unmap all mapped DMA memory associated with this command.
4653   *
4654   *	LOCKING:
4655   *	spin_lock_irqsave(host lock)
4656   */
4657  static void ata_sg_clean(struct ata_queued_cmd *qc)
4658  {
4659  	struct ata_port *ap = qc->ap;
4660  	struct scatterlist *sg = qc->sg;
4661  	int dir = qc->dma_dir;
4662  
4663  	WARN_ON_ONCE(sg == NULL);
4664  
4665  	if (qc->n_elem)
4666  		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4667  
4668  	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4669  	qc->sg = NULL;
4670  }
4671  
4672  /**
4673   *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4674   *	@qc: Command with scatter-gather table to be mapped.
4675   *
4676   *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4677   *
4678   *	LOCKING:
4679   *	spin_lock_irqsave(host lock)
4680   *
4681   *	RETURNS:
4682   *	Zero on success, negative on error.
4683   *
4684   */
4685  static int ata_sg_setup(struct ata_queued_cmd *qc)
4686  {
4687  	struct ata_port *ap = qc->ap;
4688  	unsigned int n_elem;
4689  
4690  	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4691  	if (n_elem < 1)
4692  		return -1;
4693  
4694  	qc->orig_n_elem = qc->n_elem;
4695  	qc->n_elem = n_elem;
4696  	qc->flags |= ATA_QCFLAG_DMAMAP;
4697  
4698  	return 0;
4699  }
4700  
4701  #else /* !CONFIG_HAS_DMA */
4702  
4703  static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4704  static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4705  
4706  #endif /* !CONFIG_HAS_DMA */
4707  
4708  /**
4709   *	swap_buf_le16 - swap halves of 16-bit words in place
4710   *	@buf:  Buffer to swap
4711   *	@buf_words:  Number of 16-bit words in buffer.
4712   *
4713   *	Swap halves of 16-bit words if needed to convert from
4714   *	little-endian byte order to native cpu byte order, or
4715   *	vice-versa.
4716   *
4717   *	LOCKING:
4718   *	Inherited from caller.
4719   */
4720  void swap_buf_le16(u16 *buf, unsigned int buf_words)
4721  {
4722  #ifdef __BIG_ENDIAN
4723  	unsigned int i;
4724  
4725  	for (i = 0; i < buf_words; i++)
4726  		buf[i] = le16_to_cpu(buf[i]);
4727  #endif /* __BIG_ENDIAN */
4728  }
4729  
4730  /**
4731   *	ata_qc_free - free unused ata_queued_cmd
4732   *	@qc: Command to complete
4733   *
4734   *	Designed to free an unused ata_queued_cmd object
4735   *	in case something prevents it from being used.
4736   *
4737   *	LOCKING:
4738   *	spin_lock_irqsave(host lock)
4739   */
4740  void ata_qc_free(struct ata_queued_cmd *qc)
4741  {
4742  	qc->flags = 0;
4743  	if (ata_tag_valid(qc->tag))
4744  		qc->tag = ATA_TAG_POISON;
4745  }
4746  
4747  void __ata_qc_complete(struct ata_queued_cmd *qc)
4748  {
4749  	struct ata_port *ap;
4750  	struct ata_link *link;
4751  
4752  	if (WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)))
4753  		return;
4754  
4755  	ap = qc->ap;
4756  	link = qc->dev->link;
4757  
4758  	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4759  		ata_sg_clean(qc);
4760  
4761  	/* command should be marked inactive atomically with qc completion */
4762  	if (ata_is_ncq(qc->tf.protocol)) {
4763  		link->sactive &= ~(1 << qc->hw_tag);
4764  		if (!link->sactive)
4765  			ap->nr_active_links--;
4766  	} else {
4767  		link->active_tag = ATA_TAG_POISON;
4768  		ap->nr_active_links--;
4769  	}
4770  
4771  	/* clear exclusive status */
4772  	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4773  		     ap->excl_link == link))
4774  		ap->excl_link = NULL;
4775  
4776  	/*
4777  	 * Mark qc as inactive to prevent the port interrupt handler from
4778  	 * completing the command twice later, before the error handler is
4779  	 * called.
4780  	 */
4781  	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4782  	ap->qc_active &= ~(1ULL << qc->tag);
4783  
4784  	/* call completion callback */
4785  	qc->complete_fn(qc);
4786  }
4787  
4788  static void fill_result_tf(struct ata_queued_cmd *qc)
4789  {
4790  	struct ata_port *ap = qc->ap;
4791  
4792  	/*
4793  	 * rtf may already be filled (e.g. for successful NCQ commands).
4794  	 * If that is the case, we have nothing to do.
4795  	 */
4796  	if (qc->flags & ATA_QCFLAG_RTF_FILLED)
4797  		return;
4798  
4799  	qc->result_tf.flags = qc->tf.flags;
4800  	ap->ops->qc_fill_rtf(qc);
4801  	qc->flags |= ATA_QCFLAG_RTF_FILLED;
4802  }
4803  
4804  static void ata_verify_xfer(struct ata_queued_cmd *qc)
4805  {
4806  	struct ata_device *dev = qc->dev;
4807  
4808  	if (!ata_is_data(qc->tf.protocol))
4809  		return;
4810  
4811  	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4812  		return;
4813  
4814  	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4815  }
4816  
4817  /**
4818   *	ata_qc_complete - Complete an active ATA command
4819   *	@qc: Command to complete
4820   *
4821   *	Indicate to the mid and upper layers that an ATA command has
4822   *	completed, with either an ok or not-ok status.
4823   *
4824   *	Refrain from calling this function multiple times when
4825   *	successfully completing multiple NCQ commands.
4826   *	ata_qc_complete_multiple() should be used instead, which will
4827   *	properly update IRQ expect state.
4828   *
4829   *	LOCKING:
4830   *	spin_lock_irqsave(host lock)
4831   */
4832  void ata_qc_complete(struct ata_queued_cmd *qc)
4833  {
4834  	struct ata_port *ap = qc->ap;
4835  	struct ata_device *dev = qc->dev;
4836  	struct ata_eh_info *ehi = &dev->link->eh_info;
4837  
4838  	/* Trigger the LED (if available) */
4839  	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
4840  
4841  	/*
4842  	 * In order to synchronize EH with the regular execution path, a qc that
4843  	 * is owned by EH is marked with ATA_QCFLAG_EH.
4844  	 *
4845  	 * The normal execution path is responsible for not accessing a qc owned
4846  	 * by EH.  libata core enforces the rule by returning NULL from
4847  	 * ata_qc_from_tag() for qcs owned by EH.
4848  	 */
4849  	if (unlikely(qc->err_mask))
4850  		qc->flags |= ATA_QCFLAG_EH;
4851  
4852  	/*
4853  	 * Finish internal commands without any further processing and always
4854  	 * with the result TF filled.
4855  	 */
4856  	if (unlikely(ata_tag_internal(qc->tag))) {
4857  		fill_result_tf(qc);
4858  		trace_ata_qc_complete_internal(qc);
4859  		__ata_qc_complete(qc);
4860  		return;
4861  	}
4862  
4863  	/* Non-internal qc has failed.  Fill the result TF and summon EH. */
4864  	if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
4865  		fill_result_tf(qc);
4866  		trace_ata_qc_complete_failed(qc);
4867  		ata_qc_schedule_eh(qc);
4868  		return;
4869  	}
4870  
4871  	WARN_ON_ONCE(ata_port_is_frozen(ap));
4872  
4873  	/* read result TF if requested */
4874  	if (qc->flags & ATA_QCFLAG_RESULT_TF)
4875  		fill_result_tf(qc);
4876  
4877  	trace_ata_qc_complete_done(qc);
4878  
4879  	/*
4880  	 * For CDL commands that completed without an error, check if we have
4881  	 * sense data (ATA_SENSE is set). If we do, then the command may have
4882  	 * been aborted by the device due to a limit timeout using the policy
4883  	 * 0xD. For these commands, invoke EH to get the command sense data.
4884  	 */
4885  	if (qc->flags & ATA_QCFLAG_HAS_CDL &&
4886  	    qc->result_tf.status & ATA_SENSE) {
4887  		/*
4888  		 * Tell SCSI EH to not overwrite scmd->result even if this
4889  		 * command is finished with result SAM_STAT_GOOD.
4890  		 */
4891  		qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
4892  		qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
4893  		ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;
4894  
4895  		/*
4896  		 * set pending so that ata_qc_schedule_eh() does not trigger
4897  		 * fast drain, and freeze the port.
4898  		 */
4899  		ap->pflags |= ATA_PFLAG_EH_PENDING;
4900  		ata_qc_schedule_eh(qc);
4901  		return;
4902  	}
4903  
4904  	/* Some commands need post-processing after successful completion. */
4905  	switch (qc->tf.command) {
4906  	case ATA_CMD_SET_FEATURES:
4907  		if (qc->tf.feature != SETFEATURES_WC_ON &&
4908  		    qc->tf.feature != SETFEATURES_WC_OFF &&
4909  		    qc->tf.feature != SETFEATURES_RA_ON &&
4910  		    qc->tf.feature != SETFEATURES_RA_OFF)
4911  			break;
4912  		fallthrough;
4913  	case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4914  	case ATA_CMD_SET_MULTI: /* multi_count changed */
4915  		/* revalidate device */
4916  		ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4917  		ata_port_schedule_eh(ap);
4918  		break;
4919  
4920  	case ATA_CMD_SLEEP:
4921  		dev->flags |= ATA_DFLAG_SLEEPING;
4922  		break;
4923  	}
4924  
4925  	if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4926  		ata_verify_xfer(qc);
4927  
4928  	__ata_qc_complete(qc);
4929  }
4930  EXPORT_SYMBOL_GPL(ata_qc_complete);
4931  
4932  /**
4933   *	ata_qc_get_active - get bitmask of active qcs
4934   *	@ap: port in question
4935   *
4936   *	LOCKING:
4937   *	spin_lock_irqsave(host lock)
4938   *
4939   *	RETURNS:
4940   *	Bitmask of active qcs
4941   */
4942  u64 ata_qc_get_active(struct ata_port *ap)
4943  {
4944  	u64 qc_active = ap->qc_active;
4945  
4946  	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4947  	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4948  		qc_active |= (1 << 0);
4949  		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4950  	}
4951  
4952  	return qc_active;
4953  }
4954  EXPORT_SYMBOL_GPL(ata_qc_get_active);
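/*
 * Example (illustrative sketch): drivers whose hardware reports *completed*
 * tags can derive the still-active mask that ata_qc_complete_multiple()
 * expects by XOR-ing; done_mask below would come from controller registers.
 */
#if 0	/* sketch only, not compiled */
	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
#endif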
4955  
4956  /**
4957   *	ata_qc_issue - issue taskfile to device
4958   *	@qc: command to issue to device
4959   *
4960   *	Prepare an ATA command for submission to the device.
4961   *	This includes mapping the data into a DMA-able
4962   *	area, filling in the S/G table, and finally
4963   *	writing the taskfile to hardware, starting the command.
4964   *
4965   *	LOCKING:
4966   *	spin_lock_irqsave(host lock)
4967   */
4968  void ata_qc_issue(struct ata_queued_cmd *qc)
4969  {
4970  	struct ata_port *ap = qc->ap;
4971  	struct ata_link *link = qc->dev->link;
4972  	u8 prot = qc->tf.protocol;
4973  
4974  	/* Make sure only one non-NCQ command is outstanding. */
4975  	WARN_ON_ONCE(ata_tag_valid(link->active_tag));
4976  
4977  	if (ata_is_ncq(prot)) {
4978  		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
4979  
4980  		if (!link->sactive)
4981  			ap->nr_active_links++;
4982  		link->sactive |= 1 << qc->hw_tag;
4983  	} else {
4984  		WARN_ON_ONCE(link->sactive);
4985  
4986  		ap->nr_active_links++;
4987  		link->active_tag = qc->tag;
4988  	}
4989  
4990  	qc->flags |= ATA_QCFLAG_ACTIVE;
4991  	ap->qc_active |= 1ULL << qc->tag;
4992  
4993  	/*
4994  	 * We guarantee to LLDs that they will have at least one
4995  	 * non-zero sg if the command is a data command.
4996  	 */
4997  	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
4998  		goto sys_err;
4999  
5000  	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5001  				 (ap->flags & ATA_FLAG_PIO_DMA)))
5002  		if (ata_sg_setup(qc))
5003  			goto sys_err;
5004  
5005  	/* if device is sleeping, schedule reset and abort the link */
5006  	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5007  		link->eh_info.action |= ATA_EH_RESET;
5008  		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5009  		ata_link_abort(link);
5010  		return;
5011  	}
5012  
5013  	if (ap->ops->qc_prep) {
5014  		trace_ata_qc_prep(qc);
5015  		qc->err_mask |= ap->ops->qc_prep(qc);
5016  		if (unlikely(qc->err_mask))
5017  			goto err;
5018  	}
5019  
5020  	trace_ata_qc_issue(qc);
5021  	qc->err_mask |= ap->ops->qc_issue(qc);
5022  	if (unlikely(qc->err_mask))
5023  		goto err;
5024  	return;
5025  
5026  sys_err:
5027  	qc->err_mask |= AC_ERR_SYSTEM;
5028  err:
5029  	ata_qc_complete(qc);
5030  }
5031  
5032  /**
5033   *	ata_phys_link_online - test whether the given link is online
5034   *	@link: ATA link to test
5035   *
5036   *	Test whether @link is online.  Note that this function returns
5037   *	0 if online status of @link cannot be obtained, so
5038   *	ata_link_online(link) != !ata_link_offline(link).
5039   *
5040   *	LOCKING:
5041   *	None.
5042   *
5043   *	RETURNS:
5044   *	True if the port online status is available and online.
5045   */
5046  bool ata_phys_link_online(struct ata_link *link)
5047  {
5048  	u32 sstatus;
5049  
5050  	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5051  	    ata_sstatus_online(sstatus))
5052  		return true;
5053  	return false;
5054  }
5055  
5056  /**
5057   *	ata_phys_link_offline - test whether the given link is offline
5058   *	@link: ATA link to test
5059   *
5060   *	Test whether @link is offline.  Note that this function
5061   *	returns 0 if offline status of @link cannot be obtained, so
5062   *	ata_link_online(link) != !ata_link_offline(link).
5063   *
5064   *	LOCKING:
5065   *	None.
5066   *
5067   *	RETURNS:
5068   *	True if the port offline status is available and offline.
5069   */
5070  bool ata_phys_link_offline(struct ata_link *link)
5071  {
5072  	u32 sstatus;
5073  
5074  	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5075  	    !ata_sstatus_online(sstatus))
5076  		return true;
5077  	return false;
5078  }
5079  
5080  /**
5081   *	ata_link_online - test whether the given link is online
5082   *	@link: ATA link to test
5083   *
5084   *	Test whether @link is online.  This is identical to
5085   *	ata_phys_link_online() when there's no slave link.  When
5086   *	there's a slave link, this function should only be called on
5087   *	the master link and will return true if any of M/S links is
5088   *	online.
5089   *
5090   *	LOCKING:
5091   *	None.
5092   *
5093   *	RETURNS:
5094   *	True if the port online status is available and online.
5095   */
5096  bool ata_link_online(struct ata_link *link)
5097  {
5098  	struct ata_link *slave = link->ap->slave_link;
5099  
5100  	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5101  
5102  	return ata_phys_link_online(link) ||
5103  		(slave && ata_phys_link_online(slave));
5104  }
5105  EXPORT_SYMBOL_GPL(ata_link_online);
5106  
5107  /**
5108   *	ata_link_offline - test whether the given link is offline
5109   *	@link: ATA link to test
5110   *
5111   *	Test whether @link is offline.  This is identical to
5112   *	ata_phys_link_offline() when there's no slave link.  When
5113   *	there's a slave link, this function should only be called on
5114   *	the master link and will return true if both M/S links are
5115   *	offline.
5116   *
5117   *	LOCKING:
5118   *	None.
5119   *
5120   *	RETURNS:
5121   *	True if the port offline status is available and offline.
5122   */
5123  bool ata_link_offline(struct ata_link *link)
5124  {
5125  	struct ata_link *slave = link->ap->slave_link;
5126  
5127  	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5128  
5129  	return ata_phys_link_offline(link) &&
5130  		(!slave || ata_phys_link_offline(slave));
5131  }
5132  EXPORT_SYMBOL_GPL(ata_link_offline);
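
/*
 * Editor's usage sketch (not upstream code): because both helpers return
 * false when the link's SStatus cannot be read, !ata_link_online() does
 * not imply ata_link_offline().  A caller must test the state it actually
 * needs; "link" here stands for any master link the caller already holds.
 */
#if 0
	if (ata_link_online(link)) {
		/* PHY readable and communication established */
	} else if (ata_link_offline(link)) {
		/* PHY readable and no device attached */
	} else {
		/* SStatus unavailable: state is unknown, not offline */
	}
#endif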
5133  
5134  #ifdef CONFIG_PM
5135  static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5136  				unsigned int action, unsigned int ehi_flags,
5137  				bool async)
5138  {
5139  	struct ata_link *link;
5140  	unsigned long flags;
5141  
5142  	spin_lock_irqsave(ap->lock, flags);
5143  
5144  	/*
5145  	 * A previous PM operation might still be in progress. Wait for
5146  	 * ATA_PFLAG_PM_PENDING to clear.
5147  	 */
5148  	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5149  		spin_unlock_irqrestore(ap->lock, flags);
5150  		ata_port_wait_eh(ap);
5151  		spin_lock_irqsave(ap->lock, flags);
5152  	}
5153  
5154  	/* Request PM operation to EH */
5155  	ap->pm_mesg = mesg;
5156  	ap->pflags |= ATA_PFLAG_PM_PENDING;
5157  	ata_for_each_link(link, ap, HOST_FIRST) {
5158  		link->eh_info.action |= action;
5159  		link->eh_info.flags |= ehi_flags;
5160  	}
5161  
5162  	ata_port_schedule_eh(ap);
5163  
5164  	spin_unlock_irqrestore(ap->lock, flags);
5165  
5166  	if (!async)
5167  		ata_port_wait_eh(ap);
5168  }
5169  
5170  static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg,
5171  			     bool async)
5172  {
5173  	/*
5174  	 * We are about to suspend the port, so we do not care about
5175  	 * scsi_rescan_device() calls scheduled by previous resume operations.
5176  	 * The next resume will schedule the rescan again. So cancel any rescan
5177  	 * that is not done yet.
5178  	 */
5179  	cancel_delayed_work_sync(&ap->scsi_rescan_task);
5180  
5181  	/*
5182  	 * On some hardware, the device fails to respond after being spun down
5183  	 * for suspend. As the device will not be used until it is resumed, we
5184  	 * do not need to touch the device. Ask EH to skip the usual stuff
5185  	 * and proceed directly to suspend.
5186  	 *
5187  	 * http://thread.gmane.org/gmane.linux.ide/46764
5188  	 */
5189  	ata_port_request_pm(ap, mesg, 0,
5190  			    ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
5191  			    ATA_EHI_NO_RECOVERY,
5192  			    async);
5193  }
5194  
5195  static int ata_port_pm_suspend(struct device *dev)
5196  {
5197  	struct ata_port *ap = to_ata_port(dev);
5198  
5199  	if (pm_runtime_suspended(dev))
5200  		return 0;
5201  
5202  	ata_port_suspend(ap, PMSG_SUSPEND, false);
5203  	return 0;
5204  }
5205  
5206  static int ata_port_pm_freeze(struct device *dev)
5207  {
5208  	struct ata_port *ap = to_ata_port(dev);
5209  
5210  	if (pm_runtime_suspended(dev))
5211  		return 0;
5212  
5213  	ata_port_suspend(ap, PMSG_FREEZE, false);
5214  	return 0;
5215  }
5216  
5217  static int ata_port_pm_poweroff(struct device *dev)
5218  {
5219  	if (!pm_runtime_suspended(dev))
5220  		ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE, false);
5221  	return 0;
5222  }
5223  
5224  static void ata_port_resume(struct ata_port *ap, pm_message_t mesg,
5225  			    bool async)
5226  {
5227  	ata_port_request_pm(ap, mesg, ATA_EH_RESET,
5228  			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
5229  			    async);
5230  }
5231  
5232  static int ata_port_pm_resume(struct device *dev)
5233  {
5234  	if (!pm_runtime_suspended(dev))
5235  		ata_port_resume(to_ata_port(dev), PMSG_RESUME, true);
5236  	return 0;
5237  }
5238  
5239  /*
5240   * For ODDs, the upper layer will poll for media change every few seconds,
5241   * which makes it enter and leave the suspend state every few seconds. As
5242   * each suspend/resume cycle causes a hard/soft reset, runtime suspend gains
5243   * very little and the ODD may malfunction after being constantly reset.
5244   * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5245   * ODD is attached to the port.
5246   */
5247  static int ata_port_runtime_idle(struct device *dev)
5248  {
5249  	struct ata_port *ap = to_ata_port(dev);
5250  	struct ata_link *link;
5251  	struct ata_device *adev;
5252  
5253  	ata_for_each_link(link, ap, HOST_FIRST) {
5254  		ata_for_each_dev(adev, link, ENABLED)
5255  			if (adev->class == ATA_DEV_ATAPI &&
5256  			    !zpodd_dev_enabled(adev))
5257  				return -EBUSY;
5258  	}
5259  
5260  	return 0;
5261  }
5262  
5263  static int ata_port_runtime_suspend(struct device *dev)
5264  {
5265  	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND, false);
5266  	return 0;
5267  }
5268  
5269  static int ata_port_runtime_resume(struct device *dev)
5270  {
5271  	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME, false);
5272  	return 0;
5273  }
5274  
5275  static const struct dev_pm_ops ata_port_pm_ops = {
5276  	.suspend = ata_port_pm_suspend,
5277  	.resume = ata_port_pm_resume,
5278  	.freeze = ata_port_pm_freeze,
5279  	.thaw = ata_port_pm_resume,
5280  	.poweroff = ata_port_pm_poweroff,
5281  	.restore = ata_port_pm_resume,
5282  
5283  	.runtime_suspend = ata_port_runtime_suspend,
5284  	.runtime_resume = ata_port_runtime_resume,
5285  	.runtime_idle = ata_port_runtime_idle,
5286  };
5287  
5288  /* sas ports don't participate in pm runtime management of ata_ports,
5289   * and need to resume ata devices at the domain level, not the per-port
5290   * level. sas suspend/resume is async to allow parallel port recovery
5291   * since sas has multiple ata_port instances per Scsi_Host.
5292   */
5293  void ata_sas_port_suspend(struct ata_port *ap)
5294  {
5295  	ata_port_suspend(ap, PMSG_SUSPEND, true);
5296  }
5297  EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5298  
5299  void ata_sas_port_resume(struct ata_port *ap)
5300  {
5301  	ata_port_resume(ap, PMSG_RESUME, true);
5302  }
5303  EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5304  
5305  /**
5306   *	ata_host_suspend - suspend host
5307   *	@host: host to suspend
5308   *	@mesg: PM message
5309   *
5310   *	Suspend @host.  Actual operation is performed by port suspend.
5311   */
5312  void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5313  {
5314  	host->dev->power.power_state = mesg;
5315  }
5316  EXPORT_SYMBOL_GPL(ata_host_suspend);
5317  
5318  /**
5319   *	ata_host_resume - resume host
5320   *	@host: host to resume
5321   *
5322   *	Resume @host.  Actual operation is performed by port resume.
5323   */
5324  void ata_host_resume(struct ata_host *host)
5325  {
5326  	host->dev->power.power_state = PMSG_ON;
5327  }
5328  EXPORT_SYMBOL_GPL(ata_host_resume);
5329  #endif
5330  
5331  const struct device_type ata_port_type = {
5332  	.name = ATA_PORT_TYPE_NAME,
5333  #ifdef CONFIG_PM
5334  	.pm = &ata_port_pm_ops,
5335  #endif
5336  };
5337  
5338  /**
5339   *	ata_dev_init - Initialize an ata_device structure
5340   *	@dev: Device structure to initialize
5341   *
5342   *	Initialize @dev in preparation for probing.
5343   *
5344   *	LOCKING:
5345   *	Inherited from caller.
5346   */
5347  void ata_dev_init(struct ata_device *dev)
5348  {
5349  	struct ata_link *link = ata_dev_phys_link(dev);
5350  	struct ata_port *ap = link->ap;
5351  	unsigned long flags;
5352  
5353  	/* SATA spd limit is bound to the attached device, reset together */
5354  	link->sata_spd_limit = link->hw_sata_spd_limit;
5355  	link->sata_spd = 0;
5356  
5357  	/* High bits of dev->flags are used to record warm plug
5358  	 * requests which occur asynchronously.  Synchronize using
5359  	 * host lock.
5360  	 */
5361  	spin_lock_irqsave(ap->lock, flags);
5362  	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5363  	dev->quirks = 0;
5364  	spin_unlock_irqrestore(ap->lock, flags);
5365  
5366  	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5367  	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5368  	dev->pio_mask = UINT_MAX;
5369  	dev->mwdma_mask = UINT_MAX;
5370  	dev->udma_mask = UINT_MAX;
5371  }
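
/*
 * Editor's sketch of the partial-clear pattern used above (assumption-
 * labelled, not upstream code): ATA_DEVICE_CLEAR_BEGIN/END are byte offsets
 * bracketing the re-probed fields of struct ata_device, so the memset wipes
 * only that window while dev->link, dev->devno etc. survive.  A standalone
 * analogue with a hypothetical struct:
 */
#if 0
struct my_dev {
	int		persistent;	/* survives re-init */
	unsigned long	clear_begin[0];	/* marker: first cleared byte */
	int		state;
	int		n_errors;
	unsigned long	clear_end[0];	/* marker: one past last cleared byte */
};

static void my_dev_reinit(struct my_dev *d)
{
	memset((void *)d + offsetof(struct my_dev, clear_begin), 0,
	       offsetof(struct my_dev, clear_end) -
	       offsetof(struct my_dev, clear_begin));
}
#endif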
5372  
5373  /**
5374   *	ata_link_init - Initialize an ata_link structure
5375   *	@ap: ATA port link is attached to
5376   *	@link: Link structure to initialize
5377   *	@pmp: Port multiplier port number
5378   *
5379   *	Initialize @link.
5380   *
5381   *	LOCKING:
5382   *	Kernel thread context (may sleep)
5383   */
5384  void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5385  {
5386  	int i;
5387  
5388  	/* clear everything except for devices */
5389  	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5390  	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5391  
5392  	link->ap = ap;
5393  	link->pmp = pmp;
5394  	link->active_tag = ATA_TAG_POISON;
5395  	link->hw_sata_spd_limit = UINT_MAX;
5396  
5397  	/* can't use iterator, ap isn't initialized yet */
5398  	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5399  		struct ata_device *dev = &link->device[i];
5400  
5401  		dev->link = link;
5402  		dev->devno = dev - link->device;
5403  #ifdef CONFIG_ATA_ACPI
5404  		dev->gtf_filter = ata_acpi_gtf_filter;
5405  #endif
5406  		ata_dev_init(dev);
5407  	}
5408  }
5409  
5410  /**
5411   *	sata_link_init_spd - Initialize link->sata_spd_limit
5412   *	@link: Link to configure sata_spd_limit for
5413   *
5414   *	Initialize ``link->[hw_]sata_spd_limit`` to the currently
5415   *	configured value.
5416   *
5417   *	LOCKING:
5418   *	Kernel thread context (may sleep).
5419   *
5420   *	RETURNS:
5421   *	0 on success, -errno on failure.
5422   */
5423  int sata_link_init_spd(struct ata_link *link)
5424  {
5425  	u8 spd;
5426  	int rc;
5427  
5428  	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5429  	if (rc)
5430  		return rc;
5431  
5432  	spd = (link->saved_scontrol >> 4) & 0xf;
5433  	if (spd)
5434  		link->hw_sata_spd_limit &= (1 << spd) - 1;
5435  
5436  	ata_force_link_limits(link);
5437  
5438  	link->sata_spd_limit = link->hw_sata_spd_limit;
5439  
5440  	return 0;
5441  }
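
/*
 * Editor's worked example (not upstream text): SControl bits 7:4 hold the
 * configured speed limit.  With saved_scontrol == 0x021:
 *
 *	spd = (0x021 >> 4) & 0xf = 2
 *	hw_sata_spd_limit &= (1 << 2) - 1 = 0x3
 *
 * leaving Gen1 (1.5Gbps) and Gen2 (3.0Gbps) allowed and masking Gen3 off.
 */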
5442  
5443  /**
5444   *	ata_port_alloc - allocate and initialize basic ATA port resources
5445   *	@host: ATA host this allocated port belongs to
5446   *
5447   *	Allocate and initialize basic ATA port resources.
5448   *
5449   *	RETURNS:
5450   *	Allocated ATA port on success, NULL on failure.
5451   *
5452   *	LOCKING:
5453   *	Inherited from calling layer (may sleep).
5454   */
5455  struct ata_port *ata_port_alloc(struct ata_host *host)
5456  {
5457  	struct ata_port *ap;
5458  	int id;
5459  
5460  	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5461  	if (!ap)
5462  		return NULL;
5463  
5464  	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5465  	ap->lock = &host->lock;
5466  	id = ida_alloc_min(&ata_ida, 1, GFP_KERNEL);
5467  	if (id < 0) {
5468  		kfree(ap);
5469  		return NULL;
5470  	}
5471  	ap->print_id = id;
5472  	ap->host = host;
5473  	ap->dev = host->dev;
5474  
5475  	mutex_init(&ap->scsi_scan_mutex);
5476  	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5477  	INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5478  	INIT_LIST_HEAD(&ap->eh_done_q);
5479  	init_waitqueue_head(&ap->eh_wait_q);
5480  	init_completion(&ap->park_req_pending);
5481  	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
5482  		    TIMER_DEFERRABLE);
5483  
5484  	ap->cbl = ATA_CBL_NONE;
5485  
5486  	ata_link_init(ap, &ap->link, 0);
5487  
5488  #ifdef ATA_IRQ_TRAP
5489  	ap->stats.unhandled_irq = 1;
5490  	ap->stats.idle_irq = 1;
5491  #endif
5492  	ata_sff_port_init(ap);
5493  
5494  	ata_force_pflags(ap);
5495  
5496  	return ap;
5497  }
5498  EXPORT_SYMBOL_GPL(ata_port_alloc);
5499  
5500  void ata_port_free(struct ata_port *ap)
5501  {
5502  	if (!ap)
5503  		return;
5504  
5505  	kfree(ap->pmp_link);
5506  	kfree(ap->slave_link);
5507  	ida_free(&ata_ida, ap->print_id);
5508  	kfree(ap);
5509  }
5510  EXPORT_SYMBOL_GPL(ata_port_free);
5511  
5512  static void ata_devres_release(struct device *gendev, void *res)
5513  {
5514  	struct ata_host *host = dev_get_drvdata(gendev);
5515  	int i;
5516  
5517  	for (i = 0; i < host->n_ports; i++) {
5518  		struct ata_port *ap = host->ports[i];
5519  
5520  		if (!ap)
5521  			continue;
5522  
5523  		if (ap->scsi_host)
5524  			scsi_host_put(ap->scsi_host);
5525  
5526  	}
5527  
5528  	dev_set_drvdata(gendev, NULL);
5529  	ata_host_put(host);
5530  }
5531  
5532  static void ata_host_release(struct kref *kref)
5533  {
5534  	struct ata_host *host = container_of(kref, struct ata_host, kref);
5535  	int i;
5536  
5537  	for (i = 0; i < host->n_ports; i++) {
5538  		ata_port_free(host->ports[i]);
5539  		host->ports[i] = NULL;
5540  	}
5541  	kfree(host);
5542  }
5543  
5544  void ata_host_get(struct ata_host *host)
5545  {
5546  	kref_get(&host->kref);
5547  }
5548  
5549  void ata_host_put(struct ata_host *host)
5550  {
5551  	kref_put(&host->kref, ata_host_release);
5552  }
5553  EXPORT_SYMBOL_GPL(ata_host_put);
5554  
5555  /**
5556   *	ata_host_alloc - allocate and init basic ATA host resources
5557   *	@dev: generic device this host is associated with
5558   *	@n_ports: the number of ATA ports associated with this host
5559   *
5560   *	Allocate and initialize basic ATA host resources.  An LLD calls
5561   *	this function to allocate a host, then initializes it fully and
5562   *	attaches it using ata_host_register().
5563   *
5564   *	RETURNS:
5565   *	Allocated ATA host on success, NULL on failure.
5566   *
5567   *	LOCKING:
5568   *	Inherited from calling layer (may sleep).
5569   */
5570  struct ata_host *ata_host_alloc(struct device *dev, int n_ports)
5571  {
5572  	struct ata_host *host;
5573  	size_t sz;
5574  	int i;
5575  	void *dr;
5576  
5577  	/* alloc a container for our list of ATA ports (buses) */
5578  	sz = sizeof(struct ata_host) + n_ports * sizeof(void *);
5579  	host = kzalloc(sz, GFP_KERNEL);
5580  	if (!host)
5581  		return NULL;
5582  
5583  	if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
5584  		kfree(host);
5585  		return NULL;
5586  	}
5587  
5588  	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
5589  	if (!dr) {
5590  		kfree(host);
5591  		goto err_out;
5592  	}
5593  
5594  	devres_add(dev, dr);
5595  	dev_set_drvdata(dev, host);
5596  
5597  	spin_lock_init(&host->lock);
5598  	mutex_init(&host->eh_mutex);
5599  	host->dev = dev;
5600  	host->n_ports = n_ports;
5601  	kref_init(&host->kref);
5602  
5603  	/* allocate ports bound to this host */
5604  	for (i = 0; i < n_ports; i++) {
5605  		struct ata_port *ap;
5606  
5607  		ap = ata_port_alloc(host);
5608  		if (!ap)
5609  			goto err_out;
5610  
5611  		ap->port_no = i;
5612  		host->ports[i] = ap;
5613  	}
5614  
5615  	devres_remove_group(dev, NULL);
5616  	return host;
5617  
5618   err_out:
5619  	devres_release_group(dev, NULL);
5620  	return NULL;
5621  }
5622  EXPORT_SYMBOL_GPL(ata_host_alloc);
5623  
5624  /**
5625   *	ata_host_alloc_pinfo - alloc host and init with port_info array
5626   *	@dev: generic device this host is associated with
5627   *	@ppi: array of ATA port_info to initialize host with
5628   *	@n_ports: number of ATA ports attached to this host
5629   *
5630   *	Allocate ATA host and initialize with info from @ppi.  If NULL
5631   *	terminated, @ppi may contain fewer entries than @n_ports.  The
5632   *	last entry will be used for the remaining ports.
5633   *
5634   *	RETURNS:
5635   *	Allocated ATA host on success, NULL on failure.
5636   *
5637   *	LOCKING:
5638   *	Inherited from calling layer (may sleep).
5639   */
5640  struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5641  				      const struct ata_port_info * const * ppi,
5642  				      int n_ports)
5643  {
5644  	const struct ata_port_info *pi = &ata_dummy_port_info;
5645  	struct ata_host *host;
5646  	int i, j;
5647  
5648  	host = ata_host_alloc(dev, n_ports);
5649  	if (!host)
5650  		return NULL;
5651  
5652  	for (i = 0, j = 0; i < host->n_ports; i++) {
5653  		struct ata_port *ap = host->ports[i];
5654  
5655  		if (ppi[j])
5656  			pi = ppi[j++];
5657  
5658  		ap->pio_mask = pi->pio_mask;
5659  		ap->mwdma_mask = pi->mwdma_mask;
5660  		ap->udma_mask = pi->udma_mask;
5661  		ap->flags |= pi->flags;
5662  		ap->link.flags |= pi->link_flags;
5663  		ap->ops = pi->port_ops;
5664  
5665  		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5666  			host->ops = pi->port_ops;
5667  	}
5668  
5669  	return host;
5670  }
5671  EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
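
/*
 * Editor's usage sketch (hypothetical LLD code, not upstream): @ppi may be
 * shorter than @n_ports when NULL-terminated; the last entry then covers
 * the remaining ports.  my_port_info and parent are assumptions.
 */
#if 0
	static const struct ata_port_info my_port_info = {
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ata_dummy_port_ops,	/* stand-in ops */
	};
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
	struct ata_host *host = ata_host_alloc_pinfo(parent, ppi, 2);
	/* both ports get my_port_info since ppi is NULL-terminated */
#endif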
5672  
5673  static void ata_host_stop(struct device *gendev, void *res)
5674  {
5675  	struct ata_host *host = dev_get_drvdata(gendev);
5676  	int i;
5677  
5678  	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5679  
5680  	for (i = 0; i < host->n_ports; i++) {
5681  		struct ata_port *ap = host->ports[i];
5682  
5683  		if (ap->ops->port_stop)
5684  			ap->ops->port_stop(ap);
5685  	}
5686  
5687  	if (host->ops->host_stop)
5688  		host->ops->host_stop(host);
5689  }
5690  
5691  /**
5692   *	ata_finalize_port_ops - finalize ata_port_operations
5693   *	@ops: ata_port_operations to finalize
5694   *
5695   *	An ata_port_operations can inherit from another ops and that
5696   *	ops can again inherit from another.  This can go on as many
5697   *	times as necessary as long as there is no loop in the
5698   *	inheritance chain.
5699   *
5700   *	Ops tables are finalized when the host is started.  NULL or
5701   *	unspecified entries are inherited from the closest ancestor
5702   *	which has the method and the entry is populated with it.
5703   *	After finalization, the ops table directly points to all the
5704   *	methods and ->inherits is no longer necessary and cleared.
5705   *
5706   *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5707   *
5708   *	LOCKING:
5709   *	None.
5710   */
5711  static void ata_finalize_port_ops(struct ata_port_operations *ops)
5712  {
5713  	static DEFINE_SPINLOCK(lock);
5714  	const struct ata_port_operations *cur;
5715  	void **begin = (void **)ops;
5716  	void **end = (void **)&ops->inherits;
5717  	void **pp;
5718  
5719  	if (!ops || !ops->inherits)
5720  		return;
5721  
5722  	spin_lock(&lock);
5723  
5724  	for (cur = ops->inherits; cur; cur = cur->inherits) {
5725  		void **inherit = (void **)cur;
5726  
5727  		for (pp = begin; pp < end; pp++, inherit++)
5728  			if (!*pp)
5729  				*pp = *inherit;
5730  	}
5731  
5732  	for (pp = begin; pp < end; pp++)
5733  		if (IS_ERR(*pp))
5734  			*pp = NULL;
5735  
5736  	ops->inherits = NULL;
5737  
5738  	spin_unlock(&lock);
5739  }
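
/*
 * Editor's sketch (not upstream code): a typical derived ops table sets
 * .inherits and overrides selected methods; ATA_OP_NULL forces a slot to
 * NULL even when an ancestor provides it.  my_ops and my_qc_issue are
 * hypothetical names.
 */
#if 0
static struct ata_port_operations my_ops = {
	.inherits	= &ata_base_port_ops,	/* unset slots filled from here */
	.qc_issue	= my_qc_issue,		/* explicit override */
	.prereset	= ATA_OP_NULL,		/* force-disable inherited slot */
};
/* after ata_finalize_port_ops(&my_ops), every slot is resolved and
 * my_ops.inherits is cleared */
#endif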
5740  
5741  /**
5742   *	ata_host_start - start and freeze ports of an ATA host
5743   *	@host: ATA host to start ports for
5744   *
5745   *	Start and then freeze ports of @host.  Started status is
5746   *	recorded in host->flags, so this function can be called
5747   *	multiple times.  Ports are guaranteed to get started only
5748   *	once.  If host->ops is not initialized yet, it is set to the
5749   *	first non-dummy port ops.
5750   *
5751   *	LOCKING:
5752   *	Inherited from calling layer (may sleep).
5753   *
5754   *	RETURNS:
5755   *	0 if all ports are started successfully, -errno otherwise.
5756   */
5757  int ata_host_start(struct ata_host *host)
5758  {
5759  	int have_stop = 0;
5760  	void *start_dr = NULL;
5761  	int i, rc;
5762  
5763  	if (host->flags & ATA_HOST_STARTED)
5764  		return 0;
5765  
5766  	ata_finalize_port_ops(host->ops);
5767  
5768  	for (i = 0; i < host->n_ports; i++) {
5769  		struct ata_port *ap = host->ports[i];
5770  
5771  		ata_finalize_port_ops(ap->ops);
5772  
5773  		if (!host->ops && !ata_port_is_dummy(ap))
5774  			host->ops = ap->ops;
5775  
5776  		if (ap->ops->port_stop)
5777  			have_stop = 1;
5778  	}
5779  
5780  	if (host->ops && host->ops->host_stop)
5781  		have_stop = 1;
5782  
5783  	if (have_stop) {
5784  		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5785  		if (!start_dr)
5786  			return -ENOMEM;
5787  	}
5788  
5789  	for (i = 0; i < host->n_ports; i++) {
5790  		struct ata_port *ap = host->ports[i];
5791  
5792  		if (ap->ops->port_start) {
5793  			rc = ap->ops->port_start(ap);
5794  			if (rc) {
5795  				if (rc != -ENODEV)
5796  					dev_err(host->dev,
5797  						"failed to start port %d (errno=%d)\n",
5798  						i, rc);
5799  				goto err_out;
5800  			}
5801  		}
5802  		ata_eh_freeze_port(ap);
5803  	}
5804  
5805  	if (start_dr)
5806  		devres_add(host->dev, start_dr);
5807  	host->flags |= ATA_HOST_STARTED;
5808  	return 0;
5809  
5810   err_out:
5811  	while (--i >= 0) {
5812  		struct ata_port *ap = host->ports[i];
5813  
5814  		if (ap->ops->port_stop)
5815  			ap->ops->port_stop(ap);
5816  	}
5817  	devres_free(start_dr);
5818  	return rc;
5819  }
5820  EXPORT_SYMBOL_GPL(ata_host_start);
5821  
5822  /**
5823   *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
5824   *	@host:	host to initialize
5825   *	@dev:	device host is attached to
5826   *	@ops:	port_ops
5827   *
5828   */
5829  void ata_host_init(struct ata_host *host, struct device *dev,
5830  		   struct ata_port_operations *ops)
5831  {
5832  	spin_lock_init(&host->lock);
5833  	mutex_init(&host->eh_mutex);
5834  	host->n_tags = ATA_MAX_QUEUE;
5835  	host->dev = dev;
5836  	host->ops = ops;
5837  	kref_init(&host->kref);
5838  }
5839  EXPORT_SYMBOL_GPL(ata_host_init);
5840  
5841  void ata_port_probe(struct ata_port *ap)
5842  {
5843  	struct ata_eh_info *ehi = &ap->link.eh_info;
5844  	unsigned long flags;
5845  
5846  	/* kick EH for boot probing */
5847  	spin_lock_irqsave(ap->lock, flags);
5848  
5849  	ehi->probe_mask |= ATA_ALL_DEVICES;
5850  	ehi->action |= ATA_EH_RESET;
5851  	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5852  
5853  	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5854  	ap->pflags |= ATA_PFLAG_LOADING;
5855  	ata_port_schedule_eh(ap);
5856  
5857  	spin_unlock_irqrestore(ap->lock, flags);
5858  }
5859  EXPORT_SYMBOL_GPL(ata_port_probe);
5860  
5861  static void async_port_probe(void *data, async_cookie_t cookie)
5862  {
5863  	struct ata_port *ap = data;
5864  
5865  	/*
5866  	 * If we're not allowed to scan this host in parallel,
5867  	 * we need to wait until all previous scans have completed
5868  	 * before going further.
5869  	 * Jeff Garzik says this is only within a controller, so we
5870  	 * don't need to wait for port 0, only for later ports.
5871  	 */
5872  	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5873  		async_synchronize_cookie(cookie);
5874  
5875  	ata_port_probe(ap);
5876  	ata_port_wait_eh(ap);
5877  
5878  	/* in order to keep device order, we need to synchronize at this point */
5879  	async_synchronize_cookie(cookie);
5880  
5881  	ata_scsi_scan_host(ap, 1);
5882  }
5883  
5884  /**
5885   *	ata_host_register - register initialized ATA host
5886   *	@host: ATA host to register
5887   *	@sht: template for SCSI host
5888   *
5889   *	Register initialized ATA host.  @host is allocated using
5890   *	ata_host_alloc() and fully initialized by LLD.  This function
5891   *	starts ports, registers @host with ATA and SCSI layers and
5892   *	probes registered devices.
5893   *
5894   *	LOCKING:
5895   *	Inherited from calling layer (may sleep).
5896   *
5897   *	RETURNS:
5898   *	0 on success, -errno otherwise.
5899   */
5900  int ata_host_register(struct ata_host *host, const struct scsi_host_template *sht)
5901  {
5902  	int i, rc;
5903  
5904  	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
5905  
5906  	/* host must have been started */
5907  	if (!(host->flags & ATA_HOST_STARTED)) {
5908  		dev_err(host->dev, "BUG: trying to register unstarted host\n");
5909  		WARN_ON(1);
5910  		return -EINVAL;
5911  	}
5912  
5913  	/* Create associated sysfs transport objects  */
5914  	for (i = 0; i < host->n_ports; i++) {
5915  		rc = ata_tport_add(host->dev, host->ports[i]);
5916  		if (rc) {
5917  			goto err_tadd;
5918  		}
5919  	}
5920  
5921  	rc = ata_scsi_add_hosts(host, sht);
5922  	if (rc)
5923  		goto err_tadd;
5924  
5925  	/* set cable, sata_spd_limit and report */
5926  	for (i = 0; i < host->n_ports; i++) {
5927  		struct ata_port *ap = host->ports[i];
5928  		unsigned int xfer_mask;
5929  
5930  		/* set SATA cable type if still unset */
5931  		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5932  			ap->cbl = ATA_CBL_SATA;
5933  
5934  		/* init sata_spd_limit to the current value */
5935  		sata_link_init_spd(&ap->link);
5936  		if (ap->slave_link)
5937  			sata_link_init_spd(ap->slave_link);
5938  
5939  		/* print per-port info to dmesg */
5940  		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5941  					      ap->udma_mask);
5942  
5943  		if (!ata_port_is_dummy(ap)) {
5944  			ata_port_info(ap, "%cATA max %s %s\n",
5945  				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5946  				      ata_mode_string(xfer_mask),
5947  				      ap->link.eh_info.desc);
5948  			ata_ehi_clear_desc(&ap->link.eh_info);
5949  		} else
5950  			ata_port_info(ap, "DUMMY\n");
5951  	}
5952  
5953  	/* perform each probe asynchronously */
5954  	for (i = 0; i < host->n_ports; i++) {
5955  		struct ata_port *ap = host->ports[i];
5956  		ap->cookie = async_schedule(async_port_probe, ap);
5957  	}
5958  
5959  	return 0;
5960  
5961   err_tadd:
5962  	while (--i >= 0) {
5963  		ata_tport_delete(host->ports[i]);
5964  	}
5965  	return rc;
5966  
5967  }
5968  EXPORT_SYMBOL_GPL(ata_host_register);
5969  
5970  /**
5971   *	ata_host_activate - start host, request IRQ and register it
5972   *	@host: target ATA host
5973   *	@irq: IRQ to request
5974   *	@irq_handler: irq_handler used when requesting IRQ
5975   *	@irq_flags: irq_flags used when requesting IRQ
5976   *	@sht: scsi_host_template to use when registering the host
5977   *
5978   *	After allocating an ATA host and initializing it, most libata
5979   *	LLDs perform three steps to activate the host - start host,
5980   *	request IRQ and register it.  This helper takes necessary
5981   *	arguments and performs the three steps in one go.
5982   *
5983   *	An invalid IRQ skips the IRQ registration and expects the host to
5984   *	have set polling mode on the port. In this case, @irq_handler
5985   *	should be NULL.
5986   *
5987   *	LOCKING:
5988   *	Inherited from calling layer (may sleep).
5989   *
5990   *	RETURNS:
5991   *	0 on success, -errno otherwise.
5992   */
5993  int ata_host_activate(struct ata_host *host, int irq,
5994  		      irq_handler_t irq_handler, unsigned long irq_flags,
5995  		      const struct scsi_host_template *sht)
5996  {
5997  	int i, rc;
5998  	char *irq_desc;
5999  
6000  	rc = ata_host_start(host);
6001  	if (rc)
6002  		return rc;
6003  
6004  	/* Special case for polling mode */
6005  	if (!irq) {
6006  		WARN_ON(irq_handler);
6007  		return ata_host_register(host, sht);
6008  	}
6009  
6010  	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6011  				  dev_driver_string(host->dev),
6012  				  dev_name(host->dev));
6013  	if (!irq_desc)
6014  		return -ENOMEM;
6015  
6016  	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6017  			      irq_desc, host);
6018  	if (rc)
6019  		return rc;
6020  
6021  	for (i = 0; i < host->n_ports; i++)
6022  		ata_port_desc_misc(host->ports[i], irq);
6023  
6024  	rc = ata_host_register(host, sht);
6025  	/* if failed, just free the IRQ and leave ports alone */
6026  	if (rc)
6027  		devm_free_irq(host->dev, irq, host);
6028  
6029  	return rc;
6030  }
6031  EXPORT_SYMBOL_GPL(ata_host_activate);
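
/*
 * Editor's sketch (not upstream code): the three steps ata_host_activate()
 * bundles, written out as a hypothetical LLD would otherwise perform them.
 * my_interrupt, my_sht and irq are assumptions.
 */
#if 0
	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(host->dev, irq, my_interrupt, IRQF_SHARED,
			      dev_name(host->dev), host);
	if (rc)
		return rc;

	return ata_host_register(host, &my_sht);
#endif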
6032  
6033  /**
6034   *	ata_dev_free_resources - Free a device's resources
6035   *	@dev: Target ATA device
6036   *
6037   *	Free resources allocated to support a device's features.
6038   *
6039   *	LOCKING:
6040   *	Kernel thread context (may sleep).
6041   */
6042  void ata_dev_free_resources(struct ata_device *dev)
6043  {
6044  	if (zpodd_dev_enabled(dev))
6045  		zpodd_exit(dev);
6046  
6047  	ata_dev_cleanup_cdl_resources(dev);
6048  }
6049  
6050  /**
6051   *	ata_port_detach - Detach ATA port in preparation of device removal
6052   *	@ap: ATA port to be detached
6053   *
6054   *	Detach all ATA devices and the associated SCSI devices of @ap;
6055   *	then, remove the associated SCSI host.  @ap is guaranteed to
6056   *	be quiescent on return from this function.
6057   *
6058   *	LOCKING:
6059   *	Kernel thread context (may sleep).
6060   */
6061  static void ata_port_detach(struct ata_port *ap)
6062  {
6063  	unsigned long flags;
6064  	struct ata_link *link;
6065  	struct ata_device *dev;
6066  
6067  	/* Ensure ata_port probe has completed */
6068  	async_synchronize_cookie(ap->cookie + 1);
6069  
6070  	/* Wait for any ongoing EH */
6071  	ata_port_wait_eh(ap);
6072  
6073  	mutex_lock(&ap->scsi_scan_mutex);
6074  	spin_lock_irqsave(ap->lock, flags);
6075  
6076  	/* Remove scsi devices */
6077  	ata_for_each_link(link, ap, HOST_FIRST) {
6078  		ata_for_each_dev(dev, link, ALL) {
6079  			if (dev->sdev) {
6080  				spin_unlock_irqrestore(ap->lock, flags);
6081  				scsi_remove_device(dev->sdev);
6082  				spin_lock_irqsave(ap->lock, flags);
6083  				dev->sdev = NULL;
6084  			}
6085  		}
6086  	}
6087  
6088  	/* Tell EH to disable all devices */
6089  	ap->pflags |= ATA_PFLAG_UNLOADING;
6090  	ata_port_schedule_eh(ap);
6091  
6092  	spin_unlock_irqrestore(ap->lock, flags);
6093  	mutex_unlock(&ap->scsi_scan_mutex);
6094  
6095  	/* wait till EH commits suicide */
6096  	ata_port_wait_eh(ap);
6097  
6098  	/* it better be dead now */
6099  	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6100  
6101  	cancel_delayed_work_sync(&ap->hotplug_task);
6102  	cancel_delayed_work_sync(&ap->scsi_rescan_task);
6103  
6104  	/* Delete port multiplier link transport devices */
6105  	if (ap->pmp_link) {
6106  		int i;
6107  
6108  		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6109  			ata_tlink_delete(&ap->pmp_link[i]);
6110  	}
6111  
6112  	/* Remove the associated SCSI host */
6113  	scsi_remove_host(ap->scsi_host);
6114  	ata_tport_delete(ap);
6115  }
6116  
6117  /**
6118   *	ata_host_detach - Detach all ports of an ATA host
6119   *	@host: Host to detach
6120   *
6121   *	Detach all ports of @host.
6122   *
6123   *	LOCKING:
6124   *	Kernel thread context (may sleep).
6125   */
6126  void ata_host_detach(struct ata_host *host)
6127  {
6128  	int i;
6129  
6130  	for (i = 0; i < host->n_ports; i++)
6131  		ata_port_detach(host->ports[i]);
6132  
6133  	/* the host is dead now, dissociate ACPI */
6134  	ata_acpi_dissociate(host);
6135  }
6136  EXPORT_SYMBOL_GPL(ata_host_detach);
6137  
6138  #ifdef CONFIG_PCI
6139  
6140  /**
6141   *	ata_pci_remove_one - PCI layer callback for device removal
6142   *	@pdev: PCI device that was removed
6143   *
6144   *	PCI layer indicates to libata via this hook that hot-unplug or
6145   *	PCI layer indicates to libata via this hook that a hot-unplug or
6146   *	release is handled via devres.
6147   *
6148   *	LOCKING:
6149   *	Inherited from PCI layer (may sleep).
6150   */
6151  void ata_pci_remove_one(struct pci_dev *pdev)
6152  {
6153  	struct ata_host *host = pci_get_drvdata(pdev);
6154  
6155  	ata_host_detach(host);
6156  }
6157  EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6158  
6159  void ata_pci_shutdown_one(struct pci_dev *pdev)
6160  {
6161  	struct ata_host *host = pci_get_drvdata(pdev);
6162  	int i;
6163  
6164  	for (i = 0; i < host->n_ports; i++) {
6165  		struct ata_port *ap = host->ports[i];
6166  
6167  		ap->pflags |= ATA_PFLAG_FROZEN;
6168  
6169  		/* Disable port interrupts */
6170  		if (ap->ops->freeze)
6171  			ap->ops->freeze(ap);
6172  
6173  		/* Stop the port DMA engines */
6174  		if (ap->ops->port_stop)
6175  			ap->ops->port_stop(ap);
6176  	}
6177  }
6178  EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
6179  
6180  /* move to PCI subsystem */
6181  int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6182  {
6183  	unsigned long tmp = 0;
6184  
6185  	switch (bits->width) {
6186  	case 1: {
6187  		u8 tmp8 = 0;
6188  		pci_read_config_byte(pdev, bits->reg, &tmp8);
6189  		tmp = tmp8;
6190  		break;
6191  	}
6192  	case 2: {
6193  		u16 tmp16 = 0;
6194  		pci_read_config_word(pdev, bits->reg, &tmp16);
6195  		tmp = tmp16;
6196  		break;
6197  	}
6198  	case 4: {
6199  		u32 tmp32 = 0;
6200  		pci_read_config_dword(pdev, bits->reg, &tmp32);
6201  		tmp = tmp32;
6202  		break;
6203  	}
6204  
6205  	default:
6206  		return -EINVAL;
6207  	}
6208  
6209  	tmp &= bits->mask;
6210  
6211  	return (tmp == bits->val) ? 1 : 0;
6212  }
6213  EXPORT_SYMBOL_GPL(pci_test_config_bits);
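
/*
 * Editor's usage sketch (not upstream code): checking a single enable bit
 * in PCI config space, in the style of legacy IDE "port enable" tests.
 * The offset 0x41 and the bit mask are illustrative values, not taken from
 * a real device.
 */
#if 0
	static const struct pci_bits enable_bit = {
		.reg	= 0x41,	/* config-space offset (example value) */
		.width	= 1,	/* read one byte */
		.mask	= 0x80,
		.val	= 0x80,	/* bit must be set */
	};

	if (pci_test_config_bits(pdev, &enable_bit) != 1)
		return -ENODEV;	/* port disabled or width invalid */
#endif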
6214  
6215  #ifdef CONFIG_PM
6216  void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6217  {
6218  	pci_save_state(pdev);
6219  	pci_disable_device(pdev);
6220  
6221  	if (mesg.event & PM_EVENT_SLEEP)
6222  		pci_set_power_state(pdev, PCI_D3hot);
6223  }
6224  EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6225  
6226  int ata_pci_device_do_resume(struct pci_dev *pdev)
6227  {
6228  	int rc;
6229  
6230  	pci_set_power_state(pdev, PCI_D0);
6231  	pci_restore_state(pdev);
6232  
6233  	rc = pcim_enable_device(pdev);
6234  	if (rc) {
6235  		dev_err(&pdev->dev,
6236  			"failed to enable device after resume (%d)\n", rc);
6237  		return rc;
6238  	}
6239  
6240  	pci_set_master(pdev);
6241  	return 0;
6242  }
6243  EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6244  
6245  int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6246  {
6247  	struct ata_host *host = pci_get_drvdata(pdev);
6248  
6249  	ata_host_suspend(host, mesg);
6250  
6251  	ata_pci_device_do_suspend(pdev, mesg);
6252  
6253  	return 0;
6254  }
6255  EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6256  
6257  int ata_pci_device_resume(struct pci_dev *pdev)
6258  {
6259  	struct ata_host *host = pci_get_drvdata(pdev);
6260  	int rc;
6261  
6262  	rc = ata_pci_device_do_resume(pdev);
6263  	if (rc == 0)
6264  		ata_host_resume(host);
6265  	return rc;
6266  }
6267  EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6268  #endif /* CONFIG_PM */
6269  #endif /* CONFIG_PCI */
6270  
6271  /**
6272   *	ata_platform_remove_one - Platform layer callback for device removal
6273   *	@pdev: Platform device that was removed
6274   *
6275   *	Platform layer indicates to libata via this hook that a hot-unplug or
6276   *	module unload event has occurred.  Detach all ports.  Resource
6277   *	release is handled via devres.
6278   *
6279   *	LOCKING:
6280   *	Inherited from platform layer (may sleep).
6281   */
6282  void ata_platform_remove_one(struct platform_device *pdev)
6283  {
6284  	struct ata_host *host = platform_get_drvdata(pdev);
6285  
6286  	ata_host_detach(host);
6287  }
6288  EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6289  
6290  #ifdef CONFIG_ATA_FORCE
6291  
6292  #define force_cbl(name, flag)				\
6293  	{ #name,	.cbl		= (flag) }
6294  
6295  #define force_spd_limit(spd, val)			\
6296  	{ #spd,	.spd_limit		= (val) }
6297  
6298  #define force_xfer(mode, shift)				\
6299  	{ #mode,	.xfer_mask	= (1UL << (shift)) }
6300  
6301  #define force_lflag_on(name, flags)			\
6302  	{ #name,	.lflags_on	= (flags) }
6303  
6304  #define force_lflag_onoff(name, flags)			\
6305  	{ "no" #name,	.lflags_on	= (flags) },	\
6306  	{ #name,	.lflags_off	= (flags) }
6307  
6308  #define force_pflag_on(name, flags)			\
6309  	{ #name,	.pflags_on	= (flags) }
6310  
6311  #define force_quirk_on(name, flag)			\
6312  	{ #name,	.quirk_on	= (flag) }
6313  
6314  #define force_quirk_onoff(name, flag)			\
6315  	{ "no" #name,	.quirk_on	= (flag) },	\
6316  	{ #name,	.quirk_off	= (flag) }
6317  
6318  static const struct ata_force_param force_tbl[] __initconst = {
6319  	force_cbl(40c,			ATA_CBL_PATA40),
6320  	force_cbl(80c,			ATA_CBL_PATA80),
6321  	force_cbl(short40c,		ATA_CBL_PATA40_SHORT),
6322  	force_cbl(unk,			ATA_CBL_PATA_UNK),
6323  	force_cbl(ign,			ATA_CBL_PATA_IGN),
6324  	force_cbl(sata,			ATA_CBL_SATA),
6325  
6326  	force_spd_limit(1.5Gbps,	1),
6327  	force_spd_limit(3.0Gbps,	2),
6328  
6329  	force_xfer(pio0,		ATA_SHIFT_PIO + 0),
6330  	force_xfer(pio1,		ATA_SHIFT_PIO + 1),
6331  	force_xfer(pio2,		ATA_SHIFT_PIO + 2),
6332  	force_xfer(pio3,		ATA_SHIFT_PIO + 3),
6333  	force_xfer(pio4,		ATA_SHIFT_PIO + 4),
6334  	force_xfer(pio5,		ATA_SHIFT_PIO + 5),
6335  	force_xfer(pio6,		ATA_SHIFT_PIO + 6),
6336  	force_xfer(mwdma0,		ATA_SHIFT_MWDMA + 0),
6337  	force_xfer(mwdma1,		ATA_SHIFT_MWDMA + 1),
6338  	force_xfer(mwdma2,		ATA_SHIFT_MWDMA + 2),
6339  	force_xfer(mwdma3,		ATA_SHIFT_MWDMA + 3),
6340  	force_xfer(mwdma4,		ATA_SHIFT_MWDMA + 4),
6341  	force_xfer(udma0,		ATA_SHIFT_UDMA + 0),
6342  	force_xfer(udma16,		ATA_SHIFT_UDMA + 0),
6343  	force_xfer(udma/16,		ATA_SHIFT_UDMA + 0),
6344  	force_xfer(udma1,		ATA_SHIFT_UDMA + 1),
6345  	force_xfer(udma25,		ATA_SHIFT_UDMA + 1),
6346  	force_xfer(udma/25,		ATA_SHIFT_UDMA + 1),
6347  	force_xfer(udma2,		ATA_SHIFT_UDMA + 2),
6348  	force_xfer(udma33,		ATA_SHIFT_UDMA + 2),
6349  	force_xfer(udma/33,		ATA_SHIFT_UDMA + 2),
6350  	force_xfer(udma3,		ATA_SHIFT_UDMA + 3),
6351  	force_xfer(udma44,		ATA_SHIFT_UDMA + 3),
6352  	force_xfer(udma/44,		ATA_SHIFT_UDMA + 3),
6353  	force_xfer(udma4,		ATA_SHIFT_UDMA + 4),
6354  	force_xfer(udma66,		ATA_SHIFT_UDMA + 4),
6355  	force_xfer(udma/66,		ATA_SHIFT_UDMA + 4),
6356  	force_xfer(udma5,		ATA_SHIFT_UDMA + 5),
6357  	force_xfer(udma100,		ATA_SHIFT_UDMA + 5),
6358  	force_xfer(udma/100,		ATA_SHIFT_UDMA + 5),
6359  	force_xfer(udma6,		ATA_SHIFT_UDMA + 6),
6360  	force_xfer(udma133,		ATA_SHIFT_UDMA + 6),
6361  	force_xfer(udma/133,		ATA_SHIFT_UDMA + 6),
6362  	force_xfer(udma7,		ATA_SHIFT_UDMA + 7),
6363  
6364  	force_lflag_on(nohrst,		ATA_LFLAG_NO_HRST),
6365  	force_lflag_on(nosrst,		ATA_LFLAG_NO_SRST),
6366  	force_lflag_on(norst,		ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
6367  	force_lflag_on(rstonce,		ATA_LFLAG_RST_ONCE),
6368  	force_lflag_onoff(dbdelay,	ATA_LFLAG_NO_DEBOUNCE_DELAY),
6369  
6370  	force_pflag_on(external,	ATA_PFLAG_EXTERNAL),
6371  
6372  	force_quirk_onoff(ncq,		ATA_QUIRK_NONCQ),
6373  	force_quirk_onoff(ncqtrim,	ATA_QUIRK_NO_NCQ_TRIM),
6374  	force_quirk_onoff(ncqati,	ATA_QUIRK_NO_NCQ_ON_ATI),
6375  
6376  	force_quirk_onoff(trim,		ATA_QUIRK_NOTRIM),
6377  	force_quirk_on(trim_zero,	ATA_QUIRK_ZERO_AFTER_TRIM),
6378  	force_quirk_on(max_trim_128m,	ATA_QUIRK_MAX_TRIM_128M),
6379  
6380  	force_quirk_onoff(dma,		ATA_QUIRK_NODMA),
6381  	force_quirk_on(atapi_dmadir,	ATA_QUIRK_ATAPI_DMADIR),
6382  	force_quirk_on(atapi_mod16_dma,	ATA_QUIRK_ATAPI_MOD16_DMA),
6383  
6384  	force_quirk_onoff(dmalog,	ATA_QUIRK_NO_DMA_LOG),
6385  	force_quirk_onoff(iddevlog,	ATA_QUIRK_NO_ID_DEV_LOG),
6386  	force_quirk_onoff(logdir,	ATA_QUIRK_NO_LOG_DIR),
6387  
6388  	force_quirk_on(max_sec_128,	ATA_QUIRK_MAX_SEC_128),
6389  	force_quirk_on(max_sec_1024,	ATA_QUIRK_MAX_SEC_1024),
6390  	force_quirk_on(max_sec_lba48,	ATA_QUIRK_MAX_SEC_LBA48),
6391  
6392  	force_quirk_onoff(lpm,		ATA_QUIRK_NOLPM),
6393  	force_quirk_onoff(setxfer,	ATA_QUIRK_NOSETXFER),
6394  	force_quirk_on(dump_id,		ATA_QUIRK_DUMP_ID),
6395  	force_quirk_onoff(fua,		ATA_QUIRK_NO_FUA),
6396  
6397  	force_quirk_on(disable,		ATA_QUIRK_DISABLE),
6398  };
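
/*
 * Editor's example (not upstream text): with the table above, a command
 * line such as
 *
 *	libata.force=1.5Gbps,2:noncq,3.00:disable
 *
 * limits link speed globally (no ID given yet), sets the NONCQ quirk on
 * port 2, and applies ATA_QUIRK_DISABLE to device 0 of port 3.  Shortcut
 * matching in ata_parse_force_one() below also lets "1.5" match "1.5Gbps"
 * as long as the prefix is unambiguous.
 */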
6399  
6400  static int __init ata_parse_force_one(char **cur,
6401  				      struct ata_force_ent *force_ent,
6402  				      const char **reason)
6403  {
6404  	char *start = *cur, *p = *cur;
6405  	char *id, *val, *endp;
6406  	const struct ata_force_param *match_fp = NULL;
6407  	int nr_matches = 0, i;
6408  
6409  	/* find where this param ends and update *cur */
6410  	while (*p != '\0' && *p != ',')
6411  		p++;
6412  
6413  	if (*p == '\0')
6414  		*cur = p;
6415  	else
6416  		*cur = p + 1;
6417  
6418  	*p = '\0';
6419  
6420  	/* parse */
6421  	p = strchr(start, ':');
6422  	if (!p) {
6423  		val = strstrip(start);
6424  		goto parse_val;
6425  	}
6426  	*p = '\0';
6427  
6428  	id = strstrip(start);
6429  	val = strstrip(p + 1);
6430  
6431  	/* parse id */
6432  	p = strchr(id, '.');
6433  	if (p) {
6434  		*p++ = '\0';
6435  		force_ent->device = simple_strtoul(p, &endp, 10);
6436  		if (p == endp || *endp != '\0') {
6437  			*reason = "invalid device";
6438  			return -EINVAL;
6439  		}
6440  	}
6441  
6442  	force_ent->port = simple_strtoul(id, &endp, 10);
6443  	if (id == endp || *endp != '\0') {
6444  		*reason = "invalid port/link";
6445  		return -EINVAL;
6446  	}
6447  
6448   parse_val:
6449  	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6450  	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6451  		const struct ata_force_param *fp = &force_tbl[i];
6452  
6453  		if (strncasecmp(val, fp->name, strlen(val)))
6454  			continue;
6455  
6456  		nr_matches++;
6457  		match_fp = fp;
6458  
6459  		if (strcasecmp(val, fp->name) == 0) {
6460  			nr_matches = 1;
6461  			break;
6462  		}
6463  	}
6464  
6465  	if (!nr_matches) {
6466  		*reason = "unknown value";
6467  		return -EINVAL;
6468  	}
6469  	if (nr_matches > 1) {
6470  		*reason = "ambiguous value";
6471  		return -EINVAL;
6472  	}
6473  
6474  	force_ent->param = *match_fp;
6475  
6476  	return 0;
6477  }
6478  
6479  static void __init ata_parse_force_param(void)
6480  {
6481  	int idx = 0, size = 1;
6482  	int last_port = -1, last_device = -1;
6483  	char *p, *cur, *next;
6484  
6485  	/* Calculate maximum number of params and allocate ata_force_tbl */
6486  	for (p = ata_force_param_buf; *p; p++)
6487  		if (*p == ',')
6488  			size++;
6489  
6490  	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6491  	if (!ata_force_tbl) {
6492  		printk(KERN_WARNING "ata: failed to extend force table, "
6493  		       "libata.force ignored\n");
6494  		return;
6495  	}
6496  
6497  	/* parse and populate the table */
6498  	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6499  		const char *reason = "";
6500  		struct ata_force_ent te = { .port = -1, .device = -1 };
6501  
6502  		next = cur;
6503  		if (ata_parse_force_one(&next, &te, &reason)) {
6504  			printk(KERN_WARNING "ata: failed to parse force "
6505  			       "parameter \"%s\" (%s)\n",
6506  			       cur, reason);
6507  			continue;
6508  		}
6509  
6510  		if (te.port == -1) {
6511  			te.port = last_port;
6512  			te.device = last_device;
6513  		}
6514  
6515  		ata_force_tbl[idx++] = te;
6516  
6517  		last_port = te.port;
6518  		last_device = te.device;
6519  	}
6520  
6521  	ata_force_tbl_size = idx;
6522  }
6523  
6524  static void ata_free_force_param(void)
6525  {
6526  	kfree(ata_force_tbl);
6527  }
6528  #else
6529  static inline void ata_parse_force_param(void) { }
6530  static inline void ata_free_force_param(void) { }
6531  #endif
6532  
6533  static int __init ata_init(void)
6534  {
6535  	int rc;
6536  
6537  	ata_parse_force_param();
6538  
6539  	rc = ata_sff_init();
6540  	if (rc) {
6541  		ata_free_force_param();
6542  		return rc;
6543  	}
6544  
6545  	libata_transport_init();
6546  	ata_scsi_transport_template = ata_attach_transport();
6547  	if (!ata_scsi_transport_template) {
6548  		ata_sff_exit();
6549  		rc = -ENOMEM;
6550  		goto err_out;
6551  	}
6552  
6553  	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6554  	return 0;
6555  
6556  err_out:
6557  	return rc;
6558  }
6559  
6560  static void __exit ata_exit(void)
6561  {
6562  	ata_release_transport(ata_scsi_transport_template);
6563  	libata_transport_exit();
6564  	ata_sff_exit();
6565  	ata_free_force_param();
6566  }
6567  
6568  subsys_initcall(ata_init);
6569  module_exit(ata_exit);
6570  
6571  static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6572  
6573  int ata_ratelimit(void)
6574  {
6575  	return __ratelimit(&ratelimit);
6576  }
6577  EXPORT_SYMBOL_GPL(ata_ratelimit);
6578  
6579  /**
6580   *	ata_msleep - ATA EH owner aware msleep
6581   *	@ap: ATA port to attribute the sleep to
6582   *	@msecs: duration to sleep in milliseconds
6583   *
6584   *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6585   *	ownership is released before going to sleep and reacquired
6586   *	after the sleep is complete.  IOW, other ports sharing the
6587   *	@ap->host will be allowed to own the EH while this task is
6588   *	sleeping.
6589   *
6590   *	LOCKING:
6591   *	Might sleep.
6592   */
6593  void ata_msleep(struct ata_port *ap, unsigned int msecs)
6594  {
6595  	bool owns_eh = ap && ap->host->eh_owner == current;
6596  
6597  	if (owns_eh)
6598  		ata_eh_release(ap);
6599  
6600  	if (msecs < 20) {
6601  		unsigned long usecs = msecs * USEC_PER_MSEC;
6602  		usleep_range(usecs, usecs + 50);
6603  	} else {
6604  		msleep(msecs);
6605  	}
6606  
6607  	if (owns_eh)
6608  		ata_eh_acquire(ap);
6609  }
6610  EXPORT_SYMBOL_GPL(ata_msleep);
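
/*
 * Editor's usage note (not upstream code): a drop-in replacement for
 * msleep() in EH paths, so sibling ports sharing the host can make EH
 * progress while this task sleeps; short waits use usleep_range() as
 * implemented above.
 */
#if 0
	ata_msleep(ap, 150);	/* EH-aware sleep between reset/status polls */
#endif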
6611  
6612  /**
6613   *	ata_wait_register - wait until register value changes
6614   *	@ap: ATA port to wait register for, can be NULL
6615   *	@reg: IO-mapped register
6616   *	@mask: Mask to apply to read register value
6617   *	@val: Wait condition
6618   *	@interval: polling interval in milliseconds
6619   *	@timeout: timeout in milliseconds
6620   *
6621   *	Waiting for some bits of a register to change is a common
6622   *	operation for ATA controllers.  This function reads 32bit LE
6623   *	IO-mapped register @reg and tests for the following condition.
6624   *
6625   *	(*@reg & mask) != val
6626   *
6627   *	If the condition is met, it returns; otherwise, the process is
6628   *	repeated after @interval milliseconds until timeout.
6629   *
6630   *	LOCKING:
6631   *	Kernel thread context (may sleep)
6632   *
6633   *	RETURNS:
6634   *	The final register value.
6635   */
6636  u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6637  		      unsigned int interval, unsigned int timeout)
6638  {
6639  	unsigned long deadline;
6640  	u32 tmp;
6641  
6642  	tmp = ioread32(reg);
6643  
6644  	/* Calculate timeout _after_ the first read to make sure
6645  	 * preceding writes reach the controller before starting to
6646  	 * eat away the timeout.
6647  	 */
6648  	deadline = ata_deadline(jiffies, timeout);
6649  
6650  	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6651  		ata_msleep(ap, interval);
6652  		tmp = ioread32(reg);
6653  	}
6654  
6655  	return tmp;
6656  }
6657  EXPORT_SYMBOL_GPL(ata_wait_register);
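
/*
 * Editor's usage sketch (not upstream code): poll a hypothetical 32-bit
 * MMIO status register until its busy bit (assumed to be bit 0) clears,
 * sampling every 10ms for at most 1000ms.  mmio_status is an assumption.
 */
#if 0
	u32 status = ata_wait_register(ap, mmio_status, 0x1, 0x1, 10, 1000);

	if (status & 0x1)
		return -EBUSY;	/* bit still set after the timeout */
#endif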
6658  
6659  /*
6660   * Dummy port_ops
6661   */
6662  static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6663  {
6664  	return AC_ERR_SYSTEM;
6665  }
6666  
6667  static void ata_dummy_error_handler(struct ata_port *ap)
6668  {
6669  	/* truly dummy */
6670  }
6671  
6672  struct ata_port_operations ata_dummy_port_ops = {
6673  	.qc_issue		= ata_dummy_qc_issue,
6674  	.error_handler		= ata_dummy_error_handler,
6675  	.sched_eh		= ata_std_sched_eh,
6676  	.end_eh			= ata_std_end_eh,
6677  };
6678  EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6679  
6680  const struct ata_port_info ata_dummy_port_info = {
6681  	.port_ops		= &ata_dummy_port_ops,
6682  };
6683  EXPORT_SYMBOL_GPL(ata_dummy_port_info);
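
/*
 * Editor's usage sketch (not upstream code): an LLD can mark a nonexistent
 * or disabled port by pointing its @ppi slot at the dummy info; such ports
 * are reported as "DUMMY" by ata_host_register().  my_port_info is a
 * hypothetical name.
 */
#if 0
	const struct ata_port_info *ppi[] = { &my_port_info,
					      &ata_dummy_port_info };
#endif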
6684  
6685  void ata_print_version(const struct device *dev, const char *version)
6686  {
6687  	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6688  }
6689  EXPORT_SYMBOL(ata_print_version);
6690  
6691  EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
6692  EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
6693  EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
6694  EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
6695  EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);
6696