xref: /linux/drivers/ata/libata-core.c (revision a33f32244d8550da8b4a26e277ce07d5c6d158b5)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68 
69 #include "libata.h"
70 
71 
72 /* debounce timing parameters in msecs { interval, duration, timeout } */
73 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
74 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
75 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
76 
77 const struct ata_port_operations ata_base_port_ops = {
78 	.prereset		= ata_std_prereset,
79 	.postreset		= ata_std_postreset,
80 	.error_handler		= ata_std_error_handler,
81 };
82 
83 const struct ata_port_operations sata_port_ops = {
84 	.inherits		= &ata_base_port_ops,
85 
86 	.qc_defer		= ata_std_qc_defer,
87 	.hardreset		= sata_std_hardreset,
88 };
89 
90 static unsigned int ata_dev_init_params(struct ata_device *dev,
91 					u16 heads, u16 sectors);
92 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
93 static unsigned int ata_dev_set_feature(struct ata_device *dev,
94 					u8 enable, u8 feature);
95 static void ata_dev_xfermask(struct ata_device *dev);
96 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
97 
98 unsigned int ata_print_id = 1;
99 static struct workqueue_struct *ata_wq;
100 
101 struct workqueue_struct *ata_aux_wq;
102 
103 struct ata_force_param {
104 	const char	*name;
105 	unsigned int	cbl;
106 	int		spd_limit;
107 	unsigned long	xfer_mask;
108 	unsigned int	horkage_on;
109 	unsigned int	horkage_off;
110 	unsigned int	lflags;
111 };
112 
113 struct ata_force_ent {
114 	int			port;
115 	int			device;
116 	struct ata_force_param	param;
117 };
118 
119 static struct ata_force_ent *ata_force_tbl;
120 static int ata_force_tbl_size;
121 
122 static char ata_force_param_buf[PAGE_SIZE] __initdata;
123 /* param_buf is thrown away after initialization, disallow read */
124 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
125 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
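
/*
 * Editor's note (illustrative example, not part of the original source):
 * per the ata_force_cbl() comment further below, force entries are matched
 * by port and device number, so booting with, for instance,
 * "libata.force=1.00:40c,udma4" would force port 1's cable type to 40c and
 * cap device 1.00's transfer mode at UDMA4 (UDMA/66).
 */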
126 
127 static int atapi_enabled = 1;
128 module_param(atapi_enabled, int, 0444);
129 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
130 
131 static int atapi_dmadir = 0;
132 module_param(atapi_dmadir, int, 0444);
133 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
134 
135 int atapi_passthru16 = 1;
136 module_param(atapi_passthru16, int, 0444);
137 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
138 
139 int libata_fua = 0;
140 module_param_named(fua, libata_fua, int, 0444);
141 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
142 
143 static int ata_ignore_hpa;
144 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
145 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
146 
147 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
148 module_param_named(dma, libata_dma_mask, int, 0444);
149 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
150 
151 static int ata_probe_timeout;
152 module_param(ata_probe_timeout, int, 0444);
153 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
154 
155 int libata_noacpi = 0;
156 module_param_named(noacpi, libata_noacpi, int, 0444);
157 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
158 
159 int libata_allow_tpm = 0;
160 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
161 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
162 
163 MODULE_AUTHOR("Jeff Garzik");
164 MODULE_DESCRIPTION("Library module for ATA devices");
165 MODULE_LICENSE("GPL");
166 MODULE_VERSION(DRV_VERSION);
167 
168 
169 static bool ata_sstatus_online(u32 sstatus)
170 {
171 	return (sstatus & 0xf) == 0x3;
172 }
173 
174 /**
175  *	ata_link_next - link iteration helper
176  *	@link: the previous link, NULL to start
177  *	@ap: ATA port containing links to iterate
178  *	@mode: iteration mode, one of ATA_LITER_*
179  *
180  *	LOCKING:
181  *	Host lock or EH context.
182  *
183  *	RETURNS:
184  *	Pointer to the next link.
185  */
186 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
187 			       enum ata_link_iter_mode mode)
188 {
189 	BUG_ON(mode != ATA_LITER_EDGE &&
190 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
191 
192 	/* NULL link indicates start of iteration */
193 	if (!link)
194 		switch (mode) {
195 		case ATA_LITER_EDGE:
196 		case ATA_LITER_PMP_FIRST:
197 			if (sata_pmp_attached(ap))
198 				return ap->pmp_link;
199 			/* fall through */
200 		case ATA_LITER_HOST_FIRST:
201 			return &ap->link;
202 		}
203 
204 	/* we just iterated over the host link, what's next? */
205 	if (link == &ap->link)
206 		switch (mode) {
207 		case ATA_LITER_HOST_FIRST:
208 			if (sata_pmp_attached(ap))
209 				return ap->pmp_link;
210 			/* fall through */
211 		case ATA_LITER_PMP_FIRST:
212 			if (unlikely(ap->slave_link))
213 				return ap->slave_link;
214 			/* fall through */
215 		case ATA_LITER_EDGE:
216 			return NULL;
217 		}
218 
219 	/* slave_link excludes PMP */
220 	if (unlikely(link == ap->slave_link))
221 		return NULL;
222 
223 	/* we were over a PMP link */
224 	if (++link < ap->pmp_link + ap->nr_pmp_links)
225 		return link;
226 
227 	if (mode == ATA_LITER_PMP_FIRST)
228 		return &ap->link;
229 
230 	return NULL;
231 }
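
/*
 * Editor's note (usage sketch, not part of the original source):
 * ata_link_next() and the ata_dev_next() helper below are normally used
 * through the ata_for_each_link() and ata_for_each_dev() wrappers, as in
 * ata_lpm_enable() further down in this file:
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ALL)
 *			ata_dev_disable_pm(dev);
 */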
232 
233 /**
234  *	ata_dev_next - device iteration helper
235  *	@dev: the previous device, NULL to start
236  *	@link: ATA link containing devices to iterate
237  *	@mode: iteration mode, one of ATA_DITER_*
238  *
239  *	LOCKING:
240  *	Host lock or EH context.
241  *
242  *	RETURNS:
243  *	Pointer to the next device.
244  */
245 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
246 				enum ata_dev_iter_mode mode)
247 {
248 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
249 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
250 
251 	/* NULL dev indicates start of iteration */
252 	if (!dev)
253 		switch (mode) {
254 		case ATA_DITER_ENABLED:
255 		case ATA_DITER_ALL:
256 			dev = link->device;
257 			goto check;
258 		case ATA_DITER_ENABLED_REVERSE:
259 		case ATA_DITER_ALL_REVERSE:
260 			dev = link->device + ata_link_max_devices(link) - 1;
261 			goto check;
262 		}
263 
264  next:
265 	/* move to the next one */
266 	switch (mode) {
267 	case ATA_DITER_ENABLED:
268 	case ATA_DITER_ALL:
269 		if (++dev < link->device + ata_link_max_devices(link))
270 			goto check;
271 		return NULL;
272 	case ATA_DITER_ENABLED_REVERSE:
273 	case ATA_DITER_ALL_REVERSE:
274 		if (--dev >= link->device)
275 			goto check;
276 		return NULL;
277 	}
278 
279  check:
280 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
281 	    !ata_dev_enabled(dev))
282 		goto next;
283 	return dev;
284 }
285 
286 /**
287  *	ata_dev_phys_link - find physical link for a device
288  *	@dev: ATA device to look up physical link for
289  *
290  *	Look up physical link which @dev is attached to.  Note that
291  *	this is different from @dev->link only when @dev is on slave
292  *	link.  For all other cases, it's the same as @dev->link.
293  *
294  *	LOCKING:
295  *	Don't care.
296  *
297  *	RETURNS:
298  *	Pointer to the found physical link.
299  */
300 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
301 {
302 	struct ata_port *ap = dev->link->ap;
303 
304 	if (!ap->slave_link)
305 		return dev->link;
306 	if (!dev->devno)
307 		return &ap->link;
308 	return ap->slave_link;
309 }
310 
311 /**
312  *	ata_force_cbl - force cable type according to libata.force
313  *	@ap: ATA port of interest
314  *
315  *	Force cable type according to libata.force and whine about it.
316  *	The last entry which has a matching port number is used, so it
317  *	can be specified as part of device force parameters.  For
318  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
319  *	same effect.
320  *
321  *	LOCKING:
322  *	EH context.
323  */
324 void ata_force_cbl(struct ata_port *ap)
325 {
326 	int i;
327 
328 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
329 		const struct ata_force_ent *fe = &ata_force_tbl[i];
330 
331 		if (fe->port != -1 && fe->port != ap->print_id)
332 			continue;
333 
334 		if (fe->param.cbl == ATA_CBL_NONE)
335 			continue;
336 
337 		ap->cbl = fe->param.cbl;
338 		ata_port_printk(ap, KERN_NOTICE,
339 				"FORCE: cable set to %s\n", fe->param.name);
340 		return;
341 	}
342 }
343 
344 /**
345  *	ata_force_link_limits - force link limits according to libata.force
346  *	@link: ATA link of interest
347  *
348  *	Force link flags and SATA spd limit according to libata.force
349  *	and whine about it.  When only the port part is specified
350  *	(e.g. 1:), the limit applies to all links connected to both
351  *	the host link and all fan-out ports connected via PMP.  If the
352  *	device part is specified as 0 (e.g. 1.00:), it specifies the
353  *	first fan-out link not the host link.  Device number 15 always
354  *	points to the host link whether PMP is attached or not.  If the
355  *	controller has slave link, device number 16 points to it.
356  *
357  *	LOCKING:
358  *	EH context.
359  */
360 static void ata_force_link_limits(struct ata_link *link)
361 {
362 	bool did_spd = false;
363 	int linkno = link->pmp;
364 	int i;
365 
366 	if (ata_is_host_link(link))
367 		linkno += 15;
368 
369 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
370 		const struct ata_force_ent *fe = &ata_force_tbl[i];
371 
372 		if (fe->port != -1 && fe->port != link->ap->print_id)
373 			continue;
374 
375 		if (fe->device != -1 && fe->device != linkno)
376 			continue;
377 
378 		/* only honor the first spd limit */
379 		if (!did_spd && fe->param.spd_limit) {
380 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
381 			ata_link_printk(link, KERN_NOTICE,
382 					"FORCE: PHY spd limit set to %s\n",
383 					fe->param.name);
384 			did_spd = true;
385 		}
386 
387 		/* let lflags stack */
388 		if (fe->param.lflags) {
389 			link->flags |= fe->param.lflags;
390 			ata_link_printk(link, KERN_NOTICE,
391 					"FORCE: link flag 0x%x forced -> 0x%x\n",
392 					fe->param.lflags, link->flags);
393 		}
394 	}
395 }
396 
397 /**
398  *	ata_force_xfermask - force xfermask according to libata.force
399  *	@dev: ATA device of interest
400  *
401  *	Force xfer_mask according to libata.force and whine about it.
402  *	For consistency with link selection, device number 15 selects
403  *	the first device connected to the host link.
404  *
405  *	LOCKING:
406  *	EH context.
407  */
408 static void ata_force_xfermask(struct ata_device *dev)
409 {
410 	int devno = dev->link->pmp + dev->devno;
411 	int alt_devno = devno;
412 	int i;
413 
414 	/* allow n.15/16 for devices attached to host port */
415 	if (ata_is_host_link(dev->link))
416 		alt_devno += 15;
417 
418 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
419 		const struct ata_force_ent *fe = &ata_force_tbl[i];
420 		unsigned long pio_mask, mwdma_mask, udma_mask;
421 
422 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
423 			continue;
424 
425 		if (fe->device != -1 && fe->device != devno &&
426 		    fe->device != alt_devno)
427 			continue;
428 
429 		if (!fe->param.xfer_mask)
430 			continue;
431 
432 		ata_unpack_xfermask(fe->param.xfer_mask,
433 				    &pio_mask, &mwdma_mask, &udma_mask);
434 		if (udma_mask)
435 			dev->udma_mask = udma_mask;
436 		else if (mwdma_mask) {
437 			dev->udma_mask = 0;
438 			dev->mwdma_mask = mwdma_mask;
439 		} else {
440 			dev->udma_mask = 0;
441 			dev->mwdma_mask = 0;
442 			dev->pio_mask = pio_mask;
443 		}
444 
445 		ata_dev_printk(dev, KERN_NOTICE,
446 			"FORCE: xfer_mask set to %s\n", fe->param.name);
447 		return;
448 	}
449 }
450 
451 /**
452  *	ata_force_horkage - force horkage according to libata.force
453  *	@dev: ATA device of interest
454  *
455  *	Force horkage according to libata.force and whine about it.
456  *	For consistency with link selection, device number 15 selects
457  *	the first device connected to the host link.
458  *
459  *	LOCKING:
460  *	EH context.
461  */
462 static void ata_force_horkage(struct ata_device *dev)
463 {
464 	int devno = dev->link->pmp + dev->devno;
465 	int alt_devno = devno;
466 	int i;
467 
468 	/* allow n.15/16 for devices attached to host port */
469 	if (ata_is_host_link(dev->link))
470 		alt_devno += 15;
471 
472 	for (i = 0; i < ata_force_tbl_size; i++) {
473 		const struct ata_force_ent *fe = &ata_force_tbl[i];
474 
475 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
476 			continue;
477 
478 		if (fe->device != -1 && fe->device != devno &&
479 		    fe->device != alt_devno)
480 			continue;
481 
482 		if (!(~dev->horkage & fe->param.horkage_on) &&
483 		    !(dev->horkage & fe->param.horkage_off))
484 			continue;
485 
486 		dev->horkage |= fe->param.horkage_on;
487 		dev->horkage &= ~fe->param.horkage_off;
488 
489 		ata_dev_printk(dev, KERN_NOTICE,
490 			"FORCE: horkage modified (%s)\n", fe->param.name);
491 	}
492 }
493 
494 /**
495  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
496  *	@opcode: SCSI opcode
497  *
498  *	Determine ATAPI command type from @opcode.
499  *
500  *	LOCKING:
501  *	None.
502  *
503  *	RETURNS:
504  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
505  */
506 int atapi_cmd_type(u8 opcode)
507 {
508 	switch (opcode) {
509 	case GPCMD_READ_10:
510 	case GPCMD_READ_12:
511 		return ATAPI_READ;
512 
513 	case GPCMD_WRITE_10:
514 	case GPCMD_WRITE_12:
515 	case GPCMD_WRITE_AND_VERIFY_10:
516 		return ATAPI_WRITE;
517 
518 	case GPCMD_READ_CD:
519 	case GPCMD_READ_CD_MSF:
520 		return ATAPI_READ_CD;
521 
522 	case ATA_16:
523 	case ATA_12:
524 		if (atapi_passthru16)
525 			return ATAPI_PASS_THRU;
526 		/* fall thru */
527 	default:
528 		return ATAPI_MISC;
529 	}
530 }
531 
532 /**
533  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
534  *	@tf: Taskfile to convert
535  *	@pmp: Port multiplier port
536  *	@is_cmd: This FIS is for command
537  *	@fis: Buffer into which data will be output
538  *
539  *	Converts a standard ATA taskfile to a Serial ATA
540  *	FIS structure (Register - Host to Device).
541  *
542  *	LOCKING:
543  *	Inherited from caller.
544  */
545 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
546 {
547 	fis[0] = 0x27;			/* Register - Host to Device FIS */
548 	fis[1] = pmp & 0xf;		/* Port multiplier number */
549 	if (is_cmd)
550 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
551 
552 	fis[2] = tf->command;
553 	fis[3] = tf->feature;
554 
555 	fis[4] = tf->lbal;
556 	fis[5] = tf->lbam;
557 	fis[6] = tf->lbah;
558 	fis[7] = tf->device;
559 
560 	fis[8] = tf->hob_lbal;
561 	fis[9] = tf->hob_lbam;
562 	fis[10] = tf->hob_lbah;
563 	fis[11] = tf->hob_feature;
564 
565 	fis[12] = tf->nsect;
566 	fis[13] = tf->hob_nsect;
567 	fis[14] = 0;
568 	fis[15] = tf->ctl;
569 
570 	fis[16] = 0;
571 	fis[17] = 0;
572 	fis[18] = 0;
573 	fis[19] = 0;
574 }
575 
576 /**
577  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
578  *	@fis: Buffer from which data will be input
579  *	@tf: Taskfile to output
580  *
581  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
582  *
583  *	LOCKING:
584  *	Inherited from caller.
585  */
586 
587 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
588 {
589 	tf->command	= fis[2];	/* status */
590 	tf->feature	= fis[3];	/* error */
591 
592 	tf->lbal	= fis[4];
593 	tf->lbam	= fis[5];
594 	tf->lbah	= fis[6];
595 	tf->device	= fis[7];
596 
597 	tf->hob_lbal	= fis[8];
598 	tf->hob_lbam	= fis[9];
599 	tf->hob_lbah	= fis[10];
600 
601 	tf->nsect	= fis[12];
602 	tf->hob_nsect	= fis[13];
603 }
604 
605 static const u8 ata_rw_cmds[] = {
606 	/* pio multi */
607 	ATA_CMD_READ_MULTI,
608 	ATA_CMD_WRITE_MULTI,
609 	ATA_CMD_READ_MULTI_EXT,
610 	ATA_CMD_WRITE_MULTI_EXT,
611 	0,
612 	0,
613 	0,
614 	ATA_CMD_WRITE_MULTI_FUA_EXT,
615 	/* pio */
616 	ATA_CMD_PIO_READ,
617 	ATA_CMD_PIO_WRITE,
618 	ATA_CMD_PIO_READ_EXT,
619 	ATA_CMD_PIO_WRITE_EXT,
620 	0,
621 	0,
622 	0,
623 	0,
624 	/* dma */
625 	ATA_CMD_READ,
626 	ATA_CMD_WRITE,
627 	ATA_CMD_READ_EXT,
628 	ATA_CMD_WRITE_EXT,
629 	0,
630 	0,
631 	0,
632 	ATA_CMD_WRITE_FUA_EXT
633 };
634 
635 /**
636  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
637  *	@tf: command to examine and configure
638  *	@dev: device tf belongs to
639  *
640  *	Examine the device configuration and tf->flags to calculate
641  *	the proper read/write commands and protocol to use.
642  *
643  *	LOCKING:
644  *	caller.
645  */
646 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
647 {
648 	u8 cmd;
649 
650 	int index, fua, lba48, write;
651 
652 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
653 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
654 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
655 
656 	if (dev->flags & ATA_DFLAG_PIO) {
657 		tf->protocol = ATA_PROT_PIO;
658 		index = dev->multi_count ? 0 : 8;
659 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
660 		/* Unable to use DMA due to host limitation */
661 		tf->protocol = ATA_PROT_PIO;
662 		index = dev->multi_count ? 0 : 8;
663 	} else {
664 		tf->protocol = ATA_PROT_DMA;
665 		index = 16;
666 	}
667 
668 	cmd = ata_rw_cmds[index + fua + lba48 + write];
669 	if (cmd) {
670 		tf->command = cmd;
671 		return 0;
672 	}
673 	return -1;
674 }
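
/*
 * Editor's note (worked example, not part of the original source): the
 * index arithmetic above selects an entry from ata_rw_cmds[].  A DMA,
 * LBA48, FUA write computes 16 + 4 + 2 + 1 = 23 and picks
 * ATA_CMD_WRITE_FUA_EXT, while a multi-sector PIO read computes
 * 0 + 0 + 0 + 0 = 0 and picks ATA_CMD_READ_MULTI.
 */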
675 
676 /**
677  *	ata_tf_read_block - Read block address from ATA taskfile
678  *	@tf: ATA taskfile of interest
679  *	@dev: ATA device @tf belongs to
680  *
681  *	LOCKING:
682  *	None.
683  *
684  *	Read block address from @tf.  This function can handle all
685  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
686  *	flags select the address format to use.
687  *
688  *	RETURNS:
689  *	Block address read from @tf.
690  */
691 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
692 {
693 	u64 block = 0;
694 
695 	if (tf->flags & ATA_TFLAG_LBA) {
696 		if (tf->flags & ATA_TFLAG_LBA48) {
697 			block |= (u64)tf->hob_lbah << 40;
698 			block |= (u64)tf->hob_lbam << 32;
699 			block |= (u64)tf->hob_lbal << 24;
700 		} else
701 			block |= (tf->device & 0xf) << 24;
702 
703 		block |= tf->lbah << 16;
704 		block |= tf->lbam << 8;
705 		block |= tf->lbal;
706 	} else {
707 		u32 cyl, head, sect;
708 
709 		cyl = tf->lbam | (tf->lbah << 8);
710 		head = tf->device & 0xf;
711 		sect = tf->lbal;
712 
713 		if (!sect) {
714 			ata_dev_printk(dev, KERN_WARNING, "device reported "
715 				       "invalid CHS sector 0\n");
716 			sect = 1; /* oh well */
717 		}
718 
719 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
720 	}
721 
722 	return block;
723 }
724 
725 /**
726  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
727  *	@tf: Target ATA taskfile
728  *	@dev: ATA device @tf belongs to
729  *	@block: Block address
730  *	@n_block: Number of blocks
731  *	@tf_flags: RW/FUA etc...
732  *	@tag: tag
733  *
734  *	LOCKING:
735  *	None.
736  *
737  *	Build ATA taskfile @tf for read/write request described by
738  *	@block, @n_block, @tf_flags and @tag on @dev.
739  *
740  *	RETURNS:
741  *
742  *	0 on success, -ERANGE if the request is too large for @dev,
743  *	-EINVAL if the request is invalid.
744  */
745 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
746 		    u64 block, u32 n_block, unsigned int tf_flags,
747 		    unsigned int tag)
748 {
749 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
750 	tf->flags |= tf_flags;
751 
752 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
753 		/* yay, NCQ */
754 		if (!lba_48_ok(block, n_block))
755 			return -ERANGE;
756 
757 		tf->protocol = ATA_PROT_NCQ;
758 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
759 
760 		if (tf->flags & ATA_TFLAG_WRITE)
761 			tf->command = ATA_CMD_FPDMA_WRITE;
762 		else
763 			tf->command = ATA_CMD_FPDMA_READ;
764 
765 		tf->nsect = tag << 3;
766 		tf->hob_feature = (n_block >> 8) & 0xff;
767 		tf->feature = n_block & 0xff;
768 
769 		tf->hob_lbah = (block >> 40) & 0xff;
770 		tf->hob_lbam = (block >> 32) & 0xff;
771 		tf->hob_lbal = (block >> 24) & 0xff;
772 		tf->lbah = (block >> 16) & 0xff;
773 		tf->lbam = (block >> 8) & 0xff;
774 		tf->lbal = block & 0xff;
775 
776 		tf->device = 1 << 6;
777 		if (tf->flags & ATA_TFLAG_FUA)
778 			tf->device |= 1 << 7;
779 	} else if (dev->flags & ATA_DFLAG_LBA) {
780 		tf->flags |= ATA_TFLAG_LBA;
781 
782 		if (lba_28_ok(block, n_block)) {
783 			/* use LBA28 */
784 			tf->device |= (block >> 24) & 0xf;
785 		} else if (lba_48_ok(block, n_block)) {
786 			if (!(dev->flags & ATA_DFLAG_LBA48))
787 				return -ERANGE;
788 
789 			/* use LBA48 */
790 			tf->flags |= ATA_TFLAG_LBA48;
791 
792 			tf->hob_nsect = (n_block >> 8) & 0xff;
793 
794 			tf->hob_lbah = (block >> 40) & 0xff;
795 			tf->hob_lbam = (block >> 32) & 0xff;
796 			tf->hob_lbal = (block >> 24) & 0xff;
797 		} else
798 			/* request too large even for LBA48 */
799 			return -ERANGE;
800 
801 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
802 			return -EINVAL;
803 
804 		tf->nsect = n_block & 0xff;
805 
806 		tf->lbah = (block >> 16) & 0xff;
807 		tf->lbam = (block >> 8) & 0xff;
808 		tf->lbal = block & 0xff;
809 
810 		tf->device |= ATA_LBA;
811 	} else {
812 		/* CHS */
813 		u32 sect, head, cyl, track;
814 
815 		/* The request -may- be too large for CHS addressing. */
816 		if (!lba_28_ok(block, n_block))
817 			return -ERANGE;
818 
819 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
820 			return -EINVAL;
821 
822 		/* Convert LBA to CHS */
823 		track = (u32)block / dev->sectors;
824 		cyl   = track / dev->heads;
825 		head  = track % dev->heads;
826 		sect  = (u32)block % dev->sectors + 1;
827 
828 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
829 			(u32)block, track, cyl, head, sect);
830 
831 		/* Check whether the converted CHS can fit.
832 		   Cylinder: 0-65535
833 		   Head: 0-15
834 		   Sector: 1-255 */
835 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
836 			return -ERANGE;
837 
838 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
839 		tf->lbal = sect;
840 		tf->lbam = cyl;
841 		tf->lbah = cyl >> 8;
842 		tf->device |= head;
843 	}
844 
845 	return 0;
846 }
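
/*
 * Editor's note (worked example, not part of the original source): with the
 * CHS conversion above, a drive reporting 16 heads and 63 sectors per track
 * maps block 5000 to track 5000 / 63 = 79, cyl 79 / 16 = 4,
 * head 79 % 16 = 15 and sect 5000 % 63 + 1 = 24.
 */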
847 
848 /**
849  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
850  *	@pio_mask: pio_mask
851  *	@mwdma_mask: mwdma_mask
852  *	@udma_mask: udma_mask
853  *
854  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
855  *	unsigned int xfer_mask.
856  *
857  *	LOCKING:
858  *	None.
859  *
860  *	RETURNS:
861  *	Packed xfer_mask.
862  */
863 unsigned long ata_pack_xfermask(unsigned long pio_mask,
864 				unsigned long mwdma_mask,
865 				unsigned long udma_mask)
866 {
867 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
868 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
869 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
870 }
871 
872 /**
873  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
874  *	@xfer_mask: xfer_mask to unpack
875  *	@pio_mask: resulting pio_mask
876  *	@mwdma_mask: resulting mwdma_mask
877  *	@udma_mask: resulting udma_mask
878  *
879  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
880  *	Any NULL destination masks will be ignored.
881  */
882 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
883 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
884 {
885 	if (pio_mask)
886 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
887 	if (mwdma_mask)
888 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
889 	if (udma_mask)
890 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
891 }
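
/*
 * Editor's note (illustrative round trip, not part of the original source):
 * each mask simply occupies its own ATA_SHIFT_* field, so
 *
 *	mask = ata_pack_xfermask(0x1f, 0, 0x3f);
 *	ata_unpack_xfermask(mask, &pio, &mwdma, &udma);
 *
 * recovers pio == 0x1f, mwdma == 0 and udma == 0x3f.
 */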
892 
893 static const struct ata_xfer_ent {
894 	int shift, bits;
895 	u8 base;
896 } ata_xfer_tbl[] = {
897 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
898 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
899 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
900 	{ -1, },
901 };
902 
903 /**
904  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
905  *	@xfer_mask: xfer_mask of interest
906  *
907  *	Return matching XFER_* value for @xfer_mask.  Only the highest
908  *	bit of @xfer_mask is considered.
909  *
910  *	LOCKING:
911  *	None.
912  *
913  *	RETURNS:
914  *	Matching XFER_* value, 0xff if no match found.
915  */
916 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
917 {
918 	int highbit = fls(xfer_mask) - 1;
919 	const struct ata_xfer_ent *ent;
920 
921 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
922 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
923 			return ent->base + highbit - ent->shift;
924 	return 0xff;
925 }
926 
927 /**
928  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
929  *	@xfer_mode: XFER_* of interest
930  *
931  *	Return matching xfer_mask for @xfer_mode.
932  *
933  *	LOCKING:
934  *	None.
935  *
936  *	RETURNS:
937  *	Matching xfer_mask, 0 if no match found.
938  */
939 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
940 {
941 	const struct ata_xfer_ent *ent;
942 
943 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
944 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
945 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
946 				& ~((1 << ent->shift) - 1);
947 	return 0;
948 }
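
/*
 * Editor's note (example, not part of the original source): for instance,
 * ata_xfer_mode2mask(XFER_UDMA_2) returns a mask with the UDMA0, UDMA1 and
 * UDMA2 bits set, i.e. every mode up to and including the given one within
 * its transfer class.
 */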
949 
950 /**
951  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
952  *	@xfer_mode: XFER_* of interest
953  *
954  *	Return matching xfer_shift for @xfer_mode.
955  *
956  *	LOCKING:
957  *	None.
958  *
959  *	RETURNS:
960  *	Matching xfer_shift, -1 if no match found.
961  */
962 int ata_xfer_mode2shift(unsigned long xfer_mode)
963 {
964 	const struct ata_xfer_ent *ent;
965 
966 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
967 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
968 			return ent->shift;
969 	return -1;
970 }
971 
972 /**
973  *	ata_mode_string - convert xfer_mask to string
974  *	@xfer_mask: mask of bits supported; only highest bit counts.
975  *
976  *	Determine string which represents the highest speed
977  *	(highest bit in @xfer_mask).
978  *
979  *	LOCKING:
980  *	None.
981  *
982  *	RETURNS:
983  *	Constant C string representing highest speed listed in
984  *	@xfer_mask, or the constant C string "<n/a>".
985  */
986 const char *ata_mode_string(unsigned long xfer_mask)
987 {
988 	static const char * const xfer_mode_str[] = {
989 		"PIO0",
990 		"PIO1",
991 		"PIO2",
992 		"PIO3",
993 		"PIO4",
994 		"PIO5",
995 		"PIO6",
996 		"MWDMA0",
997 		"MWDMA1",
998 		"MWDMA2",
999 		"MWDMA3",
1000 		"MWDMA4",
1001 		"UDMA/16",
1002 		"UDMA/25",
1003 		"UDMA/33",
1004 		"UDMA/44",
1005 		"UDMA/66",
1006 		"UDMA/100",
1007 		"UDMA/133",
1008 		"UDMA7",
1009 	};
1010 	int highbit;
1011 
1012 	highbit = fls(xfer_mask) - 1;
1013 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1014 		return xfer_mode_str[highbit];
1015 	return "<n/a>";
1016 }
1017 
1018 static const char *sata_spd_string(unsigned int spd)
1019 {
1020 	static const char * const spd_str[] = {
1021 		"1.5 Gbps",
1022 		"3.0 Gbps",
1023 		"6.0 Gbps",
1024 	};
1025 
1026 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1027 		return "<unknown>";
1028 	return spd_str[spd - 1];
1029 }
1030 
1031 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
1032 {
1033 	struct ata_link *link = dev->link;
1034 	struct ata_port *ap = link->ap;
1035 	u32 scontrol;
1036 	unsigned int err_mask;
1037 	int rc;
1038 
1039 	/*
1040 	 * disallow DIPM for drivers which haven't set
1041 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
1042 	 * phy ready will be set in the interrupt status on
1043 	 * state changes, which will cause some drivers to
1044 	 * think there are errors - additionally drivers will
1045 	 * need to disable hot plug.
1046 	 */
1047 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
1048 		ap->pm_policy = NOT_AVAILABLE;
1049 		return -EINVAL;
1050 	}
1051 
1052 	/*
1053 	 * For DIPM, we will only enable it for the
1054 	 * min_power setting.
1055 	 *
1056 	 * Why?  Because disks are too stupid to know that
1057 	 * if the host rejects a request to go to SLUMBER
1058 	 * they should retry at PARTIAL; instead they
1059 	 * just give up.  So, for medium_power to
1060 	 * work at all, we need to only allow HIPM.
1061 	 */
1062 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
1063 	if (rc)
1064 		return rc;
1065 
1066 	switch (policy) {
1067 	case MIN_POWER:
1068 		/* no restrictions on IPM transitions */
1069 		scontrol &= ~(0x3 << 8);
1070 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1071 		if (rc)
1072 			return rc;
1073 
1074 		/* enable DIPM */
1075 		if (dev->flags & ATA_DFLAG_DIPM)
1076 			err_mask = ata_dev_set_feature(dev,
1077 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
1078 		break;
1079 	case MEDIUM_POWER:
1080 		/* allow IPM to PARTIAL */
1081 		scontrol &= ~(0x1 << 8);
1082 		scontrol |= (0x2 << 8);
1083 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1084 		if (rc)
1085 			return rc;
1086 
1087 		/*
1088 		 * we don't have to disable DIPM since IPM flags
1089 		 * disallow transitions to SLUMBER, which effectively
1090 		 * disables DIPM if the device does not support PARTIAL
1091 		 */
1092 		break;
1093 	case NOT_AVAILABLE:
1094 	case MAX_PERFORMANCE:
1095 		/* disable all IPM transitions */
1096 		scontrol |= (0x3 << 8);
1097 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1098 		if (rc)
1099 			return rc;
1100 
1101 		/*
1102 		 * we don't have to disable DIPM since IPM flags
1103 		 * disallow all transitions, which effectively
1104 		 * disables DIPM anyway.
1105 		 */
1106 		break;
1107 	}
1108 
1109 	/* FIXME: handle SET FEATURES failure */
1110 	(void) err_mask;
1111 
1112 	return 0;
1113 }
1114 
1115 /**
1116  *	ata_dev_enable_pm - enable SATA interface power management
1117  *	@dev:  device to enable power management
1118  *	@policy: the link power management policy
1119  *
1120  *	Enable SATA Interface power management.  This will enable
1121  *	Device Interface Power Management (DIPM) for min_power
1122  * 	policy, and then call driver specific callbacks for
1123  *	enabling Host Initiated Power management.
1124  *
1125  *	Locking: Caller.
1126  *	Returns: void; on failure, @ap->pm_policy falls back to MAX_PERFORMANCE.
1127  */
1128 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1129 {
1130 	int rc = 0;
1131 	struct ata_port *ap = dev->link->ap;
1132 
1133 	/* set HIPM first, then DIPM */
1134 	if (ap->ops->enable_pm)
1135 		rc = ap->ops->enable_pm(ap, policy);
1136 	if (rc)
1137 		goto enable_pm_out;
1138 	rc = ata_dev_set_dipm(dev, policy);
1139 
1140 enable_pm_out:
1141 	if (rc)
1142 		ap->pm_policy = MAX_PERFORMANCE;
1143 	else
1144 		ap->pm_policy = policy;
1145 	return /* rc */;	/* hopefully we can use 'rc' eventually */
1146 }
1147 
1148 #ifdef CONFIG_PM
1149 /**
1150  *	ata_dev_disable_pm - disable SATA interface power management
1151  *	@dev: device to disable power management
1152  *
1153  *	Disable SATA Interface power management.  This will disable
1154  *	Device Interface Power Management (DIPM) without changing
1155  * 	policy,  call driver specific callbacks for disabling Host
1156  * 	Initiated Power management.
1157  *
1158  *	Locking: Caller.
1159  *	Returns: void
1160  */
1161 static void ata_dev_disable_pm(struct ata_device *dev)
1162 {
1163 	struct ata_port *ap = dev->link->ap;
1164 
1165 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1166 	if (ap->ops->disable_pm)
1167 		ap->ops->disable_pm(ap);
1168 }
1169 #endif	/* CONFIG_PM */
1170 
1171 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1172 {
1173 	ap->pm_policy = policy;
1174 	ap->link.eh_info.action |= ATA_EH_LPM;
1175 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1176 	ata_port_schedule_eh(ap);
1177 }
1178 
1179 #ifdef CONFIG_PM
1180 static void ata_lpm_enable(struct ata_host *host)
1181 {
1182 	struct ata_link *link;
1183 	struct ata_port *ap;
1184 	struct ata_device *dev;
1185 	int i;
1186 
1187 	for (i = 0; i < host->n_ports; i++) {
1188 		ap = host->ports[i];
1189 		ata_for_each_link(link, ap, EDGE) {
1190 			ata_for_each_dev(dev, link, ALL)
1191 				ata_dev_disable_pm(dev);
1192 		}
1193 	}
1194 }
1195 
1196 static void ata_lpm_disable(struct ata_host *host)
1197 {
1198 	int i;
1199 
1200 	for (i = 0; i < host->n_ports; i++) {
1201 		struct ata_port *ap = host->ports[i];
1202 		ata_lpm_schedule(ap, ap->pm_policy);
1203 	}
1204 }
1205 #endif	/* CONFIG_PM */
1206 
1207 /**
1208  *	ata_dev_classify - determine device type based on ATA-spec signature
1209  *	@tf: ATA taskfile register set for device to be identified
1210  *
1211  *	Determine from taskfile register contents whether a device is
1212  *	ATA or ATAPI, as per "Signature and persistence" section
1213  *	of ATA/PI spec (volume 1, sect 5.14).
1214  *
1215  *	LOCKING:
1216  *	None.
1217  *
1218  *	RETURNS:
1219  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1220  *	%ATA_DEV_SEMB or %ATA_DEV_UNKNOWN in the event of failure.
1221  */
1222 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1223 {
1224 	/* Apple's open source Darwin code hints that some devices only
1225 	 * put a proper signature into the LBA mid/high registers,
1226 	 * so we check only those.  It's sufficient for uniqueness.
1227 	 *
1228 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1229 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1230 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
1231 	 * spec never mentioned using different signatures
1232 	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
1233 	 * Multiplier specification began to use 0x69/0x96 to identify
1234 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1235 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1236 	 * 0x69/0x96 and described them as reserved for
1237 	 * SerialATA.
1238 	 *
1239 	 * We follow the current spec and consider that 0x69/0x96
1240 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1241 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1242 	 * SEMB signature.  This is worked around in
1243 	 * ata_dev_read_id().
1244 	 */
1245 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1246 		DPRINTK("found ATA device by sig\n");
1247 		return ATA_DEV_ATA;
1248 	}
1249 
1250 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1251 		DPRINTK("found ATAPI device by sig\n");
1252 		return ATA_DEV_ATAPI;
1253 	}
1254 
1255 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1256 		DPRINTK("found PMP device by sig\n");
1257 		return ATA_DEV_PMP;
1258 	}
1259 
1260 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1261 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1262 		return ATA_DEV_SEMB;
1263 	}
1264 
1265 	DPRINTK("unknown device\n");
1266 	return ATA_DEV_UNKNOWN;
1267 }
1268 
1269 /**
1270  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1271  *	@id: IDENTIFY DEVICE results we will examine
1272  *	@s: string into which data is output
1273  *	@ofs: offset into identify device page
1274  *	@len: length of string to return. must be an even number.
1275  *
1276  *	The strings in the IDENTIFY DEVICE page are broken up into
1277  *	16-bit chunks.  Run through the string, and output each
1278  *	8-bit chunk linearly, regardless of platform.
1279  *
1280  *	LOCKING:
1281  *	caller.
1282  */
1283 
1284 void ata_id_string(const u16 *id, unsigned char *s,
1285 		   unsigned int ofs, unsigned int len)
1286 {
1287 	unsigned int c;
1288 
1289 	BUG_ON(len & 1);
1290 
1291 	while (len > 0) {
1292 		c = id[ofs] >> 8;
1293 		*s = c;
1294 		s++;
1295 
1296 		c = id[ofs] & 0xff;
1297 		*s = c;
1298 		s++;
1299 
1300 		ofs++;
1301 		len -= 2;
1302 	}
1303 }
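
/*
 * Editor's note (example, not part of the original source): an IDENTIFY
 * word of 0x4142 carries 'A' in its high byte and 'B' in its low byte, so
 * ata_id_string() emits "AB", undoing the 16-bit packing independently of
 * host endianness.
 */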
1304 
1305 /**
1306  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1307  *	@id: IDENTIFY DEVICE results we will examine
1308  *	@s: string into which data is output
1309  *	@ofs: offset into identify device page
1310  *	@len: length of string to return. must be an odd number.
1311  *
1312  *	This function is identical to ata_id_string except that it
1313  *	trims trailing spaces and terminates the resulting string with
1314  *	null.  @len must be actual maximum length (even number) + 1.
1315  *
1316  *	LOCKING:
1317  *	caller.
1318  */
1319 void ata_id_c_string(const u16 *id, unsigned char *s,
1320 		     unsigned int ofs, unsigned int len)
1321 {
1322 	unsigned char *p;
1323 
1324 	ata_id_string(id, s, ofs, len - 1);
1325 
1326 	p = s + strnlen(s, len - 1);
1327 	while (p > s && p[-1] == ' ')
1328 		p--;
1329 	*p = '\0';
1330 }
1331 
1332 static u64 ata_id_n_sectors(const u16 *id)
1333 {
1334 	if (ata_id_has_lba(id)) {
1335 		if (ata_id_has_lba48(id))
1336 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1337 		else
1338 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1339 	} else {
1340 		if (ata_id_current_chs_valid(id))
1341 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1342 			       id[ATA_ID_CUR_SECTORS];
1343 		else
1344 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1345 			       id[ATA_ID_SECTORS];
1346 	}
1347 }
1348 
1349 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1350 {
1351 	u64 sectors = 0;
1352 
1353 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1354 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1355 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1356 	sectors |= (tf->lbah & 0xff) << 16;
1357 	sectors |= (tf->lbam & 0xff) << 8;
1358 	sectors |= (tf->lbal & 0xff);
1359 
1360 	return sectors;
1361 }
1362 
1363 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1364 {
1365 	u64 sectors = 0;
1366 
1367 	sectors |= (tf->device & 0x0f) << 24;
1368 	sectors |= (tf->lbah & 0xff) << 16;
1369 	sectors |= (tf->lbam & 0xff) << 8;
1370 	sectors |= (tf->lbal & 0xff);
1371 
1372 	return sectors;
1373 }
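
/*
 * Editor's note (example, not part of the original source): for an LBA28
 * taskfile with device == 0xe5, lbah == 0x01, lbam == 0x02 and lbal == 0x03,
 * ata_tf_to_lba() assembles (0xe5 & 0x0f) << 24 | 0x01 << 16 | 0x02 << 8 |
 * 0x03 == 0x05010203.
 */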
1374 
1375 /**
1376  *	ata_read_native_max_address - Read native max address
1377  *	@dev: target device
1378  *	@max_sectors: out parameter for the result native max address
1379  *
1380  *	Perform an LBA48 or LBA28 native size query upon the device in
1381  *	question.
1382  *
1383  *	RETURNS:
1384  *	0 on success, -EACCES if command is aborted by the drive.
1385  *	-EIO on other errors.
1386  */
1387 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1388 {
1389 	unsigned int err_mask;
1390 	struct ata_taskfile tf;
1391 	int lba48 = ata_id_has_lba48(dev->id);
1392 
1393 	ata_tf_init(dev, &tf);
1394 
1395 	/* always clear all address registers */
1396 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1397 
1398 	if (lba48) {
1399 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1400 		tf.flags |= ATA_TFLAG_LBA48;
1401 	} else
1402 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1403 
1404 	tf.protocol |= ATA_PROT_NODATA;
1405 	tf.device |= ATA_LBA;
1406 
1407 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1408 	if (err_mask) {
1409 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1410 			       "max address (err_mask=0x%x)\n", err_mask);
1411 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1412 			return -EACCES;
1413 		return -EIO;
1414 	}
1415 
1416 	if (lba48)
1417 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1418 	else
1419 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1420 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1421 		(*max_sectors)--;
1422 	return 0;
1423 }
1424 
1425 /**
1426  *	ata_set_max_sectors - Set max sectors
1427  *	@dev: target device
1428  *	@new_sectors: new max sectors value to set for the device
1429  *
1430  *	Set max sectors of @dev to @new_sectors.
1431  *
1432  *	RETURNS:
1433  *	0 on success, -EACCES if command is aborted or denied (due to
1434  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1435  *	errors.
1436  */
1437 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1438 {
1439 	unsigned int err_mask;
1440 	struct ata_taskfile tf;
1441 	int lba48 = ata_id_has_lba48(dev->id);
1442 
1443 	new_sectors--;
1444 
1445 	ata_tf_init(dev, &tf);
1446 
1447 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1448 
1449 	if (lba48) {
1450 		tf.command = ATA_CMD_SET_MAX_EXT;
1451 		tf.flags |= ATA_TFLAG_LBA48;
1452 
1453 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1454 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1455 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1456 	} else {
1457 		tf.command = ATA_CMD_SET_MAX;
1458 
1459 		tf.device |= (new_sectors >> 24) & 0xf;
1460 	}
1461 
1462 	tf.protocol |= ATA_PROT_NODATA;
1463 	tf.device |= ATA_LBA;
1464 
1465 	tf.lbal = (new_sectors >> 0) & 0xff;
1466 	tf.lbam = (new_sectors >> 8) & 0xff;
1467 	tf.lbah = (new_sectors >> 16) & 0xff;
1468 
1469 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1470 	if (err_mask) {
1471 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1472 			       "max address (err_mask=0x%x)\n", err_mask);
1473 		if (err_mask == AC_ERR_DEV &&
1474 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1475 			return -EACCES;
1476 		return -EIO;
1477 	}
1478 
1479 	return 0;
1480 }
1481 
1482 /**
1483  *	ata_hpa_resize		-	Resize a device with an HPA set
1484  *	@dev: Device to resize
1485  *
1486  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1487  *	it if required to the full size of the media. The caller must check
1488  *	the drive has the HPA feature set enabled.
1489  *
1490  *	RETURNS:
1491  *	0 on success, -errno on failure.
1492  */
1493 static int ata_hpa_resize(struct ata_device *dev)
1494 {
1495 	struct ata_eh_context *ehc = &dev->link->eh_context;
1496 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1497 	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1498 	u64 sectors = ata_id_n_sectors(dev->id);
1499 	u64 native_sectors;
1500 	int rc;
1501 
1502 	/* do we need to do it? */
1503 	if (dev->class != ATA_DEV_ATA ||
1504 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1505 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1506 		return 0;
1507 
1508 	/* read native max address */
1509 	rc = ata_read_native_max_address(dev, &native_sectors);
1510 	if (rc) {
1511 		/* If device aborted the command or HPA isn't going to
1512 		 * be unlocked, skip HPA resizing.
1513 		 */
1514 		if (rc == -EACCES || !unlock_hpa) {
1515 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1516 				       "broken, skipping HPA handling\n");
1517 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1518 
1519 			/* we can continue if device aborted the command */
1520 			if (rc == -EACCES)
1521 				rc = 0;
1522 		}
1523 
1524 		return rc;
1525 	}
1526 	dev->n_native_sectors = native_sectors;
1527 
1528 	/* nothing to do? */
1529 	if (native_sectors <= sectors || !unlock_hpa) {
1530 		if (!print_info || native_sectors == sectors)
1531 			return 0;
1532 
1533 		if (native_sectors > sectors)
1534 			ata_dev_printk(dev, KERN_INFO,
1535 				"HPA detected: current %llu, native %llu\n",
1536 				(unsigned long long)sectors,
1537 				(unsigned long long)native_sectors);
1538 		else if (native_sectors < sectors)
1539 			ata_dev_printk(dev, KERN_WARNING,
1540 				"native sectors (%llu) is smaller than "
1541 				"sectors (%llu)\n",
1542 				(unsigned long long)native_sectors,
1543 				(unsigned long long)sectors);
1544 		return 0;
1545 	}
1546 
1547 	/* let's unlock HPA */
1548 	rc = ata_set_max_sectors(dev, native_sectors);
1549 	if (rc == -EACCES) {
1550 		/* if device aborted the command, skip HPA resizing */
1551 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1552 			       "(%llu -> %llu), skipping HPA handling\n",
1553 			       (unsigned long long)sectors,
1554 			       (unsigned long long)native_sectors);
1555 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1556 		return 0;
1557 	} else if (rc)
1558 		return rc;
1559 
1560 	/* re-read IDENTIFY data */
1561 	rc = ata_dev_reread_id(dev, 0);
1562 	if (rc) {
1563 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1564 			       "data after HPA resizing\n");
1565 		return rc;
1566 	}
1567 
1568 	if (print_info) {
1569 		u64 new_sectors = ata_id_n_sectors(dev->id);
1570 		ata_dev_printk(dev, KERN_INFO,
1571 			"HPA unlocked: %llu -> %llu, native %llu\n",
1572 			(unsigned long long)sectors,
1573 			(unsigned long long)new_sectors,
1574 			(unsigned long long)native_sectors);
1575 	}
1576 
1577 	return 0;
1578 }
1579 
1580 /**
1581  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1582  *	@id: IDENTIFY DEVICE page to dump
1583  *
1584  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1585  *	page.
1586  *
1587  *	LOCKING:
1588  *	caller.
1589  */
1590 
1591 static inline void ata_dump_id(const u16 *id)
1592 {
1593 	DPRINTK("49==0x%04x  "
1594 		"53==0x%04x  "
1595 		"63==0x%04x  "
1596 		"64==0x%04x  "
1597 		"75==0x%04x  \n",
1598 		id[49],
1599 		id[53],
1600 		id[63],
1601 		id[64],
1602 		id[75]);
1603 	DPRINTK("80==0x%04x  "
1604 		"81==0x%04x  "
1605 		"82==0x%04x  "
1606 		"83==0x%04x  "
1607 		"84==0x%04x  \n",
1608 		id[80],
1609 		id[81],
1610 		id[82],
1611 		id[83],
1612 		id[84]);
1613 	DPRINTK("88==0x%04x  "
1614 		"93==0x%04x\n",
1615 		id[88],
1616 		id[93]);
1617 }
1618 
1619 /**
1620  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1621  *	@id: IDENTIFY data to compute xfer mask from
1622  *
1623  *	Compute the xfermask for this device. This is not as trivial
1624  *	as it seems if we must consider early devices correctly.
1625  *
1626  *	FIXME: pre IDE drive timing (do we care ?).
1627  *
1628  *	LOCKING:
1629  *	None.
1630  *
1631  *	RETURNS:
1632  *	Computed xfermask
1633  */
1634 unsigned long ata_id_xfermask(const u16 *id)
1635 {
1636 	unsigned long pio_mask, mwdma_mask, udma_mask;
1637 
1638 	/* Usual case. Word 53 indicates word 64 is valid */
1639 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1640 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1641 		pio_mask <<= 3;
1642 		pio_mask |= 0x7;
1643 	} else {
1644 		/* If word 64 isn't valid then Word 51 high byte holds
1645 		 * the PIO timing number for the maximum. Turn it into
1646 		 * a mask.
1647 		 */
1648 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1649 		if (mode < 5)	/* Valid PIO range */
1650 			pio_mask = (2 << mode) - 1;
1651 		else
1652 			pio_mask = 1;
1653 
1654 		/* But wait.. there's more. Design your standards by
1655 		 * committee and you too can get a free iordy field to
1656 		 * process.  However, it's the speeds, not the modes, that
1657 		 * are supported... Note drivers using the timing API
1658 		 * will get this right anyway
1659 		 */
1660 	}
1661 
1662 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1663 
1664 	if (ata_id_is_cfa(id)) {
1665 		/*
1666 		 *	Process compact flash extended modes
1667 		 */
1668 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1669 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1670 
1671 		if (pio)
1672 			pio_mask |= (1 << 5);
1673 		if (pio > 1)
1674 			pio_mask |= (1 << 6);
1675 		if (dma)
1676 			mwdma_mask |= (1 << 3);
1677 		if (dma > 1)
1678 			mwdma_mask |= (1 << 4);
1679 	}
1680 
1681 	udma_mask = 0;
1682 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1683 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1684 
1685 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1686 }
1687 
1688 /**
1689  *	ata_pio_queue_task - Queue port_task
1690  *	@ap: The ata_port to queue port_task for
1691  *	@data: data for @fn to use
1692  *	@delay: delay time in msecs for workqueue function
1693  *
1694  *	Schedule @fn(@data) for execution after @delay jiffies using
1695  *	port_task.  There is one port_task per port and it's the
1696  *	user (low level driver)'s responsibility to make sure that only
1697  *	one task is active at any given time.
1698  *
1699  *	libata core layer takes care of synchronization between
1700  *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
1701  *	synchronization.
1702  *
1703  *	LOCKING:
1704  *	Inherited from caller.
1705  */
1706 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1707 {
1708 	ap->port_task_data = data;
1709 
1710 	/* may fail if ata_port_flush_task() in progress */
1711 	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1712 }
1713 
1714 /**
1715  *	ata_port_flush_task - Flush port_task
1716  *	@ap: The ata_port to flush port_task for
1717  *
1718  *	After this function completes, port_task is guaranteed not to
1719  *	be running or scheduled.
1720  *
1721  *	LOCKING:
1722  *	Kernel thread context (may sleep)
1723  */
1724 void ata_port_flush_task(struct ata_port *ap)
1725 {
1726 	DPRINTK("ENTER\n");
1727 
1728 	cancel_rearming_delayed_work(&ap->port_task);
1729 
1730 	if (ata_msg_ctl(ap))
1731 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1732 }
1733 
1734 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1735 {
1736 	struct completion *waiting = qc->private_data;
1737 
1738 	complete(waiting);
1739 }
1740 
1741 /**
1742  *	ata_exec_internal_sg - execute libata internal command
1743  *	@dev: Device to which the command is sent
1744  *	@tf: Taskfile registers for the command and the result
1745  *	@cdb: CDB for packet command
1746  *	@dma_dir: Data transfer direction of the command
1747  *	@sgl: sg list for the data buffer of the command
1748  *	@n_elem: Number of sg entries
1749  *	@timeout: Timeout in msecs (0 for default)
1750  *
1751  *	Executes libata internal command with timeout.  @tf contains
1752  *	command on entry and result on return.  Timeout and error
1753  *	conditions are reported via return value.  No recovery action
1754  *	is taken after a command times out.  It's the caller's duty to
1755  *	clean up after timeout.
1756  *
1757  *	LOCKING:
1758  *	None.  Should be called with kernel context, might sleep.
1759  *
1760  *	RETURNS:
1761  *	Zero on success, AC_ERR_* mask on failure
1762  */
1763 unsigned ata_exec_internal_sg(struct ata_device *dev,
1764 			      struct ata_taskfile *tf, const u8 *cdb,
1765 			      int dma_dir, struct scatterlist *sgl,
1766 			      unsigned int n_elem, unsigned long timeout)
1767 {
1768 	struct ata_link *link = dev->link;
1769 	struct ata_port *ap = link->ap;
1770 	u8 command = tf->command;
1771 	int auto_timeout = 0;
1772 	struct ata_queued_cmd *qc;
1773 	unsigned int tag, preempted_tag;
1774 	u32 preempted_sactive, preempted_qc_active;
1775 	int preempted_nr_active_links;
1776 	DECLARE_COMPLETION_ONSTACK(wait);
1777 	unsigned long flags;
1778 	unsigned int err_mask;
1779 	int rc;
1780 
1781 	spin_lock_irqsave(ap->lock, flags);
1782 
1783 	/* no internal command while frozen */
1784 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1785 		spin_unlock_irqrestore(ap->lock, flags);
1786 		return AC_ERR_SYSTEM;
1787 	}
1788 
1789 	/* initialize internal qc */
1790 
1791 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1792 	 * drivers choke if any other tag is given.  This breaks
1793 	 * ata_tag_internal() test for those drivers.  Don't use new
1794 	 * EH stuff without converting to it.
1795 	 */
1796 	if (ap->ops->error_handler)
1797 		tag = ATA_TAG_INTERNAL;
1798 	else
1799 		tag = 0;
1800 
1801 	if (test_and_set_bit(tag, &ap->qc_allocated))
1802 		BUG();
1803 	qc = __ata_qc_from_tag(ap, tag);
1804 
1805 	qc->tag = tag;
1806 	qc->scsicmd = NULL;
1807 	qc->ap = ap;
1808 	qc->dev = dev;
1809 	ata_qc_reinit(qc);
1810 
1811 	preempted_tag = link->active_tag;
1812 	preempted_sactive = link->sactive;
1813 	preempted_qc_active = ap->qc_active;
1814 	preempted_nr_active_links = ap->nr_active_links;
1815 	link->active_tag = ATA_TAG_POISON;
1816 	link->sactive = 0;
1817 	ap->qc_active = 0;
1818 	ap->nr_active_links = 0;
1819 
1820 	/* prepare & issue qc */
1821 	qc->tf = *tf;
1822 	if (cdb)
1823 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1824 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1825 	qc->dma_dir = dma_dir;
1826 	if (dma_dir != DMA_NONE) {
1827 		unsigned int i, buflen = 0;
1828 		struct scatterlist *sg;
1829 
1830 		for_each_sg(sgl, sg, n_elem, i)
1831 			buflen += sg->length;
1832 
1833 		ata_sg_init(qc, sgl, n_elem);
1834 		qc->nbytes = buflen;
1835 	}
1836 
1837 	qc->private_data = &wait;
1838 	qc->complete_fn = ata_qc_complete_internal;
1839 
1840 	ata_qc_issue(qc);
1841 
1842 	spin_unlock_irqrestore(ap->lock, flags);
1843 
1844 	if (!timeout) {
1845 		if (ata_probe_timeout)
1846 			timeout = ata_probe_timeout * 1000;
1847 		else {
1848 			timeout = ata_internal_cmd_timeout(dev, command);
1849 			auto_timeout = 1;
1850 		}
1851 	}
1852 
1853 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1854 
1855 	ata_port_flush_task(ap);
1856 
1857 	if (!rc) {
1858 		spin_lock_irqsave(ap->lock, flags);
1859 
1860 		/* We're racing with irq here.  If we lose, the
1861 		 * following test prevents us from completing the qc
1862 		 * twice.  If we win, the port is frozen and will be
1863 		 * cleaned up by ->post_internal_cmd().
1864 		 */
1865 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1866 			qc->err_mask |= AC_ERR_TIMEOUT;
1867 
1868 			if (ap->ops->error_handler)
1869 				ata_port_freeze(ap);
1870 			else
1871 				ata_qc_complete(qc);
1872 
1873 			if (ata_msg_warn(ap))
1874 				ata_dev_printk(dev, KERN_WARNING,
1875 					"qc timeout (cmd 0x%x)\n", command);
1876 		}
1877 
1878 		spin_unlock_irqrestore(ap->lock, flags);
1879 	}
1880 
1881 	/* do post_internal_cmd */
1882 	if (ap->ops->post_internal_cmd)
1883 		ap->ops->post_internal_cmd(qc);
1884 
1885 	/* perform minimal error analysis */
1886 	if (qc->flags & ATA_QCFLAG_FAILED) {
1887 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1888 			qc->err_mask |= AC_ERR_DEV;
1889 
1890 		if (!qc->err_mask)
1891 			qc->err_mask |= AC_ERR_OTHER;
1892 
1893 		if (qc->err_mask & ~AC_ERR_OTHER)
1894 			qc->err_mask &= ~AC_ERR_OTHER;
1895 	}
1896 
1897 	/* finish up */
1898 	spin_lock_irqsave(ap->lock, flags);
1899 
1900 	*tf = qc->result_tf;
1901 	err_mask = qc->err_mask;
1902 
1903 	ata_qc_free(qc);
1904 	link->active_tag = preempted_tag;
1905 	link->sactive = preempted_sactive;
1906 	ap->qc_active = preempted_qc_active;
1907 	ap->nr_active_links = preempted_nr_active_links;
1908 
1909 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1910 	 * Until those drivers are fixed, we detect the condition
1911 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1912 	 * port.
1913 	 *
1914 	 * Note that this doesn't change any behavior as internal
1915 	 * command failure results in disabling the device in the
1916 	 * higher layer for LLDDs without new reset/EH callbacks.
1917 	 *
1918 	 * Kill the following code as soon as those drivers are fixed.
1919 	 */
1920 	if (ap->flags & ATA_FLAG_DISABLED) {
1921 		err_mask |= AC_ERR_SYSTEM;
1922 		ata_port_probe(ap);
1923 	}
1924 
1925 	spin_unlock_irqrestore(ap->lock, flags);
1926 
1927 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1928 		ata_internal_cmd_timed_out(dev, command);
1929 
1930 	return err_mask;
1931 }
1932 
1933 /**
1934  *	ata_exec_internal - execute libata internal command
1935  *	@dev: Device to which the command is sent
1936  *	@tf: Taskfile registers for the command and the result
1937  *	@cdb: CDB for packet command
1938  *	@dma_dir: Data transfer direction of the command
1939  *	@buf: Data buffer of the command
1940  *	@buflen: Length of data buffer
1941  *	@timeout: Timeout in msecs (0 for default)
1942  *
1943  *	Wrapper around ata_exec_internal_sg() which takes simple
1944  *	buffer instead of sg list.
1945  *
1946  *	LOCKING:
1947  *	None.  Should be called with kernel context, might sleep.
1948  *
1949  *	RETURNS:
1950  *	Zero on success, AC_ERR_* mask on failure
1951  */
1952 unsigned ata_exec_internal(struct ata_device *dev,
1953 			   struct ata_taskfile *tf, const u8 *cdb,
1954 			   int dma_dir, void *buf, unsigned int buflen,
1955 			   unsigned long timeout)
1956 {
1957 	struct scatterlist *psg = NULL, sg;
1958 	unsigned int n_elem = 0;
1959 
1960 	if (dma_dir != DMA_NONE) {
1961 		WARN_ON(!buf);
1962 		sg_init_one(&sg, buf, buflen);
1963 		psg = &sg;
1964 		n_elem++;
1965 	}
1966 
1967 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1968 				    timeout);
1969 }
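
/*
 * Illustrative sketch (not part of the driver): issuing a PIO data-in
 * internal command through ata_exec_internal().  This mirrors what the
 * EH code does elsewhere in libata to fetch a log page; the helper
 * name is hypothetical.
 */
static unsigned int example_read_log_page(struct ata_device *dev, u8 page,
					  void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	/* default timeout (0), no CDB, data comes back in @buf */
	return ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				 buf, sectors * ATA_SECT_SIZE, 0);
}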
1970 
1971 /**
1972  *	ata_do_simple_cmd - execute simple internal command
1973  *	@dev: Device to which the command is sent
1974  *	@cmd: Opcode to execute
1975  *
1976  *	Execute a 'simple' command, that only consists of the opcode
1977  *	'cmd' itself, without filling any other registers
1978  *
1979  *	LOCKING:
1980  *	Kernel thread context (may sleep).
1981  *
1982  *	RETURNS:
1983  *	Zero on success, AC_ERR_* mask on failure
1984  */
1985 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1986 {
1987 	struct ata_taskfile tf;
1988 
1989 	ata_tf_init(dev, &tf);
1990 
1991 	tf.command = cmd;
1992 	tf.flags |= ATA_TFLAG_DEVICE;
1993 	tf.protocol = ATA_PROT_NODATA;
1994 
1995 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1996 }
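
/*
 * Illustrative sketch: ata_do_simple_cmd() is handy for single-opcode,
 * no-data commands such as STANDBY IMMEDIATE.  The wrapper below is
 * hypothetical; error handling is left to the caller.
 */
static unsigned int example_standby_drive(struct ata_device *dev)
{
	/* ATA_CMD_STANDBYNOW1 == STANDBY IMMEDIATE */
	return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
}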
1997 
1998 /**
1999  *	ata_pio_need_iordy	-	check if iordy needed
2000  *	@adev: ATA device
2001  *
2002  *	Check if the current speed of the device requires IORDY. Used
2003  *	by various controllers for chip configuration.
2004  */
2005 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
2006 {
2007 	/* Don't set IORDY if we're preparing for reset.  IORDY may
2008 	 * lead to controller lock up on certain controllers if the
2009 	 * port is not occupied.  See bko#11703 for details.
2010 	 */
2011 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
2012 		return 0;
2013 	/* Controller doesn't support IORDY.  Probably a pointless
2014 	 * check as the caller should know this.
2015 	 */
2016 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
2017 		return 0;
2018 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
2019 	if (ata_id_is_cfa(adev->id)
2020 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
2021 		return 0;
2022 	/* PIO3 and higher it is mandatory */
2023 	if (adev->pio_mode > XFER_PIO_2)
2024 		return 1;
2025 	/* We turn it on when possible */
2026 	if (ata_id_has_iordy(adev->id))
2027 		return 1;
2028 	return 0;
2029 }
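
/*
 * Illustrative sketch: a PATA LLDD's ->set_piomode() would typically
 * consult ata_pio_need_iordy() before programming its timing
 * registers.  The register encoding and IORDY bit below are made up;
 * only the IORDY query itself is real.
 */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	u8 timing = 0x10 + (adev->pio_mode - XFER_PIO_0);  /* hypothetical encoding */

	if (ata_pio_need_iordy(adev))
		timing |= 0x80;  /* hypothetical IORDY enable bit */

	/* iowrite8(timing, <chip specific timing register>); */
}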
2030 
2031 /**
2032  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
2033  *	@adev: ATA device
2034  *
2035  *	Compute the mask of PIO modes usable when IORDY is not being used.
2036  *	The result is used to cap the PIO mask on controllers that cannot
2037  *	drive IORDY.
2037  */
2038 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2039 {
2040 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
2041 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
2042 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
2043 		/* Is the speed faster than the drive allows non IORDY ? */
2044 		if (pio) {
2045 			/* This is cycle times not frequency - watch the logic! */
2046 			if (pio > 240)	/* PIO2 is 240ns per cycle */
2047 				return 3 << ATA_SHIFT_PIO;
2048 			return 7 << ATA_SHIFT_PIO;
2049 		}
2050 	}
2051 	return 3 << ATA_SHIFT_PIO;
2052 }
2053 
2054 /**
2055  *	ata_do_dev_read_id		-	default ID read method
2056  *	@dev: device
2057  *	@tf: proposed taskfile
2058  *	@id: data buffer
2059  *
2060  *	Issue the identify taskfile and hand back the buffer containing
2061  *	identify data. For some RAID controllers and for pre-ATA devices
2062  *	this function is wrapped or replaced by the driver.
2063  */
2064 unsigned int ata_do_dev_read_id(struct ata_device *dev,
2065 					struct ata_taskfile *tf, u16 *id)
2066 {
2067 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
2068 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
2069 }
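
/*
 * Illustrative sketch: a controller driver that must massage IDENTIFY
 * data can supply its own ->read_id() which wraps the default method
 * above.  The fixup shown is purely hypothetical.
 */
static unsigned int example_read_id(struct ata_device *dev,
				    struct ata_taskfile *tf, u16 *id)
{
	unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);

	if (!err_mask)
		id[ATA_ID_MAX_MULTSECT] &= 0xff00;  /* hypothetical: hide R/W Multiple */

	return err_mask;
}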
2070 
2071 /**
2072  *	ata_dev_read_id - Read ID data from the specified device
2073  *	@dev: target device
2074  *	@p_class: pointer to class of the target device (may be changed)
2075  *	@flags: ATA_READID_* flags
2076  *	@id: buffer to read IDENTIFY data into
2077  *
2078  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
2079  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
2080  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
2081  *	for pre-ATA4 drives.
2082  *
2083  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2084  *	now we abort if we hit that case.
2085  *
2086  *	LOCKING:
2087  *	Kernel thread context (may sleep)
2088  *
2089  *	RETURNS:
2090  *	0 on success, -errno otherwise.
2091  */
2092 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2093 		    unsigned int flags, u16 *id)
2094 {
2095 	struct ata_port *ap = dev->link->ap;
2096 	unsigned int class = *p_class;
2097 	struct ata_taskfile tf;
2098 	unsigned int err_mask = 0;
2099 	const char *reason;
2100 	bool is_semb = class == ATA_DEV_SEMB;
2101 	int may_fallback = 1, tried_spinup = 0;
2102 	int rc;
2103 
2104 	if (ata_msg_ctl(ap))
2105 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2106 
2107 retry:
2108 	ata_tf_init(dev, &tf);
2109 
2110 	switch (class) {
2111 	case ATA_DEV_SEMB:
2112 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig; fall through */
2113 	case ATA_DEV_ATA:
2114 		tf.command = ATA_CMD_ID_ATA;
2115 		break;
2116 	case ATA_DEV_ATAPI:
2117 		tf.command = ATA_CMD_ID_ATAPI;
2118 		break;
2119 	default:
2120 		rc = -ENODEV;
2121 		reason = "unsupported class";
2122 		goto err_out;
2123 	}
2124 
2125 	tf.protocol = ATA_PROT_PIO;
2126 
2127 	/* Some devices choke if TF registers contain garbage.  Make
2128 	 * sure those are properly initialized.
2129 	 */
2130 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2131 
2132 	/* Device presence detection is unreliable on some
2133 	 * controllers.  Always poll IDENTIFY if available.
2134 	 */
2135 	tf.flags |= ATA_TFLAG_POLLING;
2136 
2137 	if (ap->ops->read_id)
2138 		err_mask = ap->ops->read_id(dev, &tf, id);
2139 	else
2140 		err_mask = ata_do_dev_read_id(dev, &tf, id);
2141 
2142 	if (err_mask) {
2143 		if (err_mask & AC_ERR_NODEV_HINT) {
2144 			ata_dev_printk(dev, KERN_DEBUG,
2145 				       "NODEV after polling detection\n");
2146 			return -ENOENT;
2147 		}
2148 
2149 		if (is_semb) {
2150 			ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
2151 				       "device w/ SEMB sig, disabled\n");
2152 			/* SEMB is not supported yet */
2153 			*p_class = ATA_DEV_SEMB_UNSUP;
2154 			return 0;
2155 		}
2156 
2157 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2158 			/* Device or controller might have reported
2159 			 * the wrong device class.  Give a shot at the
2160 			 * other IDENTIFY if the current one is
2161 			 * aborted by the device.
2162 			 */
2163 			if (may_fallback) {
2164 				may_fallback = 0;
2165 
2166 				if (class == ATA_DEV_ATA)
2167 					class = ATA_DEV_ATAPI;
2168 				else
2169 					class = ATA_DEV_ATA;
2170 				goto retry;
2171 			}
2172 
2173 			/* Control reaches here iff the device aborted
2174 			 * both flavors of IDENTIFYs which happens
2175 			 * sometimes with phantom devices.
2176 			 */
2177 			ata_dev_printk(dev, KERN_DEBUG,
2178 				       "both IDENTIFYs aborted, assuming NODEV\n");
2179 			return -ENOENT;
2180 		}
2181 
2182 		rc = -EIO;
2183 		reason = "I/O error";
2184 		goto err_out;
2185 	}
2186 
2187 	/* Falling back doesn't make sense if ID data was read
2188 	 * successfully at least once.
2189 	 */
2190 	may_fallback = 0;
2191 
2192 	swap_buf_le16(id, ATA_ID_WORDS);
2193 
2194 	/* sanity check */
2195 	rc = -EINVAL;
2196 	reason = "device reports invalid type";
2197 
2198 	if (class == ATA_DEV_ATA) {
2199 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2200 			goto err_out;
2201 	} else {
2202 		if (ata_id_is_ata(id))
2203 			goto err_out;
2204 	}
2205 
2206 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2207 		tried_spinup = 1;
2208 		/*
2209 		 * Drive powered-up in standby mode, and requires a specific
2210 		 * SET_FEATURES spin-up subcommand before it will accept
2211 		 * anything other than the original IDENTIFY command.
2212 		 */
2213 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2214 		if (err_mask && id[2] != 0x738c) {
2215 			rc = -EIO;
2216 			reason = "SPINUP failed";
2217 			goto err_out;
2218 		}
2219 		/*
2220 		 * If the drive initially returned incomplete IDENTIFY info,
2221 		 * we now must reissue the IDENTIFY command.
2222 		 */
2223 		if (id[2] == 0x37c8)
2224 			goto retry;
2225 	}
2226 
2227 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2228 		/*
2229 		 * The exact sequence expected by certain pre-ATA4 drives is:
2230 		 * SRST RESET
2231 		 * IDENTIFY (optional in early ATA)
2232 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2233 		 * anything else..
2234 		 * Some drives were very specific about that exact sequence.
2235 		 *
2236 		 * Note that ATA4 says lba is mandatory so the second check
2237 		 * should never trigger.
2238 		 */
2239 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2240 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2241 			if (err_mask) {
2242 				rc = -EIO;
2243 				reason = "INIT_DEV_PARAMS failed";
2244 				goto err_out;
2245 			}
2246 
2247 			/* current CHS translation info (id[53-58]) might be
2248 			 * changed. reread the identify device info.
2249 			 */
2250 			flags &= ~ATA_READID_POSTRESET;
2251 			goto retry;
2252 		}
2253 	}
2254 
2255 	*p_class = class;
2256 
2257 	return 0;
2258 
2259  err_out:
2260 	if (ata_msg_warn(ap))
2261 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2262 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2263 	return rc;
2264 }
2265 
2266 static int ata_do_link_spd_horkage(struct ata_device *dev)
2267 {
2268 	struct ata_link *plink = ata_dev_phys_link(dev);
2269 	u32 target, target_limit;
2270 
2271 	if (!sata_scr_valid(plink))
2272 		return 0;
2273 
2274 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2275 		target = 1;
2276 	else
2277 		return 0;
2278 
2279 	target_limit = (1 << target) - 1;
2280 
2281 	/* if already on stricter limit, no need to push further */
2282 	if (plink->sata_spd_limit <= target_limit)
2283 		return 0;
2284 
2285 	plink->sata_spd_limit = target_limit;
2286 
2287 	/* Request another EH round by returning -EAGAIN if link is
2288 	 * going faster than the target speed.  Forward progress is
2289 	 * guaranteed by setting sata_spd_limit to target_limit above.
2290 	 */
2291 	if (plink->sata_spd > target) {
2292 		ata_dev_printk(dev, KERN_INFO,
2293 			       "applying link speed limit horkage to %s\n",
2294 			       sata_spd_string(target));
2295 		return -EAGAIN;
2296 	}
2297 	return 0;
2298 }
2299 
2300 static inline u8 ata_dev_knobble(struct ata_device *dev)
2301 {
2302 	struct ata_port *ap = dev->link->ap;
2303 
2304 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2305 		return 0;
2306 
2307 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2308 }
2309 
2310 static int ata_dev_config_ncq(struct ata_device *dev,
2311 			       char *desc, size_t desc_sz)
2312 {
2313 	struct ata_port *ap = dev->link->ap;
2314 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2315 	unsigned int err_mask;
2316 	char *aa_desc = "";
2317 
2318 	if (!ata_id_has_ncq(dev->id)) {
2319 		desc[0] = '\0';
2320 		return 0;
2321 	}
2322 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2323 		snprintf(desc, desc_sz, "NCQ (not used)");
2324 		return 0;
2325 	}
2326 	if (ap->flags & ATA_FLAG_NCQ) {
2327 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2328 		dev->flags |= ATA_DFLAG_NCQ;
2329 	}
2330 
2331 	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2332 		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2333 		ata_id_has_fpdma_aa(dev->id)) {
2334 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2335 			SATA_FPDMA_AA);
2336 		if (err_mask) {
2337 			ata_dev_printk(dev, KERN_ERR, "failed to enable AA "
2338 				"(error_mask=0x%x)\n", err_mask);
2339 			if (err_mask != AC_ERR_DEV) {
2340 				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2341 				return -EIO;
2342 			}
2343 		} else
2344 			aa_desc = ", AA";
2345 	}
2346 
2347 	if (hdepth >= ddepth)
2348 		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2349 	else
2350 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2351 			ddepth, aa_desc);
2352 	return 0;
2353 }
2354 
2355 /**
2356  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2357  *	@dev: Target device to configure
2358  *
2359  *	Configure @dev according to @dev->id.  Generic and low-level
2360  *	driver specific fixups are also applied.
2361  *
2362  *	LOCKING:
2363  *	Kernel thread context (may sleep)
2364  *
2365  *	RETURNS:
2366  *	0 on success, -errno otherwise
2367  */
2368 int ata_dev_configure(struct ata_device *dev)
2369 {
2370 	struct ata_port *ap = dev->link->ap;
2371 	struct ata_eh_context *ehc = &dev->link->eh_context;
2372 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2373 	const u16 *id = dev->id;
2374 	unsigned long xfer_mask;
2375 	char revbuf[7];		/* XYZ-99\0 */
2376 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2377 	char modelbuf[ATA_ID_PROD_LEN+1];
2378 	int rc;
2379 
2380 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2381 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2382 			       __func__);
2383 		return 0;
2384 	}
2385 
2386 	if (ata_msg_probe(ap))
2387 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2388 
2389 	/* set horkage */
2390 	dev->horkage |= ata_dev_blacklisted(dev);
2391 	ata_force_horkage(dev);
2392 
2393 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2394 		ata_dev_printk(dev, KERN_INFO,
2395 			       "unsupported device, disabling\n");
2396 		ata_dev_disable(dev);
2397 		return 0;
2398 	}
2399 
2400 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2401 	    dev->class == ATA_DEV_ATAPI) {
2402 		ata_dev_printk(dev, KERN_WARNING,
2403 			"WARNING: ATAPI is %s, device ignored.\n",
2404 			atapi_enabled ? "not supported with this driver"
2405 				      : "disabled");
2406 		ata_dev_disable(dev);
2407 		return 0;
2408 	}
2409 
2410 	rc = ata_do_link_spd_horkage(dev);
2411 	if (rc)
2412 		return rc;
2413 
2414 	/* let ACPI work its magic */
2415 	rc = ata_acpi_on_devcfg(dev);
2416 	if (rc)
2417 		return rc;
2418 
2419 	/* massage HPA, do it early as it might change IDENTIFY data */
2420 	rc = ata_hpa_resize(dev);
2421 	if (rc)
2422 		return rc;
2423 
2424 	/* print device capabilities */
2425 	if (ata_msg_probe(ap))
2426 		ata_dev_printk(dev, KERN_DEBUG,
2427 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2428 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2429 			       __func__,
2430 			       id[49], id[82], id[83], id[84],
2431 			       id[85], id[86], id[87], id[88]);
2432 
2433 	/* initialize to-be-configured parameters */
2434 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2435 	dev->max_sectors = 0;
2436 	dev->cdb_len = 0;
2437 	dev->n_sectors = 0;
2438 	dev->cylinders = 0;
2439 	dev->heads = 0;
2440 	dev->sectors = 0;
2441 	dev->multi_count = 0;
2442 
2443 	/*
2444 	 * common ATA, ATAPI feature tests
2445 	 */
2446 
2447 	/* find max transfer mode; for printk only */
2448 	xfer_mask = ata_id_xfermask(id);
2449 
2450 	if (ata_msg_probe(ap))
2451 		ata_dump_id(id);
2452 
2453 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2454 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2455 			sizeof(fwrevbuf));
2456 
2457 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2458 			sizeof(modelbuf));
2459 
2460 	/* ATA-specific feature tests */
2461 	if (dev->class == ATA_DEV_ATA) {
2462 		if (ata_id_is_cfa(id)) {
2463 			/* CPRM may make this media unusable */
2464 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2465 				ata_dev_printk(dev, KERN_WARNING,
2466 					       "supports DRM functions and may "
2467 					       "not be fully accessible.\n");
2468 			snprintf(revbuf, 7, "CFA");
2469 		} else {
2470 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2471 			/* Warn the user if the device has TPM extensions */
2472 			if (ata_id_has_tpm(id))
2473 				ata_dev_printk(dev, KERN_WARNING,
2474 					       "supports DRM functions and may "
2475 					       "not be fully accessible.\n");
2476 		}
2477 
2478 		dev->n_sectors = ata_id_n_sectors(id);
2479 
2480 		/* get current R/W Multiple count setting */
2481 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2482 			unsigned int max = dev->id[47] & 0xff;
2483 			unsigned int cnt = dev->id[59] & 0xff;
2484 			/* only recognize/allow powers of two here */
2485 			if (is_power_of_2(max) && is_power_of_2(cnt))
2486 				if (cnt <= max)
2487 					dev->multi_count = cnt;
2488 		}
2489 
2490 		if (ata_id_has_lba(id)) {
2491 			const char *lba_desc;
2492 			char ncq_desc[24];
2493 
2494 			lba_desc = "LBA";
2495 			dev->flags |= ATA_DFLAG_LBA;
2496 			if (ata_id_has_lba48(id)) {
2497 				dev->flags |= ATA_DFLAG_LBA48;
2498 				lba_desc = "LBA48";
2499 
2500 				if (dev->n_sectors >= (1UL << 28) &&
2501 				    ata_id_has_flush_ext(id))
2502 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2503 			}
2504 
2505 			/* config NCQ */
2506 			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2507 			if (rc)
2508 				return rc;
2509 
2510 			/* print device info to dmesg */
2511 			if (ata_msg_drv(ap) && print_info) {
2512 				ata_dev_printk(dev, KERN_INFO,
2513 					"%s: %s, %s, max %s\n",
2514 					revbuf, modelbuf, fwrevbuf,
2515 					ata_mode_string(xfer_mask));
2516 				ata_dev_printk(dev, KERN_INFO,
2517 					"%Lu sectors, multi %u: %s %s\n",
2518 					(unsigned long long)dev->n_sectors,
2519 					dev->multi_count, lba_desc, ncq_desc);
2520 			}
2521 		} else {
2522 			/* CHS */
2523 
2524 			/* Default translation */
2525 			dev->cylinders	= id[1];
2526 			dev->heads	= id[3];
2527 			dev->sectors	= id[6];
2528 
2529 			if (ata_id_current_chs_valid(id)) {
2530 				/* Current CHS translation is valid. */
2531 				dev->cylinders = id[54];
2532 				dev->heads     = id[55];
2533 				dev->sectors   = id[56];
2534 			}
2535 
2536 			/* print device info to dmesg */
2537 			if (ata_msg_drv(ap) && print_info) {
2538 				ata_dev_printk(dev, KERN_INFO,
2539 					"%s: %s, %s, max %s\n",
2540 					revbuf,	modelbuf, fwrevbuf,
2541 					ata_mode_string(xfer_mask));
2542 				ata_dev_printk(dev, KERN_INFO,
2543 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2544 					(unsigned long long)dev->n_sectors,
2545 					dev->multi_count, dev->cylinders,
2546 					dev->heads, dev->sectors);
2547 			}
2548 		}
2549 
2550 		dev->cdb_len = 16;
2551 	}
2552 
2553 	/* ATAPI-specific feature tests */
2554 	else if (dev->class == ATA_DEV_ATAPI) {
2555 		const char *cdb_intr_string = "";
2556 		const char *atapi_an_string = "";
2557 		const char *dma_dir_string = "";
2558 		u32 sntf;
2559 
2560 		rc = atapi_cdb_len(id);
2561 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2562 			if (ata_msg_warn(ap))
2563 				ata_dev_printk(dev, KERN_WARNING,
2564 					       "unsupported CDB len\n");
2565 			rc = -EINVAL;
2566 			goto err_out_nosup;
2567 		}
2568 		dev->cdb_len = (unsigned int) rc;
2569 
2570 		/* Enable ATAPI AN if both the host and device support
2571 		 * it.  If PMP is attached, SNTF is required
2572 		 * to enable ATAPI AN to discern between PHY status
2573 		 * changed notifications and ATAPI ANs.
2574 		 */
2575 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2576 		    (!sata_pmp_attached(ap) ||
2577 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2578 			unsigned int err_mask;
2579 
2580 			/* issue SET feature command to turn this on */
2581 			err_mask = ata_dev_set_feature(dev,
2582 					SETFEATURES_SATA_ENABLE, SATA_AN);
2583 			if (err_mask)
2584 				ata_dev_printk(dev, KERN_ERR,
2585 					"failed to enable ATAPI AN "
2586 					"(err_mask=0x%x)\n", err_mask);
2587 			else {
2588 				dev->flags |= ATA_DFLAG_AN;
2589 				atapi_an_string = ", ATAPI AN";
2590 			}
2591 		}
2592 
2593 		if (ata_id_cdb_intr(dev->id)) {
2594 			dev->flags |= ATA_DFLAG_CDB_INTR;
2595 			cdb_intr_string = ", CDB intr";
2596 		}
2597 
2598 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2599 			dev->flags |= ATA_DFLAG_DMADIR;
2600 			dma_dir_string = ", DMADIR";
2601 		}
2602 
2603 		/* print device info to dmesg */
2604 		if (ata_msg_drv(ap) && print_info)
2605 			ata_dev_printk(dev, KERN_INFO,
2606 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2607 				       modelbuf, fwrevbuf,
2608 				       ata_mode_string(xfer_mask),
2609 				       cdb_intr_string, atapi_an_string,
2610 				       dma_dir_string);
2611 	}
2612 
2613 	/* determine max_sectors */
2614 	dev->max_sectors = ATA_MAX_SECTORS;
2615 	if (dev->flags & ATA_DFLAG_LBA48)
2616 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2617 
2618 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2619 		if (ata_id_has_hipm(dev->id))
2620 			dev->flags |= ATA_DFLAG_HIPM;
2621 		if (ata_id_has_dipm(dev->id))
2622 			dev->flags |= ATA_DFLAG_DIPM;
2623 	}
2624 
2625 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2626 	   200 sectors */
2627 	if (ata_dev_knobble(dev)) {
2628 		if (ata_msg_drv(ap) && print_info)
2629 			ata_dev_printk(dev, KERN_INFO,
2630 				       "applying bridge limits\n");
2631 		dev->udma_mask &= ATA_UDMA5;
2632 		dev->max_sectors = ATA_MAX_SECTORS;
2633 	}
2634 
2635 	if ((dev->class == ATA_DEV_ATAPI) &&
2636 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2637 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2638 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2639 	}
2640 
2641 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2642 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2643 					 dev->max_sectors);
2644 
2645 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2646 		dev->horkage |= ATA_HORKAGE_IPM;
2647 
2648 		/* reset link pm_policy for this port to no pm */
2649 		ap->pm_policy = MAX_PERFORMANCE;
2650 	}
2651 
2652 	if (ap->ops->dev_config)
2653 		ap->ops->dev_config(dev);
2654 
2655 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2656 		/* Let the user know. We don't want to disallow opens for
2657 		   rescue purposes, or in case the vendor is just a blithering
2658 		   idiot. Do this after the dev_config call as some controllers
2659 		   with buggy firmware may want to avoid reporting false device
2660 		   bugs */
2661 
2662 		if (print_info) {
2663 			ata_dev_printk(dev, KERN_WARNING,
2664 "Drive reports diagnostics failure. This may indicate a drive\n");
2665 			ata_dev_printk(dev, KERN_WARNING,
2666 "fault or invalid emulation. Contact drive vendor for information.\n");
2667 		}
2668 	}
2669 
2670 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2671 		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2672 			       "firmware update to be fully functional.\n");
2673 		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
2674 			       "or visit http://ata.wiki.kernel.org.\n");
2675 	}
2676 
2677 	return 0;
2678 
2679 err_out_nosup:
2680 	if (ata_msg_probe(ap))
2681 		ata_dev_printk(dev, KERN_DEBUG,
2682 			       "%s: EXIT, err\n", __func__);
2683 	return rc;
2684 }
2685 
2686 /**
2687  *	ata_cable_40wire	-	return 40 wire cable type
2688  *	@ap: port
2689  *
2690  *	Helper method for drivers which want to hardwire 40 wire cable
2691  *	detection.
2692  */
2693 
2694 int ata_cable_40wire(struct ata_port *ap)
2695 {
2696 	return ATA_CBL_PATA40;
2697 }
2698 
2699 /**
2700  *	ata_cable_80wire	-	return 80 wire cable type
2701  *	@ap: port
2702  *
2703  *	Helper method for drivers which want to hardwire 80 wire cable
2704  *	detection.
2705  */
2706 
2707 int ata_cable_80wire(struct ata_port *ap)
2708 {
2709 	return ATA_CBL_PATA80;
2710 }
2711 
2712 /**
2713  *	ata_cable_unknown	-	return unknown PATA cable.
2714  *	@ap: port
2715  *
2716  *	Helper method for drivers which have no PATA cable detection.
2717  */
2718 
2719 int ata_cable_unknown(struct ata_port *ap)
2720 {
2721 	return ATA_CBL_PATA_UNK;
2722 }
2723 
2724 /**
2725  *	ata_cable_ignore	-	return ignored PATA cable.
2726  *	@ap: port
2727  *
2728  *	Helper method for drivers which don't use cable type to limit
2729  *	transfer mode.
2730  */
2731 int ata_cable_ignore(struct ata_port *ap)
2732 {
2733 	return ATA_CBL_PATA_IGN;
2734 }
2735 
2736 /**
2737  *	ata_cable_sata	-	return SATA cable type
2738  *	@ap: port
2739  *
2740  *	Helper method for drivers which have SATA cables
2741  */
2742 
2743 int ata_cable_sata(struct ata_port *ap)
2744 {
2745 	return ATA_CBL_SATA;
2746 }
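
/*
 * Illustrative sketch: LLDDs normally just point ->cable_detect at one
 * of the helpers above from their port operations.  The ops structure
 * below is hypothetical.
 */
static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_base_port_ops,
	.cable_detect	= ata_cable_40wire,	/* hardwired 40 wire cable */
};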
2747 
2748 /**
2749  *	ata_bus_probe - Reset and probe ATA bus
2750  *	@ap: Bus to probe
2751  *
2752  *	Master ATA bus probing function.  Initiates a hardware-dependent
2753  *	bus reset, then attempts to identify any devices found on
2754  *	the bus.
2755  *
2756  *	LOCKING:
2757  *	PCI/etc. bus probe sem.
2758  *
2759  *	RETURNS:
2760  *	Zero on success, negative errno otherwise.
2761  */
2762 
2763 int ata_bus_probe(struct ata_port *ap)
2764 {
2765 	unsigned int classes[ATA_MAX_DEVICES];
2766 	int tries[ATA_MAX_DEVICES];
2767 	int rc;
2768 	struct ata_device *dev;
2769 
2770 	ata_port_probe(ap);
2771 
2772 	ata_for_each_dev(dev, &ap->link, ALL)
2773 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2774 
2775  retry:
2776 	ata_for_each_dev(dev, &ap->link, ALL) {
2777 		/* If we issue an SRST then an ATA drive (not ATAPI)
2778 		 * may change configuration and be in PIO0 timing. If
2779 		 * we do a hard reset (or are coming from power on)
2780 		 * this is true for ATA or ATAPI. Until we've set a
2781 		 * suitable controller mode we should not touch the
2782 		 * bus as we may be talking too fast.
2783 		 */
2784 		dev->pio_mode = XFER_PIO_0;
2785 
2786 		/* If the controller has a pio mode setup function
2787 		 * then use it to set the chipset to rights. Don't
2788 		 * touch the DMA setup as that will be dealt with when
2789 		 * configuring devices.
2790 		 */
2791 		if (ap->ops->set_piomode)
2792 			ap->ops->set_piomode(ap, dev);
2793 	}
2794 
2795 	/* reset and determine device classes */
2796 	ap->ops->phy_reset(ap);
2797 
2798 	ata_for_each_dev(dev, &ap->link, ALL) {
2799 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2800 		    dev->class != ATA_DEV_UNKNOWN)
2801 			classes[dev->devno] = dev->class;
2802 		else
2803 			classes[dev->devno] = ATA_DEV_NONE;
2804 
2805 		dev->class = ATA_DEV_UNKNOWN;
2806 	}
2807 
2808 	ata_port_probe(ap);
2809 
2810 	/* read IDENTIFY page and configure devices. We have to do the identify
2811 	   specific sequence bass-ackwards so that PDIAG- is released by
2812 	   the slave device */
2813 
2814 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2815 		if (tries[dev->devno])
2816 			dev->class = classes[dev->devno];
2817 
2818 		if (!ata_dev_enabled(dev))
2819 			continue;
2820 
2821 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2822 				     dev->id);
2823 		if (rc)
2824 			goto fail;
2825 	}
2826 
2827 	/* Now ask for the cable type as PDIAG- should have been released */
2828 	if (ap->ops->cable_detect)
2829 		ap->cbl = ap->ops->cable_detect(ap);
2830 
2831 	/* We may have SATA bridge glue hiding here irrespective of
2832 	 * the reported cable types and sensed types.  When SATA
2833 	 * drives indicate we have a bridge, we don't know which end
2834 	 * of the link the bridge is on, which is a problem.
2835 	 */
2836 	ata_for_each_dev(dev, &ap->link, ENABLED)
2837 		if (ata_id_is_sata(dev->id))
2838 			ap->cbl = ATA_CBL_SATA;
2839 
2840 	/* After the identify sequence we can now set up the devices. We do
2841 	   this in the normal order so that the user doesn't get confused */
2842 
2843 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2844 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2845 		rc = ata_dev_configure(dev);
2846 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2847 		if (rc)
2848 			goto fail;
2849 	}
2850 
2851 	/* configure transfer mode */
2852 	rc = ata_set_mode(&ap->link, &dev);
2853 	if (rc)
2854 		goto fail;
2855 
2856 	ata_for_each_dev(dev, &ap->link, ENABLED)
2857 		return 0;
2858 
2859 	/* no device present, disable port */
2860 	ata_port_disable(ap);
2861 	return -ENODEV;
2862 
2863  fail:
2864 	tries[dev->devno]--;
2865 
2866 	switch (rc) {
2867 	case -EINVAL:
2868 		/* eeek, something went very wrong, give up */
2869 		tries[dev->devno] = 0;
2870 		break;
2871 
2872 	case -ENODEV:
2873 		/* give it just one more chance */
2874 		tries[dev->devno] = min(tries[dev->devno], 1);	/* fall through */
2875 	case -EIO:
2876 		if (tries[dev->devno] == 1) {
2877 			/* This is the last chance, better to slow
2878 			 * down than lose it.
2879 			 */
2880 			sata_down_spd_limit(&ap->link, 0);
2881 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2882 		}
2883 	}
2884 
2885 	if (!tries[dev->devno])
2886 		ata_dev_disable(dev);
2887 
2888 	goto retry;
2889 }
2890 
2891 /**
2892  *	ata_port_probe - Mark port as enabled
2893  *	@ap: Port for which we indicate enablement
2894  *
2895  *	Modify @ap data structure such that the system
2896  *	thinks that the entire port is enabled.
2897  *
2898  *	LOCKING: host lock, or some other form of
2899  *	serialization.
2900  */
2901 
2902 void ata_port_probe(struct ata_port *ap)
2903 {
2904 	ap->flags &= ~ATA_FLAG_DISABLED;
2905 }
2906 
2907 /**
2908  *	sata_print_link_status - Print SATA link status
2909  *	@link: SATA link to printk link status about
2910  *
2911  *	This function prints link speed and status of a SATA link.
2912  *
2913  *	LOCKING:
2914  *	None.
2915  */
2916 static void sata_print_link_status(struct ata_link *link)
2917 {
2918 	u32 sstatus, scontrol, tmp;
2919 
2920 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2921 		return;
2922 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2923 
2924 	if (ata_phys_link_online(link)) {
2925 		tmp = (sstatus >> 4) & 0xf;
2926 		ata_link_printk(link, KERN_INFO,
2927 				"SATA link up %s (SStatus %X SControl %X)\n",
2928 				sata_spd_string(tmp), sstatus, scontrol);
2929 	} else {
2930 		ata_link_printk(link, KERN_INFO,
2931 				"SATA link down (SStatus %X SControl %X)\n",
2932 				sstatus, scontrol);
2933 	}
2934 }
2935 
2936 /**
2937  *	ata_dev_pair		-	return other device on cable
2938  *	@adev: device
2939  *
2940  *	Obtain the other device on the same cable; return NULL if
2941  *	no other device is present.
2942  */
2943 
2944 struct ata_device *ata_dev_pair(struct ata_device *adev)
2945 {
2946 	struct ata_link *link = adev->link;
2947 	struct ata_device *pair = &link->device[1 - adev->devno];
2948 	if (!ata_dev_enabled(pair))
2949 		return NULL;
2950 	return pair;
2951 }
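
/*
 * Illustrative sketch: drivers whose timing registers are shared per
 * channel use ata_dev_pair() so the slower device on the cable is
 * respected.  The clamp below is a hypothetical example.
 */
static u8 example_channel_pio(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);
	u8 pio = adev->pio_mode;

	if (pair && pair->pio_mode < pio)
		pio = pair->pio_mode;	/* share the slower timing */

	return pio;
}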
2952 
2953 /**
2954  *	ata_port_disable - Disable port.
2955  *	@ap: Port to be disabled.
2956  *
2957  *	Modify @ap data structure such that the system
2958  *	thinks that the entire port is disabled, and should
2959  *	never attempt to probe or communicate with devices
2960  *	on this port.
2961  *
2962  *	LOCKING: host lock, or some other form of
2963  *	serialization.
2964  */
2965 
2966 void ata_port_disable(struct ata_port *ap)
2967 {
2968 	ap->link.device[0].class = ATA_DEV_NONE;
2969 	ap->link.device[1].class = ATA_DEV_NONE;
2970 	ap->flags |= ATA_FLAG_DISABLED;
2971 }
2972 
2973 /**
2974  *	sata_down_spd_limit - adjust SATA spd limit downward
2975  *	@link: Link to adjust SATA spd limit for
2976  *	@spd_limit: Additional limit
2977  *
2978  *	Adjust SATA spd limit of @link downward.  Note that this
2979  *	function only adjusts the limit.  The change must be applied
2980  *	using sata_set_spd().
2981  *
2982  *	If @spd_limit is non-zero, the speed is limited to equal to or
2983  *	lower than @spd_limit if such speed is supported.  If
2984  *	@spd_limit is slower than any supported speed, only the lowest
2985  *	supported speed is allowed.
2986  *
2987  *	LOCKING:
2988  *	Inherited from caller.
2989  *
2990  *	RETURNS:
2991  *	0 on success, negative errno on failure
2992  */
2993 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2994 {
2995 	u32 sstatus, spd, mask;
2996 	int rc, bit;
2997 
2998 	if (!sata_scr_valid(link))
2999 		return -EOPNOTSUPP;
3000 
3001 	/* If SCR can be read, use it to determine the current SPD.
3002 	 * If not, use cached value in link->sata_spd.
3003 	 */
3004 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3005 	if (rc == 0 && ata_sstatus_online(sstatus))
3006 		spd = (sstatus >> 4) & 0xf;
3007 	else
3008 		spd = link->sata_spd;
3009 
3010 	mask = link->sata_spd_limit;
3011 	if (mask <= 1)
3012 		return -EINVAL;
3013 
3014 	/* unconditionally mask off the highest bit */
3015 	bit = fls(mask) - 1;
3016 	mask &= ~(1 << bit);
3017 
3018 	/* Mask off all speeds higher than or equal to the current
3019 	 * one.  Force 1.5Gbps if current SPD is not available.
3020 	 */
3021 	if (spd > 1)
3022 		mask &= (1 << (spd - 1)) - 1;
3023 	else
3024 		mask &= 1;
3025 
3026 	/* were we already at the bottom? */
3027 	if (!mask)
3028 		return -EINVAL;
3029 
3030 	if (spd_limit) {
3031 		if (mask & ((1 << spd_limit) - 1))
3032 			mask &= (1 << spd_limit) - 1;
3033 		else {
3034 			bit = ffs(mask) - 1;
3035 			mask = 1 << bit;
3036 		}
3037 	}
3038 
3039 	link->sata_spd_limit = mask;
3040 
3041 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
3042 			sata_spd_string(fls(mask)));
3043 
3044 	return 0;
3045 }
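
/*
 * Worked example (values are illustrative): with sata_spd_limit = 0x7
 * (1.5/3.0/6.0 Gbps allowed) and the link currently at spd 2 (3.0 Gbps),
 * sata_down_spd_limit(link, 0) first drops the highest bit (0x7 -> 0x3)
 * and then masks off everything at or above the current speed, leaving
 * 0x1, i.e. 1.5 Gbps only.
 */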
3046 
3047 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3048 {
3049 	struct ata_link *host_link = &link->ap->link;
3050 	u32 limit, target, spd;
3051 
3052 	limit = link->sata_spd_limit;
3053 
3054 	/* Don't configure downstream link faster than upstream link.
3055 	 * It doesn't speed up anything and some PMPs choke on such
3056 	 * configuration.
3057 	 */
3058 	if (!ata_is_host_link(link) && host_link->sata_spd)
3059 		limit &= (1 << host_link->sata_spd) - 1;
3060 
3061 	if (limit == UINT_MAX)
3062 		target = 0;
3063 	else
3064 		target = fls(limit);
3065 
3066 	spd = (*scontrol >> 4) & 0xf;
3067 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3068 
3069 	return spd != target;
3070 }
3071 
3072 /**
3073  *	sata_set_spd_needed - is SATA spd configuration needed
3074  *	@link: Link in question
3075  *
3076  *	Test whether the spd limit in SControl matches
3077  *	@link->sata_spd_limit.  This function is used to determine
3078  *	whether hardreset is necessary to apply SATA spd
3079  *	configuration.
3080  *
3081  *	LOCKING:
3082  *	Inherited from caller.
3083  *
3084  *	RETURNS:
3085  *	1 if SATA spd configuration is needed, 0 otherwise.
3086  */
3087 static int sata_set_spd_needed(struct ata_link *link)
3088 {
3089 	u32 scontrol;
3090 
3091 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3092 		return 1;
3093 
3094 	return __sata_set_spd_needed(link, &scontrol);
3095 }
3096 
3097 /**
3098  *	sata_set_spd - set SATA spd according to spd limit
3099  *	@link: Link to set SATA spd for
3100  *
3101  *	Set SATA spd of @link according to sata_spd_limit.
3102  *
3103  *	LOCKING:
3104  *	Inherited from caller.
3105  *
3106  *	RETURNS:
3107  *	0 if spd doesn't need to be changed, 1 if spd has been
3108  *	changed.  Negative errno if SCR registers are inaccessible.
3109  */
3110 int sata_set_spd(struct ata_link *link)
3111 {
3112 	u32 scontrol;
3113 	int rc;
3114 
3115 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3116 		return rc;
3117 
3118 	if (!__sata_set_spd_needed(link, &scontrol))
3119 		return 0;
3120 
3121 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3122 		return rc;
3123 
3124 	return 1;
3125 }
3126 
3127 /*
3128  * This mode timing computation functionality is ported over from
3129  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3130  */
3131 /*
3132  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3133  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3134  * for UDMA6, which is currently supported only by Maxtor drives.
3135  *
3136  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3137  */
3138 
3139 static const struct ata_timing ata_timing[] = {
3140 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
3141 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
3142 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
3143 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
3144 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
3145 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
3146 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
3147 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
3148 
3149 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
3150 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
3151 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
3152 
3153 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
3154 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
3155 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
3156 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
3157 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
3158 
3159 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
3160 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
3161 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
3162 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
3163 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
3164 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
3165 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
3166 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
3167 
3168 	{ 0xFF }
3169 };
3170 
3171 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3172 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
3173 
3174 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3175 {
3176 	q->setup	= EZ(t->setup      * 1000,  T);
3177 	q->act8b	= EZ(t->act8b      * 1000,  T);
3178 	q->rec8b	= EZ(t->rec8b      * 1000,  T);
3179 	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
3180 	q->active	= EZ(t->active     * 1000,  T);
3181 	q->recover	= EZ(t->recover    * 1000,  T);
3182 	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
3183 	q->cycle	= EZ(t->cycle      * 1000,  T);
3184 	q->udma		= EZ(t->udma       * 1000, UT);
3185 }
3186 
3187 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3188 		      struct ata_timing *m, unsigned int what)
3189 {
3190 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3191 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3192 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3193 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3194 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3195 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3196 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3197 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3198 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3199 }
3200 
3201 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3202 {
3203 	const struct ata_timing *t = ata_timing;
3204 
3205 	while (xfer_mode > t->mode)
3206 		t++;
3207 
3208 	if (xfer_mode == t->mode)
3209 		return t;
3210 	return NULL;
3211 }
3212 
3213 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3214 		       struct ata_timing *t, int T, int UT)
3215 {
3216 	const u16 *id = adev->id;
3217 	const struct ata_timing *s;
3218 	struct ata_timing p;
3219 
3220 	/*
3221 	 * Find the mode.
3222 	 */
3223 
3224 	if (!(s = ata_timing_find_mode(speed)))
3225 		return -EINVAL;
3226 
3227 	memcpy(t, s, sizeof(*s));
3228 
3229 	/*
3230 	 * If the drive is an EIDE drive, it can tell us it needs extended
3231 	 * PIO/MW_DMA cycle timing.
3232 	 */
3233 
3234 	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3235 		memset(&p, 0, sizeof(p));
3236 
3237 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3238 			if (speed <= XFER_PIO_2)
3239 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3240 			else if ((speed <= XFER_PIO_4) ||
3241 				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3242 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3243 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3244 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3245 
3246 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3247 	}
3248 
3249 	/*
3250 	 * Convert the timing to bus clock counts.
3251 	 */
3252 
3253 	ata_timing_quantize(t, t, T, UT);
3254 
3255 	/*
3256 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3257 	 * S.M.A.R.T * and some other commands. We have to ensure that the
3258 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3259 	 * DMA cycle timing is no faster than the fastest PIO timing.
3260 
3261 	if (speed > XFER_PIO_6) {
3262 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3263 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3264 	}
3265 
3266 	/*
3267 	 * Lengthen active & recovery time so that cycle time is correct.
3268 	 */
3269 
3270 	if (t->act8b + t->rec8b < t->cyc8b) {
3271 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3272 		t->rec8b = t->cyc8b - t->act8b;
3273 	}
3274 
3275 	if (t->active + t->recover < t->cycle) {
3276 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3277 		t->recover = t->cycle - t->active;
3278 	}
3279 
3280 	/* In a few cases quantisation may produce enough errors to
3281 	   leave t->cycle too low for the sum of active and recovery;
3282 	   if so, we must correct this */
3283 	if (t->active + t->recover > t->cycle)
3284 		t->cycle = t->active + t->recover;
3285 
3286 	return 0;
3287 }
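
/*
 * Illustrative sketch: a PATA LLDD's ->set_dmamode() quantizes the
 * looked-up timings to its own bus clock via ata_timing_compute().
 * T and UT are clock periods in picoseconds; the 33 MHz PCI clock and
 * the register writes are hypothetical.
 */
static void example_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	int T = 1000000000 / 33333;	/* ~30000 ps per 33 MHz clock */
	int UT = T / 2;			/* hypothetical UDMA clocking */

	if (ata_timing_compute(adev, adev->dma_mode, &t, T, UT))
		return;

	/* program t.active, t.recover and t.udma into the chip here */
}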
3288 
3289 /**
3290  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3291  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3292  *	@cycle: cycle duration in ns
3293  *
3294  *	Return matching xfer mode for @cycle.  The returned mode is of
3295  *	the transfer type specified by @xfer_shift.  If @cycle is too
3296  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3297  *	than the fastest known mode, the fastest mode is returned.
3298  *
3299  *	LOCKING:
3300  *	None.
3301  *
3302  *	RETURNS:
3303  *	Matching xfer_mode, 0xff if no match found.
3304  */
3305 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3306 {
3307 	u8 base_mode = 0xff, last_mode = 0xff;
3308 	const struct ata_xfer_ent *ent;
3309 	const struct ata_timing *t;
3310 
3311 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3312 		if (ent->shift == xfer_shift)
3313 			base_mode = ent->base;
3314 
3315 	for (t = ata_timing_find_mode(base_mode);
3316 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3317 		unsigned short this_cycle;
3318 
3319 		switch (xfer_shift) {
3320 		case ATA_SHIFT_PIO:
3321 		case ATA_SHIFT_MWDMA:
3322 			this_cycle = t->cycle;
3323 			break;
3324 		case ATA_SHIFT_UDMA:
3325 			this_cycle = t->udma;
3326 			break;
3327 		default:
3328 			return 0xff;
3329 		}
3330 
3331 		if (cycle > this_cycle)
3332 			break;
3333 
3334 		last_mode = t->mode;
3335 	}
3336 
3337 	return last_mode;
3338 }
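
/*
 * Illustrative sketch: converting a minimum cycle time (the source of
 * the limit is hypothetical here) back into a transfer mode.  Per the
 * ata_timing[] table above, a 60 ns UDMA cycle maps to XFER_UDMA_2.
 */
static u8 example_cycle_to_udma(int min_cycle_ns)
{
	return ata_timing_cycle2mode(ATA_SHIFT_UDMA, min_cycle_ns);
}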
3339 
3340 /**
3341  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3342  *	@dev: Device to adjust xfer masks
3343  *	@sel: ATA_DNXFER_* selector
3344  *
3345  *	Adjust xfer masks of @dev downward.  Note that this function
3346  *	does not apply the change.  Invoking ata_set_mode() afterwards
3347  *	will apply the limit.
3348  *
3349  *	LOCKING:
3350  *	Inherited from caller.
3351  *
3352  *	RETURNS:
3353  *	0 on success, negative errno on failure
3354  */
3355 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3356 {
3357 	char buf[32];
3358 	unsigned long orig_mask, xfer_mask;
3359 	unsigned long pio_mask, mwdma_mask, udma_mask;
3360 	int quiet, highbit;
3361 
3362 	quiet = !!(sel & ATA_DNXFER_QUIET);
3363 	sel &= ~ATA_DNXFER_QUIET;
3364 
3365 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3366 						  dev->mwdma_mask,
3367 						  dev->udma_mask);
3368 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3369 
3370 	switch (sel) {
3371 	case ATA_DNXFER_PIO:
3372 		highbit = fls(pio_mask) - 1;
3373 		pio_mask &= ~(1 << highbit);
3374 		break;
3375 
3376 	case ATA_DNXFER_DMA:
3377 		if (udma_mask) {
3378 			highbit = fls(udma_mask) - 1;
3379 			udma_mask &= ~(1 << highbit);
3380 			if (!udma_mask)
3381 				return -ENOENT;
3382 		} else if (mwdma_mask) {
3383 			highbit = fls(mwdma_mask) - 1;
3384 			mwdma_mask &= ~(1 << highbit);
3385 			if (!mwdma_mask)
3386 				return -ENOENT;
3387 		}
3388 		break;
3389 
3390 	case ATA_DNXFER_40C:
3391 		udma_mask &= ATA_UDMA_MASK_40C;
3392 		break;
3393 
3394 	case ATA_DNXFER_FORCE_PIO0:
3395 		pio_mask &= 1;		/* fall through */
3396 	case ATA_DNXFER_FORCE_PIO:
3397 		mwdma_mask = 0;
3398 		udma_mask = 0;
3399 		break;
3400 
3401 	default:
3402 		BUG();
3403 	}
3404 
3405 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3406 
3407 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3408 		return -ENOENT;
3409 
3410 	if (!quiet) {
3411 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3412 			snprintf(buf, sizeof(buf), "%s:%s",
3413 				 ata_mode_string(xfer_mask),
3414 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3415 		else
3416 			snprintf(buf, sizeof(buf), "%s",
3417 				 ata_mode_string(xfer_mask));
3418 
3419 		ata_dev_printk(dev, KERN_WARNING,
3420 			       "limiting speed to %s\n", buf);
3421 	}
3422 
3423 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3424 			    &dev->udma_mask);
3425 
3426 	return 0;
3427 }
3428 
3429 static int ata_dev_set_mode(struct ata_device *dev)
3430 {
3431 	struct ata_port *ap = dev->link->ap;
3432 	struct ata_eh_context *ehc = &dev->link->eh_context;
3433 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3434 	const char *dev_err_whine = "";
3435 	int ign_dev_err = 0;
3436 	unsigned int err_mask = 0;
3437 	int rc;
3438 
3439 	dev->flags &= ~ATA_DFLAG_PIO;
3440 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3441 		dev->flags |= ATA_DFLAG_PIO;
3442 
3443 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3444 		dev_err_whine = " (SET_XFERMODE skipped)";
3445 	else {
3446 		if (nosetxfer)
3447 			ata_dev_printk(dev, KERN_WARNING,
3448 				       "NOSETXFER but PATA detected - can't "
3449 				       "skip SETXFER, might malfunction\n");
3450 		err_mask = ata_dev_set_xfermode(dev);
3451 	}
3452 
3453 	if (err_mask & ~AC_ERR_DEV)
3454 		goto fail;
3455 
3456 	/* revalidate */
3457 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3458 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3459 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3460 	if (rc)
3461 		return rc;
3462 
3463 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3464 		/* Old CFA may refuse this command, which is just fine */
3465 		if (ata_id_is_cfa(dev->id))
3466 			ign_dev_err = 1;
3467 		/* Catch several broken garbage emulations plus some
3468 		   pre-ATA devices */
3469 		if (ata_id_major_version(dev->id) == 0 &&
3470 					dev->pio_mode <= XFER_PIO_2)
3471 			ign_dev_err = 1;
3472 		/* Some very old devices and some bad newer ones fail
3473 		   any kind of SET_XFERMODE request but support PIO0-2
3474 		   timings and no IORDY */
3475 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3476 			ign_dev_err = 1;
3477 	}
3478 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3479 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3480 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3481 	    dev->dma_mode == XFER_MW_DMA_0 &&
3482 	    (dev->id[63] >> 8) & 1)
3483 		ign_dev_err = 1;
3484 
3485 	/* if the device is actually configured correctly, ignore dev err */
3486 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3487 		ign_dev_err = 1;
3488 
3489 	if (err_mask & AC_ERR_DEV) {
3490 		if (!ign_dev_err)
3491 			goto fail;
3492 		else
3493 			dev_err_whine = " (device error ignored)";
3494 	}
3495 
3496 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3497 		dev->xfer_shift, (int)dev->xfer_mode);
3498 
3499 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3500 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3501 		       dev_err_whine);
3502 
3503 	return 0;
3504 
3505  fail:
3506 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3507 		       "(err_mask=0x%x)\n", err_mask);
3508 	return -EIO;
3509 }
3510 
3511 /**
3512  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3513  *	@link: link on which timings will be programmed
3514  *	@r_failed_dev: out parameter for failed device
3515  *
3516  *	Standard implementation of the function used to tune and set
3517  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3518  *	ata_dev_set_mode() fails, pointer to the failing device is
3519  *	returned in @r_failed_dev.
3520  *
3521  *	LOCKING:
3522  *	PCI/etc. bus probe sem.
3523  *
3524  *	RETURNS:
3525  *	0 on success, negative errno otherwise
3526  */
3527 
3528 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3529 {
3530 	struct ata_port *ap = link->ap;
3531 	struct ata_device *dev;
3532 	int rc = 0, used_dma = 0, found = 0;
3533 
3534 	/* step 1: calculate xfer_mask */
3535 	ata_for_each_dev(dev, link, ENABLED) {
3536 		unsigned long pio_mask, dma_mask;
3537 		unsigned int mode_mask;
3538 
3539 		mode_mask = ATA_DMA_MASK_ATA;
3540 		if (dev->class == ATA_DEV_ATAPI)
3541 			mode_mask = ATA_DMA_MASK_ATAPI;
3542 		else if (ata_id_is_cfa(dev->id))
3543 			mode_mask = ATA_DMA_MASK_CFA;
3544 
3545 		ata_dev_xfermask(dev);
3546 		ata_force_xfermask(dev);
3547 
3548 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3549 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3550 
3551 		if (libata_dma_mask & mode_mask)
3552 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3553 		else
3554 			dma_mask = 0;
3555 
3556 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3557 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3558 
3559 		found = 1;
3560 		if (ata_dma_enabled(dev))
3561 			used_dma = 1;
3562 	}
3563 	if (!found)
3564 		goto out;
3565 
3566 	/* step 2: always set host PIO timings */
3567 	ata_for_each_dev(dev, link, ENABLED) {
3568 		if (dev->pio_mode == 0xff) {
3569 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3570 			rc = -EINVAL;
3571 			goto out;
3572 		}
3573 
3574 		dev->xfer_mode = dev->pio_mode;
3575 		dev->xfer_shift = ATA_SHIFT_PIO;
3576 		if (ap->ops->set_piomode)
3577 			ap->ops->set_piomode(ap, dev);
3578 	}
3579 
3580 	/* step 3: set host DMA timings */
3581 	ata_for_each_dev(dev, link, ENABLED) {
3582 		if (!ata_dma_enabled(dev))
3583 			continue;
3584 
3585 		dev->xfer_mode = dev->dma_mode;
3586 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3587 		if (ap->ops->set_dmamode)
3588 			ap->ops->set_dmamode(ap, dev);
3589 	}
3590 
3591 	/* step 4: update devices' xfer mode */
3592 	ata_for_each_dev(dev, link, ENABLED) {
3593 		rc = ata_dev_set_mode(dev);
3594 		if (rc)
3595 			goto out;
3596 	}
3597 
3598 	/* Record simplex status. If we selected DMA then the other
3599 	 * host channels are not permitted to do so.
3600 	 */
3601 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3602 		ap->host->simplex_claimed = ap;
3603 
3604  out:
3605 	if (rc)
3606 		*r_failed_dev = dev;
3607 	return rc;
3608 }
3609 
3610 /**
3611  *	ata_wait_ready - wait for link to become ready
3612  *	@link: link to be waited on
3613  *	@deadline: deadline jiffies for the operation
3614  *	@check_ready: callback to check link readiness
3615  *
3616  *	Wait for @link to become ready.  @check_ready should return
3617  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3618  *	link doesn't seem to be occupied, other errno for other error
3619  *	conditions.
3620  *
3621  *	Transient -ENODEV conditions are allowed for
3622  *	ATA_TMOUT_FF_WAIT.
3623  *
3624  *	LOCKING:
3625  *	EH context.
3626  *
3627  *	RETURNS:
3628  *	0 if @link is ready before @deadline; otherwise, -errno.
3629  */
3630 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3631 		   int (*check_ready)(struct ata_link *link))
3632 {
3633 	unsigned long start = jiffies;
3634 	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3635 	int warned = 0;
3636 
3637 	/* Slave readiness can't be tested separately from master.  On
3638 	 * M/S emulation configuration, this function should be called
3639 	 * only on the master and it will handle both master and slave.
3640 	 */
3641 	WARN_ON(link == link->ap->slave_link);
3642 
3643 	if (time_after(nodev_deadline, deadline))
3644 		nodev_deadline = deadline;
3645 
3646 	while (1) {
3647 		unsigned long now = jiffies;
3648 		int ready, tmp;
3649 
3650 		ready = tmp = check_ready(link);
3651 		if (ready > 0)
3652 			return 0;
3653 
3654 		/* -ENODEV could be transient.  Ignore -ENODEV if link
3655 		 * is online.  Also, some SATA devices take a long
3656 		 * time to clear 0xff after reset.  For example,
3657 		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3658 		 * GoVault needs even more than that.  Wait for
3659 		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3660 		 *
3661 		 * Note that some PATA controllers (pata_ali) explode
3662 		 * if status register is read more than once when
3663 		 * there's no device attached.
3664 		 */
3665 		if (ready == -ENODEV) {
3666 			if (ata_link_online(link))
3667 				ready = 0;
3668 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3669 				 !ata_link_offline(link) &&
3670 				 time_before(now, nodev_deadline))
3671 				ready = 0;
3672 		}
3673 
3674 		if (ready)
3675 			return ready;
3676 		if (time_after(now, deadline))
3677 			return -EBUSY;
3678 
3679 		if (!warned && time_after(now, start + 5 * HZ) &&
3680 		    (deadline - now > 3 * HZ)) {
3681 			ata_link_printk(link, KERN_WARNING,
3682 				"link is slow to respond, please be patient "
3683 				"(ready=%d)\n", tmp);
3684 			warned = 1;
3685 		}
3686 
3687 		msleep(50);
3688 	}
3689 }
3690 
3691 /**
3692  *	ata_wait_after_reset - wait for link to become ready after reset
3693  *	@link: link to be waited on
3694  *	@deadline: deadline jiffies for the operation
3695  *	@check_ready: callback to check link readiness
3696  *
3697  *	Wait for @link to become ready after reset.
3698  *
3699  *	LOCKING:
3700  *	EH context.
3701  *
3702  *	RETURNS:
3703  *	0 if @link is ready before @deadline; otherwise, -errno.
3704  */
3705 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3706 				int (*check_ready)(struct ata_link *link))
3707 {
3708 	msleep(ATA_WAIT_AFTER_RESET);
3709 
3710 	return ata_wait_ready(link, deadline, check_ready);
3711 }
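
/*
 * Illustrative sketch: a minimal ->hardreset() built from the helpers
 * above, roughly following what sata_link_hardreset() does elsewhere
 * in libata.  The readiness callback is a stand-in for whatever status
 * check the controller provides.
 */
static int example_check_ready(struct ata_link *link)
{
	/* hypothetical: poll controller/device status here */
	return 1;
}

static int example_hardreset(struct ata_link *link, unsigned int *class,
			     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	/* strobe DET to 1 to issue the PHY reset */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;
	scontrol = (scontrol & 0x0f0) | 0x301;
	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;
	msleep(1);

	/* bring the link back up, then wait for the device to respond */
	rc = sata_link_resume(link, sata_deb_timing_normal, deadline);
	if (rc)
		return rc;
	return ata_wait_after_reset(link, deadline, example_check_ready);
}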
3712 
3713 /**
3714  *	sata_link_debounce - debounce SATA phy status
3715  *	@link: ATA link to debounce SATA phy status for
3716  *	@params: timing parameters { interval, duration, timeout } in msec
3717  *	@deadline: deadline jiffies for the operation
3718  *
3719  *	Make sure SStatus of @link reaches a stable state, determined by
3720  *	holding the same value where DET is not 1 for @duration polled
3721  *	every @interval, before @timeout.  Timeout constrains the
3722  *	beginning of the stable state.  Because DET gets stuck at 1 on
3723  *	some controllers after hot unplugging, this function waits
3724  *	until timeout, then returns 0 if DET is stable at 1.
3725  *
3726  *	@timeout is further limited by @deadline.  The sooner of the
3727  *	two is used.
3728  *
3729  *	LOCKING:
3730  *	Kernel thread context (may sleep)
3731  *
3732  *	RETURNS:
3733  *	0 on success, -errno on failure.
3734  */
3735 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3736 		       unsigned long deadline)
3737 {
3738 	unsigned long interval = params[0];
3739 	unsigned long duration = params[1];
3740 	unsigned long last_jiffies, t;
3741 	u32 last, cur;
3742 	int rc;
3743 
3744 	t = ata_deadline(jiffies, params[2]);
3745 	if (time_before(t, deadline))
3746 		deadline = t;
3747 
3748 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3749 		return rc;
3750 	cur &= 0xf;
3751 
3752 	last = cur;
3753 	last_jiffies = jiffies;
3754 
3755 	while (1) {
3756 		msleep(interval);
3757 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3758 			return rc;
3759 		cur &= 0xf;
3760 
3761 		/* DET stable? */
3762 		if (cur == last) {
3763 			if (cur == 1 && time_before(jiffies, deadline))
3764 				continue;
3765 			if (time_after(jiffies,
3766 				       ata_deadline(last_jiffies, duration)))
3767 				return 0;
3768 			continue;
3769 		}
3770 
3771 		/* unstable, start over */
3772 		last = cur;
3773 		last_jiffies = jiffies;
3774 
3775 		/* Check deadline.  If debouncing failed, return
3776 		 * -EPIPE to tell upper layer to lower link speed.
3777 		 */
3778 		if (time_after(jiffies, deadline))
3779 			return -EPIPE;
3780 	}
3781 }
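
/*
 * Example (editorial sketch): callers normally pass one of the
 * debounce timing tables selected via sata_ehc_deb_timing() together
 * with a jiffies deadline they computed themselves (rc and deadline
 * being the caller's locals):
 *
 *	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *
 *	rc = sata_link_debounce(link, timing, deadline);
 *	if (rc)
 *		return rc;
 */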
3782 
3783 /**
3784  *	sata_link_resume - resume SATA link
3785  *	@link: ATA link to resume SATA
3786  *	@params: timing parameters { interval, duration, timeout } in msec
3787  *	@deadline: deadline jiffies for the operation
3788  *
3789  *	Resume SATA phy @link and debounce it.
3790  *
3791  *	LOCKING:
3792  *	Kernel thread context (may sleep)
3793  *
3794  *	RETURNS:
3795  *	0 on success, -errno on failure.
3796  */
3797 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3798 		     unsigned long deadline)
3799 {
3800 	int tries = ATA_LINK_RESUME_TRIES;
3801 	u32 scontrol, serror;
3802 	int rc;
3803 
3804 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3805 		return rc;
3806 
3807 	/*
3808 	 * Writes to SControl sometimes get ignored under certain
3809 	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3810 	 * cleared.
3811 	 */
3812 	do {
3813 		scontrol = (scontrol & 0x0f0) | 0x300;
3814 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3815 			return rc;
3816 		/*
3817 		 * Some PHYs react badly if SStatus is pounded
3818 		 * immediately after resuming.  Delay 200ms before
3819 		 * debouncing.
3820 		 */
3821 		msleep(200);
3822 
3823 		/* is SControl restored correctly? */
3824 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3825 			return rc;
3826 	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3827 
3828 	if ((scontrol & 0xf0f) != 0x300) {
3829 		ata_link_printk(link, KERN_ERR,
3830 				"failed to resume link (SControl %X)\n",
3831 				scontrol);
3832 		return 0;
3833 	}
3834 
3835 	if (tries < ATA_LINK_RESUME_TRIES)
3836 		ata_link_printk(link, KERN_WARNING,
3837 				"link resume succeeded after %d retries\n",
3838 				ATA_LINK_RESUME_TRIES - tries);
3839 
3840 	if ((rc = sata_link_debounce(link, params, deadline)))
3841 		return rc;
3842 
3843 	/* clear SError, some PHYs require this even for SRST to work */
3844 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3845 		rc = sata_scr_write(link, SCR_ERROR, serror);
3846 
3847 	return rc != -EINVAL ? rc : 0;
3848 }
3849 
3850 /**
3851  *	ata_std_prereset - prepare for reset
3852  *	@link: ATA link to be reset
3853  *	@deadline: deadline jiffies for the operation
3854  *
3855  *	@link is about to be reset.  Initialize it.  Failure from
3856  *	prereset makes libata abort the whole reset sequence and give up
3857  *	that port, so prereset should be best-effort.  It does its
3858  *	best to prepare for the reset sequence but if things go wrong, it
3859  *	should just whine, not fail.
3860  *
3861  *	LOCKING:
3862  *	Kernel thread context (may sleep)
3863  *
3864  *	RETURNS:
3865  *	0 on success, -errno otherwise.
3866  */
3867 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3868 {
3869 	struct ata_port *ap = link->ap;
3870 	struct ata_eh_context *ehc = &link->eh_context;
3871 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3872 	int rc;
3873 
3874 	/* if we're about to do hardreset, nothing more to do */
3875 	if (ehc->i.action & ATA_EH_HARDRESET)
3876 		return 0;
3877 
3878 	/* if SATA, resume link */
3879 	if (ap->flags & ATA_FLAG_SATA) {
3880 		rc = sata_link_resume(link, timing, deadline);
3881 		/* whine about phy resume failure but proceed */
3882 		if (rc && rc != -EOPNOTSUPP)
3883 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3884 					"link for reset (errno=%d)\n", rc);
3885 	}
3886 
3887 	/* no point in trying softreset on offline link */
3888 	if (ata_phys_link_offline(link))
3889 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3890 
3891 	return 0;
3892 }
3893 
3894 /**
3895  *	sata_link_hardreset - reset link via SATA phy reset
3896  *	@link: link to reset
3897  *	@timing: timing parameters { interval, duration, timeout } in msec
3898  *	@deadline: deadline jiffies for the operation
3899  *	@online: optional out parameter indicating link onlineness
3900  *	@check_ready: optional callback to check link readiness
3901  *
3902  *	SATA phy-reset @link using DET bits of SControl register.
3903  *	After hardreset, link readiness is waited upon using
3904  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3905  *	allowed to omit @check_ready and wait on their own after this
3906  *	function returns.  Device classification is the LLD's
3907  *	responsibility.
3908  *
3909  *	*@online is set to one iff reset succeeded and @link is online
3910  *	after reset.
3911  *
3912  *	LOCKING:
3913  *	Kernel thread context (may sleep)
3914  *
3915  *	RETURNS:
3916  *	0 on success, -errno otherwise.
3917  */
3918 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3919 			unsigned long deadline,
3920 			bool *online, int (*check_ready)(struct ata_link *))
3921 {
3922 	u32 scontrol;
3923 	int rc;
3924 
3925 	DPRINTK("ENTER\n");
3926 
3927 	if (online)
3928 		*online = false;
3929 
3930 	if (sata_set_spd_needed(link)) {
3931 		/* SATA spec says nothing about how to reconfigure
3932 		 * spd.  To be on the safe side, turn off phy during
3933 		 * reconfiguration.  This works for at least ICH7 AHCI
3934 		 * and Sil3124.
3935 		 */
3936 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3937 			goto out;
3938 
3939 		scontrol = (scontrol & 0x0f0) | 0x304;
3940 
3941 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3942 			goto out;
3943 
3944 		sata_set_spd(link);
3945 	}
3946 
3947 	/* issue phy wake/reset */
3948 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3949 		goto out;
3950 
3951 	scontrol = (scontrol & 0x0f0) | 0x301;
3952 
3953 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3954 		goto out;
3955 
3956 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3957 	 * 10.4.2 says at least 1 ms.
3958 	 */
3959 	msleep(1);
3960 
3961 	/* bring link back */
3962 	rc = sata_link_resume(link, timing, deadline);
3963 	if (rc)
3964 		goto out;
3965 	/* if link is offline nothing more to do */
3966 	if (ata_phys_link_offline(link))
3967 		goto out;
3968 
3969 	/* Link is online.  From this point, -ENODEV too is an error. */
3970 	if (online)
3971 		*online = true;
3972 
3973 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3974 		/* If PMP is supported, we have to do follow-up SRST.
3975 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3976 		 * the first port is empty.  Wait only for
3977 		 * ATA_TMOUT_PMP_SRST_WAIT.
3978 		 */
3979 		if (check_ready) {
3980 			unsigned long pmp_deadline;
3981 
3982 			pmp_deadline = ata_deadline(jiffies,
3983 						    ATA_TMOUT_PMP_SRST_WAIT);
3984 			if (time_after(pmp_deadline, deadline))
3985 				pmp_deadline = deadline;
3986 			ata_wait_ready(link, pmp_deadline, check_ready);
3987 		}
3988 		rc = -EAGAIN;
3989 		goto out;
3990 	}
3991 
3992 	rc = 0;
3993 	if (check_ready)
3994 		rc = ata_wait_ready(link, deadline, check_ready);
3995  out:
3996 	if (rc && rc != -EAGAIN) {
3997 		/* online is set iff link is online && reset succeeded */
3998 		if (online)
3999 			*online = false;
4000 		ata_link_printk(link, KERN_ERR,
4001 				"COMRESET failed (errno=%d)\n", rc);
4002 	}
4003 	DPRINTK("EXIT, rc=%d\n", rc);
4004 	return rc;
4005 }
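
/*
 * Example (editorial sketch): a typical LLD hardreset method wraps
 * sata_link_hardreset(), supplying its own readiness check and doing
 * device classification itself.  my_check_ready() and
 * my_classify_device() are hypothetical helpers.
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *		int rc;
 *
 *		rc = sata_link_hardreset(link, timing, deadline, &online,
 *					 my_check_ready);
 *		if (online)
 *			*class = my_classify_device(link->ap);
 *		return rc;
 *	}
 */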
4006 
4007 /**
4008  *	sata_std_hardreset - COMRESET w/o waiting or classification
4009  *	@link: link to reset
4010  *	@class: resulting class of attached device
4011  *	@deadline: deadline jiffies for the operation
4012  *
4013  *	Standard SATA COMRESET w/o waiting or classification.
4014  *
4015  *	LOCKING:
4016  *	Kernel thread context (may sleep)
4017  *
4018  *	RETURNS:
4019  *	0 if link offline, -EAGAIN if link online, -errno on errors.
4020  */
4021 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4022 		       unsigned long deadline)
4023 {
4024 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4025 	bool online;
4026 	int rc;
4027 
4028 	/* do hardreset */
4029 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4030 	return online ? -EAGAIN : rc;
4031 }
4032 
4033 /**
4034  *	ata_std_postreset - standard postreset callback
4035  *	@link: the target ata_link
4036  *	@classes: classes of attached devices
4037  *
4038  *	This function is invoked after a successful reset.  Note that
4039  *	the device might have been reset more than once using
4040  *	different reset methods before postreset is invoked.
4041  *
4042  *	LOCKING:
4043  *	Kernel thread context (may sleep)
4044  */
4045 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4046 {
4047 	u32 serror;
4048 
4049 	DPRINTK("ENTER\n");
4050 
4051 	/* reset complete, clear SError */
4052 	if (!sata_scr_read(link, SCR_ERROR, &serror))
4053 		sata_scr_write(link, SCR_ERROR, serror);
4054 
4055 	/* print link status */
4056 	sata_print_link_status(link);
4057 
4058 	DPRINTK("EXIT\n");
4059 }
4060 
4061 /**
4062  *	ata_dev_same_device - Determine whether new ID matches configured device
4063  *	@dev: device to compare against
4064  *	@new_class: class of the new device
4065  *	@new_id: IDENTIFY page of the new device
4066  *
4067  *	Compare @new_class and @new_id against @dev and determine
4068  *	whether @dev is the device indicated by @new_class and
4069  *	@new_id.
4070  *
4071  *	LOCKING:
4072  *	None.
4073  *
4074  *	RETURNS:
4075  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
4076  */
4077 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4078 			       const u16 *new_id)
4079 {
4080 	const u16 *old_id = dev->id;
4081 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
4082 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4083 
4084 	if (dev->class != new_class) {
4085 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4086 			       dev->class, new_class);
4087 		return 0;
4088 	}
4089 
4090 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4091 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4092 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4093 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4094 
4095 	if (strcmp(model[0], model[1])) {
4096 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4097 			       "'%s' != '%s'\n", model[0], model[1]);
4098 		return 0;
4099 	}
4100 
4101 	if (strcmp(serial[0], serial[1])) {
4102 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4103 			       "'%s' != '%s'\n", serial[0], serial[1]);
4104 		return 0;
4105 	}
4106 
4107 	return 1;
4108 }
4109 
4110 /**
4111  *	ata_dev_reread_id - Re-read IDENTIFY data
4112  *	@dev: target ATA device
4113  *	@readid_flags: read ID flags
4114  *
4115  *	Re-read IDENTIFY page and make sure @dev is still attached to
4116  *	the port.
4117  *
4118  *	LOCKING:
4119  *	Kernel thread context (may sleep)
4120  *
4121  *	RETURNS:
4122  *	0 on success, negative errno otherwise
4123  */
4124 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4125 {
4126 	unsigned int class = dev->class;
4127 	u16 *id = (void *)dev->link->ap->sector_buf;
4128 	int rc;
4129 
4130 	/* read ID data */
4131 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4132 	if (rc)
4133 		return rc;
4134 
4135 	/* is the device still there? */
4136 	if (!ata_dev_same_device(dev, class, id))
4137 		return -ENODEV;
4138 
4139 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4140 	return 0;
4141 }
4142 
4143 /**
4144  *	ata_dev_revalidate - Revalidate ATA device
4145  *	@dev: device to revalidate
4146  *	@new_class: new class code
4147  *	@readid_flags: read ID flags
4148  *
4149  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4150  *	port and reconfigure it according to the new IDENTIFY page.
4151  *
4152  *	LOCKING:
4153  *	Kernel thread context (may sleep)
4154  *
4155  *	RETURNS:
4156  *	0 on success, negative errno otherwise
4157  */
4158 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4159 		       unsigned int readid_flags)
4160 {
4161 	u64 n_sectors = dev->n_sectors;
4162 	u64 n_native_sectors = dev->n_native_sectors;
4163 	int rc;
4164 
4165 	if (!ata_dev_enabled(dev))
4166 		return -ENODEV;
4167 
4168 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4169 	if (ata_class_enabled(new_class) &&
4170 	    new_class != ATA_DEV_ATA &&
4171 	    new_class != ATA_DEV_ATAPI &&
4172 	    new_class != ATA_DEV_SEMB) {
4173 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4174 			       dev->class, new_class);
4175 		rc = -ENODEV;
4176 		goto fail;
4177 	}
4178 
4179 	/* re-read ID */
4180 	rc = ata_dev_reread_id(dev, readid_flags);
4181 	if (rc)
4182 		goto fail;
4183 
4184 	/* configure device according to the new ID */
4185 	rc = ata_dev_configure(dev);
4186 	if (rc)
4187 		goto fail;
4188 
4189 	/* verify n_sectors hasn't changed */
4190 	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4191 	    dev->n_sectors == n_sectors)
4192 		return 0;
4193 
4194 	/* n_sectors has changed */
4195 	ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
4196 		       (unsigned long long)n_sectors,
4197 		       (unsigned long long)dev->n_sectors);
4198 
4199 	/*
4200 	 * Something could have caused HPA to be unlocked
4201 	 * involuntarily.  If n_native_sectors hasn't changed and the
4202 	 * new size matches it, keep the device.
4203 	 */
4204 	if (dev->n_native_sectors == n_native_sectors &&
4205 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4206 		ata_dev_printk(dev, KERN_WARNING,
4207 			       "new n_sectors matches native, probably "
4208 			       "late HPA unlock, continuing\n");
4209 		/* keep using the old n_sectors */
4210 		dev->n_sectors = n_sectors;
4211 		return 0;
4212 	}
4213 
4214 	/*
4215 	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4216 	 * unlocking HPA in those cases.
4217 	 *
4218 	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4219 	 */
4220 	if (dev->n_native_sectors == n_native_sectors &&
4221 	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4222 	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4223 		ata_dev_printk(dev, KERN_WARNING,
4224 			       "old n_sectors matches native, probably "
4225 			       "late HPA lock, will try to unlock HPA\n");
4226 		/* try unlocking HPA */
4227 		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4228 		rc = -EIO;
4229 	} else
4230 		rc = -ENODEV;
4231 
4232 	/* restore original n_[native_]sectors and fail */
4233 	dev->n_native_sectors = n_native_sectors;
4234 	dev->n_sectors = n_sectors;
4235  fail:
4236 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4237 	return rc;
4238 }
4239 
4240 struct ata_blacklist_entry {
4241 	const char *model_num;
4242 	const char *model_rev;
4243 	unsigned long horkage;
4244 };
4245 
4246 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4247 	/* Devices with DMA related problems under Linux */
4248 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4249 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4250 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4251 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4252 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4253 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4254 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4255 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4256 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4257 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
4258 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
4259 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4260 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4261 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4262 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4263 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4264 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
4265 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
4266 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4267 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4268 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4269 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4270 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4271 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4272 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4273 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4274 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4275 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4276 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4277 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4278 	/* Odd clown on sil3726/4726 PMPs */
4279 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4280 
4281 	/* Weird ATAPI devices */
4282 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4283 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4284 
4285 	/* Devices we expect to fail diagnostics */
4286 
4287 	/* Devices where NCQ should be avoided */
4288 	/* NCQ is slow */
4289 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4290 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4291 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4292 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4293 	/* NCQ is broken */
4294 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4295 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4296 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4297 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4298 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4299 
4300 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4301 	{ "ST31500341AS",	"SD15",		ATA_HORKAGE_NONCQ |
4302 						ATA_HORKAGE_FIRMWARE_WARN },
4303 	{ "ST31500341AS",	"SD16",		ATA_HORKAGE_NONCQ |
4304 						ATA_HORKAGE_FIRMWARE_WARN },
4305 	{ "ST31500341AS",	"SD17",		ATA_HORKAGE_NONCQ |
4306 						ATA_HORKAGE_FIRMWARE_WARN },
4307 	{ "ST31500341AS",	"SD18",		ATA_HORKAGE_NONCQ |
4308 						ATA_HORKAGE_FIRMWARE_WARN },
4309 	{ "ST31500341AS",	"SD19",		ATA_HORKAGE_NONCQ |
4310 						ATA_HORKAGE_FIRMWARE_WARN },
4311 
4312 	{ "ST31000333AS",	"SD15",		ATA_HORKAGE_NONCQ |
4313 						ATA_HORKAGE_FIRMWARE_WARN },
4314 	{ "ST31000333AS",	"SD16",		ATA_HORKAGE_NONCQ |
4315 						ATA_HORKAGE_FIRMWARE_WARN },
4316 	{ "ST31000333AS",	"SD17",		ATA_HORKAGE_NONCQ |
4317 						ATA_HORKAGE_FIRMWARE_WARN },
4318 	{ "ST31000333AS",	"SD18",		ATA_HORKAGE_NONCQ |
4319 						ATA_HORKAGE_FIRMWARE_WARN },
4320 	{ "ST31000333AS",	"SD19",		ATA_HORKAGE_NONCQ |
4321 						ATA_HORKAGE_FIRMWARE_WARN },
4322 
4323 	{ "ST3640623AS",	"SD15",		ATA_HORKAGE_NONCQ |
4324 						ATA_HORKAGE_FIRMWARE_WARN },
4325 	{ "ST3640623AS",	"SD16",		ATA_HORKAGE_NONCQ |
4326 						ATA_HORKAGE_FIRMWARE_WARN },
4327 	{ "ST3640623AS",	"SD17",		ATA_HORKAGE_NONCQ |
4328 						ATA_HORKAGE_FIRMWARE_WARN },
4329 	{ "ST3640623AS",	"SD18",		ATA_HORKAGE_NONCQ |
4330 						ATA_HORKAGE_FIRMWARE_WARN },
4331 	{ "ST3640623AS",	"SD19",		ATA_HORKAGE_NONCQ |
4332 						ATA_HORKAGE_FIRMWARE_WARN },
4333 
4334 	{ "ST3640323AS",	"SD15",		ATA_HORKAGE_NONCQ |
4335 						ATA_HORKAGE_FIRMWARE_WARN },
4336 	{ "ST3640323AS",	"SD16",		ATA_HORKAGE_NONCQ |
4337 						ATA_HORKAGE_FIRMWARE_WARN },
4338 	{ "ST3640323AS",	"SD17",		ATA_HORKAGE_NONCQ |
4339 						ATA_HORKAGE_FIRMWARE_WARN },
4340 	{ "ST3640323AS",	"SD18",		ATA_HORKAGE_NONCQ |
4341 						ATA_HORKAGE_FIRMWARE_WARN },
4342 	{ "ST3640323AS",	"SD19",		ATA_HORKAGE_NONCQ |
4343 						ATA_HORKAGE_FIRMWARE_WARN },
4344 
4345 	{ "ST3320813AS",	"SD15",		ATA_HORKAGE_NONCQ |
4346 						ATA_HORKAGE_FIRMWARE_WARN },
4347 	{ "ST3320813AS",	"SD16",		ATA_HORKAGE_NONCQ |
4348 						ATA_HORKAGE_FIRMWARE_WARN },
4349 	{ "ST3320813AS",	"SD17",		ATA_HORKAGE_NONCQ |
4350 						ATA_HORKAGE_FIRMWARE_WARN },
4351 	{ "ST3320813AS",	"SD18",		ATA_HORKAGE_NONCQ |
4352 						ATA_HORKAGE_FIRMWARE_WARN },
4353 	{ "ST3320813AS",	"SD19",		ATA_HORKAGE_NONCQ |
4354 						ATA_HORKAGE_FIRMWARE_WARN },
4355 
4356 	{ "ST3320613AS",	"SD15",		ATA_HORKAGE_NONCQ |
4357 						ATA_HORKAGE_FIRMWARE_WARN },
4358 	{ "ST3320613AS",	"SD16",		ATA_HORKAGE_NONCQ |
4359 						ATA_HORKAGE_FIRMWARE_WARN },
4360 	{ "ST3320613AS",	"SD17",		ATA_HORKAGE_NONCQ |
4361 						ATA_HORKAGE_FIRMWARE_WARN },
4362 	{ "ST3320613AS",	"SD18",		ATA_HORKAGE_NONCQ |
4363 						ATA_HORKAGE_FIRMWARE_WARN },
4364 	{ "ST3320613AS",	"SD19",		ATA_HORKAGE_NONCQ |
4365 						ATA_HORKAGE_FIRMWARE_WARN },
4366 
4367 	/* Blacklist entries taken from Silicon Image 3124/3132
4368 	   Windows driver .inf file - also several Linux problem reports */
4369 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4370 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4371 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4372 
4373 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4374 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4375 
4376 	/* devices which puke on READ_NATIVE_MAX */
4377 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4378 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4379 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4380 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4381 
4382 	/* this one allows HPA unlocking but fails IOs on the area */
4383 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4384 
4385 	/* Devices which report 1 sector over size HPA */
4386 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4387 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4388 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4389 
4390 	/* Devices which get the IVB wrong */
4391 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4392 	/* Maybe we should just blacklist TSSTcorp... */
4393 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
4394 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
4395 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4396 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4397 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4398 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
4399 
4400 	/* Devices that do not need bridging limits applied */
4401 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4402 
4403 	/* Devices which aren't very happy with higher link speeds */
4404 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4405 
4406 	/*
4407 	 * Devices which choke on SETXFER.  Applies only if both the
4408 	 * device and controller are SATA.
4409 	 */
4410 	{ "PIONEER DVD-RW  DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER },
4411 
4412 	/* End Marker */
4413 	{ }
4414 };
4415 
4416 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4417 {
4418 	const char *p;
4419 	int len;
4420 
4421 	/*
4422 	 * check for trailing wildcard: *\0
4423 	 */
4424 	p = strchr(patt, wildchar);
4425 	if (p && ((*(p + 1)) == 0))
4426 		len = p - patt;
4427 	else {
4428 		len = strlen(name);
4429 		if (!len) {
4430 			if (!*patt)
4431 				return 0;
4432 			return -1;
4433 		}
4434 	}
4435 
4436 	return strncmp(patt, name, len);
4437 }
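
/*
 * Example (editorial): with '*' as the wildchar, a trailing wildcard
 * limits the comparison to the prefix before it, while a pattern
 * without a wildcard must match the whole name:
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0
 *	strn_pattern_cmp("ST340823A", "ST340823A", '*') == 0
 *	strn_pattern_cmp("ST340823A", "ST340823AS", '*') != 0
 */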
4438 
4439 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4440 {
4441 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4442 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4443 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4444 
4445 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4446 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4447 
4448 	while (ad->model_num) {
4449 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4450 			if (ad->model_rev == NULL)
4451 				return ad->horkage;
4452 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4453 				return ad->horkage;
4454 		}
4455 		ad++;
4456 	}
4457 	return 0;
4458 }
4459 
4460 static int ata_dma_blacklisted(const struct ata_device *dev)
4461 {
4462 	/* We don't support polling DMA.  Blacklist DMA (and fall back
4463 	 * to PIO) for ATAPI devices with CDB-intr if the LLDD handles
4464 	 * interrupts only in the HSM_ST_LAST state.
4465 	 */
4466 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4467 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4468 		return 1;
4469 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4470 }
4471 
4472 /**
4473  *	ata_is_40wire		-	check drive side detection
4474  *	@dev: device
4475  *
4476  *	Perform drive side detection decoding, allowing for device vendors
4477  *	who can't follow the documentation.
4478  */
4479 
4480 static int ata_is_40wire(struct ata_device *dev)
4481 {
4482 	if (dev->horkage & ATA_HORKAGE_IVB)
4483 		return ata_drive_40wire_relaxed(dev->id);
4484 	return ata_drive_40wire(dev->id);
4485 }
4486 
4487 /**
4488  *	cable_is_40wire		-	40/80/SATA decider
4489  *	@ap: port to consider
4490  *
4491  *	This function encapsulates the policy for speed management
4492  *	in one place. At the moment we don't cache the result but
4493  *	there is a good case for setting ap->cbl to the result when
4494  *	we are called with unknown cables (and figuring out if it
4495  *	impacts hotplug at all).
4496  *
4497  *	Return 1 if the cable appears to be 40 wire.
4498  */
4499 
4500 static int cable_is_40wire(struct ata_port *ap)
4501 {
4502 	struct ata_link *link;
4503 	struct ata_device *dev;
4504 
4505 	/* If the controller thinks we are 40 wire, we are. */
4506 	if (ap->cbl == ATA_CBL_PATA40)
4507 		return 1;
4508 
4509 	/* If the controller thinks we are 80 wire, we are. */
4510 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4511 		return 0;
4512 
4513 	/* If the system is known to use a 40 wire short cable (e.g. a
4514 	 * laptop), then we allow 80 wire modes even if the drive
4515 	 * isn't sure.
4516 	 */
4517 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4518 		return 0;
4519 
4520 	/* If the controller doesn't know, we scan.
4521 	 *
4522 	 * Note: We look for all 40 wire detects at this point.  Any
4523 	 *       80 wire detect is taken to be 80 wire cable because
4524 	 * - in many setups only the one drive (slave if present) will
4525 	 *   give a valid detect
4526 	 * - if you have a non detect capable drive you don't want it
4527 	 *   to colour the choice
4528 	 */
4529 	ata_for_each_link(link, ap, EDGE) {
4530 		ata_for_each_dev(dev, link, ENABLED) {
4531 			if (!ata_is_40wire(dev))
4532 				return 0;
4533 		}
4534 	}
4535 	return 1;
4536 }
4537 
4538 /**
4539  *	ata_dev_xfermask - Compute supported xfermask of the given device
4540  *	@dev: Device to compute xfermask for
4541  *
4542  *	Compute supported xfermask of @dev and store it in
4543  *	dev->*_mask.  This function is responsible for applying all
4544  *	known limits including host controller limits, device
4545  *	blacklist, etc...
4546  *
4547  *	LOCKING:
4548  *	None.
4549  */
4550 static void ata_dev_xfermask(struct ata_device *dev)
4551 {
4552 	struct ata_link *link = dev->link;
4553 	struct ata_port *ap = link->ap;
4554 	struct ata_host *host = ap->host;
4555 	unsigned long xfer_mask;
4556 
4557 	/* controller modes available */
4558 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4559 				      ap->mwdma_mask, ap->udma_mask);
4560 
4561 	/* drive modes available */
4562 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4563 				       dev->mwdma_mask, dev->udma_mask);
4564 	xfer_mask &= ata_id_xfermask(dev->id);
4565 
4566 	/*
4567 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4568 	 *	cable
4569 	 */
4570 	if (ata_dev_pair(dev)) {
4571 		/* No PIO5 or PIO6 */
4572 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4573 		/* No MWDMA3 or MWDMA4 */
4574 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4575 	}
4576 
4577 	if (ata_dma_blacklisted(dev)) {
4578 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4579 		ata_dev_printk(dev, KERN_WARNING,
4580 			       "device is on DMA blacklist, disabling DMA\n");
4581 	}
4582 
4583 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4584 	    host->simplex_claimed && host->simplex_claimed != ap) {
4585 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4586 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4587 			       "other device, disabling DMA\n");
4588 	}
4589 
4590 	if (ap->flags & ATA_FLAG_NO_IORDY)
4591 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4592 
4593 	if (ap->ops->mode_filter)
4594 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4595 
4596 	/* Apply cable rule here.  Don't apply it early because when
4597 	 * we handle hot plug the cable type can itself change.
4598 	 * Check this last so that we know if the transfer rate was
4599 	 * solely limited by the cable.
4600 	 * Unknown or 80 wire cables reported host side are checked
4601 	 * drive side as well.  Cases where we know a 40 wire cable
4602 	 * is safely used for 80 wire modes are not checked here.
4603 	 */
4604 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4605 		/* UDMA/44 or higher would be available */
4606 		if (cable_is_40wire(ap)) {
4607 			ata_dev_printk(dev, KERN_WARNING,
4608 				 "limited to UDMA/33 due to 40-wire cable\n");
4609 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4610 		}
4611 
4612 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4613 			    &dev->mwdma_mask, &dev->udma_mask);
4614 }
4615 
4616 /**
4617  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4618  *	@dev: Device to which command will be sent
4619  *
4620  *	Issue SET FEATURES - XFER MODE command to device @dev
4621  *	on port @ap.
4622  *
4623  *	LOCKING:
4624  *	PCI/etc. bus probe sem.
4625  *
4626  *	RETURNS:
4627  *	0 on success, AC_ERR_* mask otherwise.
4628  */
4629 
4630 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4631 {
4632 	struct ata_taskfile tf;
4633 	unsigned int err_mask;
4634 
4635 	/* set up set-features taskfile */
4636 	DPRINTK("set features - xfer mode\n");
4637 
4638 	/* Some controllers and ATAPI devices show flaky interrupt
4639 	 * behavior after setting xfer mode.  Use polling instead.
4640 	 */
4641 	ata_tf_init(dev, &tf);
4642 	tf.command = ATA_CMD_SET_FEATURES;
4643 	tf.feature = SETFEATURES_XFER;
4644 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4645 	tf.protocol = ATA_PROT_NODATA;
4646 	/* If we are using IORDY we must send the mode setting command */
4647 	if (ata_pio_need_iordy(dev))
4648 		tf.nsect = dev->xfer_mode;
4649 	/* If the device has IORDY and the controller does not - turn it off */
4650 	else if (ata_id_has_iordy(dev->id))
4651 		tf.nsect = 0x01;
4652 	else /* In the ancient relic department - skip all of this */
4653 		return 0;
4654 
4655 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4656 
4657 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4658 	return err_mask;
4659 }

4660 /**
4661  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4662  *	@dev: Device to which command will be sent
4663  *	@enable: Whether to enable or disable the feature
4664  *	@feature: The feature to set, passed in the sector count field
4665  *
4666  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4667  *	on port @ap, with the sector count set to @feature.
4668  *
4669  *	LOCKING:
4670  *	PCI/etc. bus probe sem.
4671  *
4672  *	RETURNS:
4673  *	0 on success, AC_ERR_* mask otherwise.
4674  */
4675 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4676 					u8 feature)
4677 {
4678 	struct ata_taskfile tf;
4679 	unsigned int err_mask;
4680 
4681 	/* set up set-features taskfile */
4682 	DPRINTK("set features - SATA features\n");
4683 
4684 	ata_tf_init(dev, &tf);
4685 	tf.command = ATA_CMD_SET_FEATURES;
4686 	tf.feature = enable;
4687 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4688 	tf.protocol = ATA_PROT_NODATA;
4689 	tf.nsect = feature;
4690 
4691 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4692 
4693 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4694 	return err_mask;
4695 }
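
/*
 * Example (editorial sketch): enabling a SATA feature such as DIPM
 * using the SETFEATURES_SATA_ENABLE and SATA_DIPM values from
 * <linux/ata.h>; acting on the error mask is up to the caller.
 *
 *	unsigned int err_mask;
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_WARNING,
 *			       "failed to enable DIPM (err_mask=0x%x)\n",
 *			       err_mask);
 */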
4696 
4697 /**
4698  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4699  *	@dev: Device to which command will be sent
4700  *	@heads: Number of heads (taskfile parameter)
4701  *	@sectors: Number of sectors (taskfile parameter)
4702  *
4703  *	LOCKING:
4704  *	Kernel thread context (may sleep)
4705  *
4706  *	RETURNS:
4707  *	0 on success, AC_ERR_* mask otherwise.
4708  */
4709 static unsigned int ata_dev_init_params(struct ata_device *dev,
4710 					u16 heads, u16 sectors)
4711 {
4712 	struct ata_taskfile tf;
4713 	unsigned int err_mask;
4714 
4715 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4716 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4717 		return AC_ERR_INVALID;
4718 
4719 	/* set up init dev params taskfile */
4720 	DPRINTK("init dev params\n");
4721 
4722 	ata_tf_init(dev, &tf);
4723 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4724 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4725 	tf.protocol = ATA_PROT_NODATA;
4726 	tf.nsect = sectors;
4727 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4728 
4729 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4730 	/* A clean abort indicates an original or just out-of-spec drive
4731 	   and we should continue as we issue the setup based on the
4732 	   drive-reported working geometry */
4733 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4734 		err_mask = 0;
4735 
4736 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4737 	return err_mask;
4738 }
4739 
4740 /**
4741  *	ata_sg_clean - Unmap DMA memory associated with command
4742  *	@qc: Command containing DMA memory to be released
4743  *
4744  *	Unmap all mapped DMA memory associated with this command.
4745  *
4746  *	LOCKING:
4747  *	spin_lock_irqsave(host lock)
4748  */
4749 void ata_sg_clean(struct ata_queued_cmd *qc)
4750 {
4751 	struct ata_port *ap = qc->ap;
4752 	struct scatterlist *sg = qc->sg;
4753 	int dir = qc->dma_dir;
4754 
4755 	WARN_ON_ONCE(sg == NULL);
4756 
4757 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4758 
4759 	if (qc->n_elem)
4760 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4761 
4762 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4763 	qc->sg = NULL;
4764 }
4765 
4766 /**
4767  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4768  *	@qc: Metadata associated with taskfile to check
4769  *
4770  *	Allow low-level driver to filter ATA PACKET commands, returning
4771  *	a status indicating whether or not it is OK to use DMA for the
4772  *	supplied PACKET command.
4773  *
4774  *	LOCKING:
4775  *	spin_lock_irqsave(host lock)
4776  *
4777  *	RETURNS: 0 when ATAPI DMA can be used
4778  *               nonzero otherwise
4779  */
4780 int atapi_check_dma(struct ata_queued_cmd *qc)
4781 {
4782 	struct ata_port *ap = qc->ap;
4783 
4784 	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4785 	 * few ATAPI devices choke on such DMA requests.
4786 	 */
4787 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4788 	    unlikely(qc->nbytes & 15))
4789 		return 1;
4790 
4791 	if (ap->ops->check_atapi_dma)
4792 		return ap->ops->check_atapi_dma(qc);
4793 
4794 	return 0;
4795 }
4796 
4797 /**
4798  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4799  *	@qc: ATA command in question
4800  *
4801  *	Non-NCQ commands cannot run with any other command, NCQ or
4802  *	not.  As the upper layer only knows the queue depth, we are
4803  *	responsible for maintaining exclusion.  This function checks
4804  *	whether a new command @qc can be issued.
4805  *
4806  *	LOCKING:
4807  *	spin_lock_irqsave(host lock)
4808  *
4809  *	RETURNS:
4810  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4811  */
4812 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4813 {
4814 	struct ata_link *link = qc->dev->link;
4815 
4816 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4817 		if (!ata_tag_valid(link->active_tag))
4818 			return 0;
4819 	} else {
4820 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4821 			return 0;
4822 	}
4823 
4824 	return ATA_DEFER_LINK;
4825 }
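
/*
 * Example (editorial sketch): an LLD with additional hardware
 * constraints usually applies its own checks and then chains to the
 * standard helper.  my_controller_busy() is hypothetical.
 *
 *	static int my_qc_defer(struct ata_queued_cmd *qc)
 *	{
 *		if (my_controller_busy(qc->ap))
 *			return ATA_DEFER_PORT;
 *		return ata_std_qc_defer(qc);
 *	}
 */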
4826 
4827 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4828 
4829 /**
4830  *	ata_sg_init - Associate command with scatter-gather table.
4831  *	@qc: Command to be associated
4832  *	@sg: Scatter-gather table.
4833  *	@n_elem: Number of elements in s/g table.
4834  *
4835  *	Initialize the data-related elements of queued_cmd @qc
4836  *	to point to a scatter-gather table @sg, containing @n_elem
4837  *	elements.
4838  *
4839  *	LOCKING:
4840  *	spin_lock_irqsave(host lock)
4841  */
4842 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4843 		 unsigned int n_elem)
4844 {
4845 	qc->sg = sg;
4846 	qc->n_elem = n_elem;
4847 	qc->cursg = qc->sg;
4848 }
4849 
4850 /**
4851  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4852  *	@qc: Command with scatter-gather table to be mapped.
4853  *
4854  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4855  *
4856  *	LOCKING:
4857  *	spin_lock_irqsave(host lock)
4858  *
4859  *	RETURNS:
4860  *	Zero on success, negative on error.
4861  *
4862  */
4863 static int ata_sg_setup(struct ata_queued_cmd *qc)
4864 {
4865 	struct ata_port *ap = qc->ap;
4866 	unsigned int n_elem;
4867 
4868 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4869 
4870 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4871 	if (n_elem < 1)
4872 		return -1;
4873 
4874 	DPRINTK("%d sg elements mapped\n", n_elem);
4875 	qc->orig_n_elem = qc->n_elem;
4876 	qc->n_elem = n_elem;
4877 	qc->flags |= ATA_QCFLAG_DMAMAP;
4878 
4879 	return 0;
4880 }
4881 
4882 /**
4883  *	swap_buf_le16 - swap halves of 16-bit words in place
4884  *	@buf:  Buffer to swap
4885  *	@buf_words:  Number of 16-bit words in buffer.
4886  *
4887  *	Swap halves of 16-bit words if needed to convert from
4888  *	little-endian byte order to native cpu byte order, or
4889  *	vice-versa.
4890  *
4891  *	LOCKING:
4892  *	Inherited from caller.
4893  */
4894 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4895 {
4896 #ifdef __BIG_ENDIAN
4897 	unsigned int i;
4898 
4899 	for (i = 0; i < buf_words; i++)
4900 		buf[i] = le16_to_cpu(buf[i]);
4901 #endif /* __BIG_ENDIAN */
4902 }
4903 
4904 /**
4905  *	ata_qc_new - Request an available ATA command, for queueing
4906  *	@ap: target port
4907  *
4908  *	LOCKING:
4909  *	None.
4910  */
4911 
4912 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4913 {
4914 	struct ata_queued_cmd *qc = NULL;
4915 	unsigned int i;
4916 
4917 	/* no command while frozen */
4918 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4919 		return NULL;
4920 
4921 	/* the last tag is reserved for the internal command. */
4922 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4923 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4924 			qc = __ata_qc_from_tag(ap, i);
4925 			break;
4926 		}
4927 
4928 	if (qc)
4929 		qc->tag = i;
4930 
4931 	return qc;
4932 }
4933 
4934 /**
4935  *	ata_qc_new_init - Request an available ATA command, and initialize it
4936  *	@dev: Device from whom we request an available command structure
4937  *
4938  *	LOCKING:
4939  *	None.
4940  */
4941 
4942 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4943 {
4944 	struct ata_port *ap = dev->link->ap;
4945 	struct ata_queued_cmd *qc;
4946 
4947 	qc = ata_qc_new(ap);
4948 	if (qc) {
4949 		qc->scsicmd = NULL;
4950 		qc->ap = ap;
4951 		qc->dev = dev;
4952 
4953 		ata_qc_reinit(qc);
4954 	}
4955 
4956 	return qc;
4957 }
4958 
4959 /**
4960  *	ata_qc_free - free unused ata_queued_cmd
4961  *	@qc: Command to complete
4962  *
4963  *	Designed to free unused ata_queued_cmd object
4964  *	in case something prevents using it.
4965  *
4966  *	LOCKING:
4967  *	spin_lock_irqsave(host lock)
4968  */
4969 void ata_qc_free(struct ata_queued_cmd *qc)
4970 {
4971 	struct ata_port *ap;
4972 	unsigned int tag;
4973 
4974 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4975 	ap = qc->ap;
4976 
4977 	qc->flags = 0;
4978 	tag = qc->tag;
4979 	if (likely(ata_tag_valid(tag))) {
4980 		qc->tag = ATA_TAG_POISON;
4981 		clear_bit(tag, &ap->qc_allocated);
4982 	}
4983 }
4984 
4985 void __ata_qc_complete(struct ata_queued_cmd *qc)
4986 {
4987 	struct ata_port *ap;
4988 	struct ata_link *link;
4989 
4990 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4991 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4992 	ap = qc->ap;
4993 	link = qc->dev->link;
4994 
4995 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4996 		ata_sg_clean(qc);
4997 
4998 	/* command should be marked inactive atomically with qc completion */
4999 	if (qc->tf.protocol == ATA_PROT_NCQ) {
5000 		link->sactive &= ~(1 << qc->tag);
5001 		if (!link->sactive)
5002 			ap->nr_active_links--;
5003 	} else {
5004 		link->active_tag = ATA_TAG_POISON;
5005 		ap->nr_active_links--;
5006 	}
5007 
5008 	/* clear exclusive status */
5009 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5010 		     ap->excl_link == link))
5011 		ap->excl_link = NULL;
5012 
5013 	/* atapi: mark qc as inactive to prevent the interrupt handler
5014 	 * from completing the command twice later, before the error handler
5015 	 * is called. (when rc != 0 and atapi request sense is needed)
5016 	 */
5017 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5018 	ap->qc_active &= ~(1 << qc->tag);
5019 
5020 	/* call completion callback */
5021 	qc->complete_fn(qc);
5022 }
5023 
5024 static void fill_result_tf(struct ata_queued_cmd *qc)
5025 {
5026 	struct ata_port *ap = qc->ap;
5027 
5028 	qc->result_tf.flags = qc->tf.flags;
5029 	ap->ops->qc_fill_rtf(qc);
5030 }
5031 
5032 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5033 {
5034 	struct ata_device *dev = qc->dev;
5035 
5036 	if (ata_tag_internal(qc->tag))
5037 		return;
5038 
5039 	if (ata_is_nodata(qc->tf.protocol))
5040 		return;
5041 
5042 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5043 		return;
5044 
5045 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5046 }
5047 
5048 /**
5049  *	ata_qc_complete - Complete an active ATA command
5050  *	@qc: Command to complete
5051  *
5052  *	Indicate to the mid and upper layers that an ATA
5053  *	command has completed, with either an ok or not-ok status.
5054  *
5055  *	LOCKING:
5056  *	spin_lock_irqsave(host lock)
5057  */
5058 void ata_qc_complete(struct ata_queued_cmd *qc)
5059 {
5060 	struct ata_port *ap = qc->ap;
5061 
5062 	/* XXX: New EH and old EH use different mechanisms to
5063 	 * synchronize EH with regular execution path.
5064 	 *
5065 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5066 	 * Normal execution path is responsible for not accessing a
5067 	 * failed qc.  libata core enforces the rule by returning NULL
5068 	 * from ata_qc_from_tag() for failed qcs.
5069 	 *
5070 	 * Old EH depends on ata_qc_complete() nullifying completion
5071 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5072 	 * not synchronize with interrupt handler.  Only PIO task is
5073 	 * taken care of.
5074 	 */
5075 	if (ap->ops->error_handler) {
5076 		struct ata_device *dev = qc->dev;
5077 		struct ata_eh_info *ehi = &dev->link->eh_info;
5078 
5079 		if (unlikely(qc->err_mask))
5080 			qc->flags |= ATA_QCFLAG_FAILED;
5081 
5082 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5083 			/* always fill result TF for failed qc */
5084 			fill_result_tf(qc);
5085 
5086 			if (!ata_tag_internal(qc->tag))
5087 				ata_qc_schedule_eh(qc);
5088 			else
5089 				__ata_qc_complete(qc);
5090 			return;
5091 		}
5092 
5093 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5094 
5095 		/* read result TF if requested */
5096 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5097 			fill_result_tf(qc);
5098 
5099 		/* Some commands need post-processing after successful
5100 		 * completion.
5101 		 */
5102 		switch (qc->tf.command) {
5103 		case ATA_CMD_SET_FEATURES:
5104 			if (qc->tf.feature != SETFEATURES_WC_ON &&
5105 			    qc->tf.feature != SETFEATURES_WC_OFF)
5106 				break;
5107 			/* fall through */
5108 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5109 		case ATA_CMD_SET_MULTI: /* multi_count changed */
5110 			/* revalidate device */
5111 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5112 			ata_port_schedule_eh(ap);
5113 			break;
5114 
5115 		case ATA_CMD_SLEEP:
5116 			dev->flags |= ATA_DFLAG_SLEEPING;
5117 			break;
5118 		}
5119 
5120 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5121 			ata_verify_xfer(qc);
5122 
5123 		__ata_qc_complete(qc);
5124 	} else {
5125 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5126 			return;
5127 
5128 		/* read result TF if failed or requested */
5129 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5130 			fill_result_tf(qc);
5131 
5132 		__ata_qc_complete(qc);
5133 	}
5134 }
5135 
5136 /**
5137  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5138  *	@ap: port in question
5139  *	@qc_active: new qc_active mask
5140  *
5141  *	Complete in-flight commands.  This function is meant to be
5142  *	called from the low-level driver's interrupt routine to complete
5143  *	requests normally.  ap->qc_active and @qc_active are compared
5144  *	and commands are completed accordingly.
5145  *
5146  *	LOCKING:
5147  *	spin_lock_irqsave(host lock)
5148  *
5149  *	RETURNS:
5150  *	Number of completed commands on success, -errno otherwise.
5151  */
5152 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5153 {
5154 	int nr_done = 0;
5155 	u32 done_mask;
5156 
5157 	done_mask = ap->qc_active ^ qc_active;
5158 
5159 	if (unlikely(done_mask & qc_active)) {
5160 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5161 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5162 		return -EINVAL;
5163 	}
5164 
5165 	while (done_mask) {
5166 		struct ata_queued_cmd *qc;
5167 		unsigned int tag = __ffs(done_mask);
5168 
5169 		qc = ata_qc_from_tag(ap, tag);
5170 		if (qc) {
5171 			ata_qc_complete(qc);
5172 			nr_done++;
5173 		}
5174 		done_mask &= ~(1 << tag);
5175 	}
5176 
5177 	return nr_done;
5178 }
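
/*
 * Example (editorial sketch): an NCQ-capable LLD's interrupt handler
 * typically reads a "commands still outstanding" register from its
 * hardware and passes that mask straight in; my_read_active_tags()
 * is a hypothetical stand-in for that register read.
 *
 *	u32 qc_active = my_read_active_tags(ap);
 *
 *	ata_qc_complete_multiple(ap, qc_active);
 */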
5179 
5180 /**
5181  *	ata_qc_issue - issue taskfile to device
5182  *	@qc: command to issue to device
5183  *
5184  *	Prepare an ATA command for submission to the device.
5185  *	This includes mapping the data into a DMA-able
5186  *	area, filling in the S/G table, and finally
5187  *	writing the taskfile to hardware, starting the command.
5188  *
5189  *	LOCKING:
5190  *	spin_lock_irqsave(host lock)
5191  */
5192 void ata_qc_issue(struct ata_queued_cmd *qc)
5193 {
5194 	struct ata_port *ap = qc->ap;
5195 	struct ata_link *link = qc->dev->link;
5196 	u8 prot = qc->tf.protocol;
5197 
5198 	/* Make sure only one non-NCQ command is outstanding.  The
5199 	 * check is skipped for old EH because it reuses active qc to
5200 	 * request ATAPI sense.
5201 	 */
5202 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5203 
5204 	if (ata_is_ncq(prot)) {
5205 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5206 
5207 		if (!link->sactive)
5208 			ap->nr_active_links++;
5209 		link->sactive |= 1 << qc->tag;
5210 	} else {
5211 		WARN_ON_ONCE(link->sactive);
5212 
5213 		ap->nr_active_links++;
5214 		link->active_tag = qc->tag;
5215 	}
5216 
5217 	qc->flags |= ATA_QCFLAG_ACTIVE;
5218 	ap->qc_active |= 1 << qc->tag;
5219 
5220 	/* We guarantee to LLDs that they will have at least one
5221 	 * non-zero sg if the command is a data command.
5222 	 */
5223 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
5224 
5225 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5226 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5227 		if (ata_sg_setup(qc))
5228 			goto sg_err;
5229 
5230 	/* if device is sleeping, schedule reset and abort the link */
5231 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5232 		link->eh_info.action |= ATA_EH_RESET;
5233 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5234 		ata_link_abort(link);
5235 		return;
5236 	}
5237 
5238 	ap->ops->qc_prep(qc);
5239 
5240 	qc->err_mask |= ap->ops->qc_issue(qc);
5241 	if (unlikely(qc->err_mask))
5242 		goto err;
5243 	return;
5244 
5245 sg_err:
5246 	qc->err_mask |= AC_ERR_SYSTEM;
5247 err:
5248 	ata_qc_complete(qc);
5249 }
5250 
5251 /**
5252  *	sata_scr_valid - test whether SCRs are accessible
5253  *	@link: ATA link to test SCR accessibility for
5254  *
5255  *	Test whether SCRs are accessible for @link.
5256  *
5257  *	LOCKING:
5258  *	None.
5259  *
5260  *	RETURNS:
5261  *	1 if SCRs are accessible, 0 otherwise.
5262  */
5263 int sata_scr_valid(struct ata_link *link)
5264 {
5265 	struct ata_port *ap = link->ap;
5266 
5267 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5268 }
5269 
5270 /**
5271  *	sata_scr_read - read SCR register of the specified port
5272  *	@link: ATA link to read SCR for
5273  *	@reg: SCR to read
5274  *	@val: Place to store read value
5275  *
5276  *	Read SCR register @reg of @link into *@val.  This function is
5277  *	guaranteed to succeed if @link is ap->link, the cable type of
5278  *	the port is SATA and the port implements ->scr_read.
5279  *
5280  *	LOCKING:
5281  *	None if @link is ap->link.  Kernel thread context otherwise.
5282  *
5283  *	RETURNS:
5284  *	0 on success, negative errno on failure.
5285  */
5286 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5287 {
5288 	if (ata_is_host_link(link)) {
5289 		if (sata_scr_valid(link))
5290 			return link->ap->ops->scr_read(link, reg, val);
5291 		return -EOPNOTSUPP;
5292 	}
5293 
5294 	return sata_pmp_scr_read(link, reg, val);
5295 }
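
/*
 * Example (editorial sketch): reading SStatus and testing the DET
 * field (bits 3:0, value 3 meaning device present and phy
 * communication established), much like the online/offline helpers
 * further below.  rc and link_is_up are the caller's locals.
 *
 *	u32 sstatus;
 *
 *	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
 *	if (rc == 0 && (sstatus & 0xf) == 0x3)
 *		link_is_up = true;
 */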
5296 
5297 /**
5298  *	sata_scr_write - write SCR register of the specified port
5299  *	@link: ATA link to write SCR for
5300  *	@reg: SCR to write
5301  *	@val: value to write
5302  *
5303  *	Write @val to SCR register @reg of @link.  This function is
5304  *	guaranteed to succeed if @link is ap->link, the cable type of
5305  *	the port is SATA and the port implements ->scr_read.
5306  *
5307  *	LOCKING:
5308  *	None if @link is ap->link.  Kernel thread context otherwise.
5309  *
5310  *	RETURNS:
5311  *	0 on success, negative errno on failure.
5312  */
5313 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5314 {
5315 	if (ata_is_host_link(link)) {
5316 		if (sata_scr_valid(link))
5317 			return link->ap->ops->scr_write(link, reg, val);
5318 		return -EOPNOTSUPP;
5319 	}
5320 
5321 	return sata_pmp_scr_write(link, reg, val);
5322 }
5323 
5324 /**
5325  *	sata_scr_write_flush - write SCR register of the specified port and flush
5326  *	@link: ATA link to write SCR for
5327  *	@reg: SCR to write
5328  *	@val: value to write
5329  *
5330  *	This function is identical to sata_scr_write() except that this
5331  *	function performs flush after writing to the register.
5332  *
5333  *	LOCKING:
5334  *	None if @link is ap->link.  Kernel thread context otherwise.
5335  *
5336  *	RETURNS:
5337  *	0 on success, negative errno on failure.
5338  */
5339 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5340 {
5341 	if (ata_is_host_link(link)) {
5342 		int rc;
5343 
5344 		if (sata_scr_valid(link)) {
5345 			rc = link->ap->ops->scr_write(link, reg, val);
5346 			if (rc == 0)
5347 				rc = link->ap->ops->scr_read(link, reg, &val);
5348 			return rc;
5349 		}
5350 		return -EOPNOTSUPP;
5351 	}
5352 
5353 	return sata_pmp_scr_write(link, reg, val);
5354 }
5355 
5356 /**
5357  *	ata_phys_link_online - test whether the given link is online
5358  *	@link: ATA link to test
5359  *
5360  *	Test whether @link is online.  Note that this function returns
5361  *	0 if online status of @link cannot be obtained, so
5362  *	ata_link_online(link) != !ata_link_offline(link).
5363  *
5364  *	LOCKING:
5365  *	None.
5366  *
5367  *	RETURNS:
5368  *	True if the port online status is available and online.
5369  */
5370 bool ata_phys_link_online(struct ata_link *link)
5371 {
5372 	u32 sstatus;
5373 
5374 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5375 	    ata_sstatus_online(sstatus))
5376 		return true;
5377 	return false;
5378 }
5379 
5380 /**
5381  *	ata_phys_link_offline - test whether the given link is offline
5382  *	@link: ATA link to test
5383  *
5384  *	Test whether @link is offline.  Note that this function
5385  *	returns 0 if offline status of @link cannot be obtained, so
5386  *	ata_link_online(link) != !ata_link_offline(link).
5387  *
5388  *	LOCKING:
5389  *	None.
5390  *
5391  *	RETURNS:
5392  *	True if the port offline status is available and offline.
5393  */
5394 bool ata_phys_link_offline(struct ata_link *link)
5395 {
5396 	u32 sstatus;
5397 
5398 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5399 	    !ata_sstatus_online(sstatus))
5400 		return true;
5401 	return false;
5402 }
5403 
5404 /**
5405  *	ata_link_online - test whether the given link is online
5406  *	@link: ATA link to test
5407  *
5408  *	Test whether @link is online.  This is identical to
5409  *	ata_phys_link_online() when there's no slave link.  When
5410  *	there's a slave link, this function should only be called on
5411  *	the master link and will return true if any of M/S links is
5412  *	online.
5413  *
5414  *	LOCKING:
5415  *	None.
5416  *
5417  *	RETURNS:
5418  *	True if the port online status is available and online.
5419  */
5420 bool ata_link_online(struct ata_link *link)
5421 {
5422 	struct ata_link *slave = link->ap->slave_link;
5423 
5424 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5425 
5426 	return ata_phys_link_online(link) ||
5427 		(slave && ata_phys_link_online(slave));
5428 }
5429 
5430 /**
5431  *	ata_link_offline - test whether the given link is offline
5432  *	@link: ATA link to test
5433  *
5434  *	Test whether @link is offline.  This is identical to
5435  *	ata_phys_link_offline() when there's no slave link.  When
5436  *	there's a slave link, this function should only be called on
5437  *	the master link and will return true if both M/S links are
5438  *	offline.
5439  *
5440  *	LOCKING:
5441  *	None.
5442  *
5443  *	RETURNS:
5444  *	True if the port offline status is available and offline.
5445  */
5446 bool ata_link_offline(struct ata_link *link)
5447 {
5448 	struct ata_link *slave = link->ap->slave_link;
5449 
5450 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5451 
5452 	return ata_phys_link_offline(link) &&
5453 		(!slave || ata_phys_link_offline(slave));
5454 }
5455 
5456 #ifdef CONFIG_PM
5457 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5458 			       unsigned int action, unsigned int ehi_flags,
5459 			       int wait)
5460 {
5461 	unsigned long flags;
5462 	int i, rc;
5463 
5464 	for (i = 0; i < host->n_ports; i++) {
5465 		struct ata_port *ap = host->ports[i];
5466 		struct ata_link *link;
5467 
5468 		/* Previous resume operation might still be in
5469 		 * progress.  Wait for PM_PENDING to clear.
5470 		 */
5471 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5472 			ata_port_wait_eh(ap);
5473 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5474 		}
5475 
5476 		/* request PM ops to EH */
5477 		spin_lock_irqsave(ap->lock, flags);
5478 
5479 		ap->pm_mesg = mesg;
5480 		if (wait) {
5481 			rc = 0;
5482 			ap->pm_result = &rc;
5483 		}
5484 
5485 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5486 		ata_for_each_link(link, ap, HOST_FIRST) {
5487 			link->eh_info.action |= action;
5488 			link->eh_info.flags |= ehi_flags;
5489 		}
5490 
5491 		ata_port_schedule_eh(ap);
5492 
5493 		spin_unlock_irqrestore(ap->lock, flags);
5494 
5495 		/* wait and check result */
5496 		if (wait) {
5497 			ata_port_wait_eh(ap);
5498 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5499 			if (rc)
5500 				return rc;
5501 		}
5502 	}
5503 
5504 	return 0;
5505 }
5506 
5507 /**
5508  *	ata_host_suspend - suspend host
5509  *	@host: host to suspend
5510  *	@mesg: PM message
5511  *
5512  *	Suspend @host.  Actual operation is performed by EH.  This
5513  *	function requests EH to perform PM operations and waits for EH
5514  *	to finish.
5515  *
5516  *	LOCKING:
5517  *	Kernel thread context (may sleep).
5518  *
5519  *	RETURNS:
5520  *	0 on success, -errno on failure.
5521  */
5522 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5523 {
5524 	int rc;
5525 
5526 	/*
5527 	 * disable link pm on all ports before requesting
5528 	 * any pm activity
5529 	 */
5530 	ata_lpm_enable(host);
5531 
5532 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5533 	if (rc == 0)
5534 		host->dev->power.power_state = mesg;
5535 	return rc;
5536 }
5537 
5538 /**
5539  *	ata_host_resume - resume host
5540  *	@host: host to resume
5541  *
5542  *	Resume @host.  Actual operation is performed by EH.  This
5543  *	function requests EH to perform PM operations and returns.
5544  *	Note that all resume operations are performed in parallel.
5545  *
5546  *	LOCKING:
5547  *	Kernel thread context (may sleep).
5548  */
5549 void ata_host_resume(struct ata_host *host)
5550 {
5551 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5552 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5553 	host->dev->power.power_state = PMSG_ON;
5554 
5555 	/* reenable link pm */
5556 	ata_lpm_disable(host);
5557 }
5558 #endif
5559 
5560 /**
5561  *	ata_port_start - Set port up for dma.
5562  *	@ap: Port to initialize
5563  *
5564  *	Called just after data structures for each port are
5565  *	initialized.  Allocates space for PRD table.
5566  *
5567  *	May be used as the port_start() entry in ata_port_operations.
5568  *
5569  *	LOCKING:
5570  *	Inherited from caller.
5571  */
5572 int ata_port_start(struct ata_port *ap)
5573 {
5574 	struct device *dev = ap->dev;
5575 
5576 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5577 				      GFP_KERNEL);
5578 	if (!ap->prd)
5579 		return -ENOMEM;
5580 
5581 	return 0;
5582 }
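
/*
 * Usage sketch (hypothetical ops structure): an SFF/BMDMA-style LLD that
 * needs the standard PRD table can simply point .port_start at this
 * helper, as the kernel-doc above suggests.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.port_start	= ata_port_start,
 *	};
 */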
5583 
5584 /**
5585  *	ata_dev_init - Initialize an ata_device structure
5586  *	@dev: Device structure to initialize
5587  *
5588  *	Initialize @dev in preparation for probing.
5589  *
5590  *	LOCKING:
5591  *	Inherited from caller.
5592  */
5593 void ata_dev_init(struct ata_device *dev)
5594 {
5595 	struct ata_link *link = ata_dev_phys_link(dev);
5596 	struct ata_port *ap = link->ap;
5597 	unsigned long flags;
5598 
5599 	/* SATA spd limit is bound to the attached device, reset together */
5600 	link->sata_spd_limit = link->hw_sata_spd_limit;
5601 	link->sata_spd = 0;
5602 
5603 	/* High bits of dev->flags are used to record warm plug
5604 	 * requests which occur asynchronously.  Synchronize using
5605 	 * host lock.
5606 	 */
5607 	spin_lock_irqsave(ap->lock, flags);
5608 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5609 	dev->horkage = 0;
5610 	spin_unlock_irqrestore(ap->lock, flags);
5611 
5612 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5613 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5614 	dev->pio_mask = UINT_MAX;
5615 	dev->mwdma_mask = UINT_MAX;
5616 	dev->udma_mask = UINT_MAX;
5617 }
5618 
5619 /**
5620  *	ata_link_init - Initialize an ata_link structure
5621  *	@ap: ATA port link is attached to
5622  *	@link: Link structure to initialize
5623  *	@pmp: Port multiplier port number
5624  *
5625  *	Initialize @link.
5626  *
5627  *	LOCKING:
5628  *	Kernel thread context (may sleep)
5629  */
5630 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5631 {
5632 	int i;
5633 
5634 	/* clear everything except for devices */
5635 	memset(link, 0, offsetof(struct ata_link, device[0]));
5636 
5637 	link->ap = ap;
5638 	link->pmp = pmp;
5639 	link->active_tag = ATA_TAG_POISON;
5640 	link->hw_sata_spd_limit = UINT_MAX;
5641 
5642 	/* can't use iterator, ap isn't initialized yet */
5643 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5644 		struct ata_device *dev = &link->device[i];
5645 
5646 		dev->link = link;
5647 		dev->devno = dev - link->device;
5648 #ifdef CONFIG_ATA_ACPI
5649 		dev->gtf_filter = ata_acpi_gtf_filter;
5650 #endif
5651 		ata_dev_init(dev);
5652 	}
5653 }
5654 
5655 /**
5656  *	sata_link_init_spd - Initialize link->sata_spd_limit
5657  *	@link: Link to configure sata_spd_limit for
5658  *
5659  *	Initialize @link->[hw_]sata_spd_limit to the currently
5660  *	configured value.
5661  *
5662  *	LOCKING:
5663  *	Kernel thread context (may sleep).
5664  *
5665  *	RETURNS:
5666  *	0 on success, -errno on failure.
5667  */
5668 int sata_link_init_spd(struct ata_link *link)
5669 {
5670 	u8 spd;
5671 	int rc;
5672 
5673 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5674 	if (rc)
5675 		return rc;
5676 
5677 	spd = (link->saved_scontrol >> 4) & 0xf;
5678 	if (spd)
5679 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5680 
5681 	ata_force_link_limits(link);
5682 
5683 	link->sata_spd_limit = link->hw_sata_spd_limit;
5684 
5685 	return 0;
5686 }
5687 
5688 /**
5689  *	ata_port_alloc - allocate and initialize basic ATA port resources
5690  *	@host: ATA host this allocated port belongs to
5691  *
5692  *	Allocate and initialize basic ATA port resources.
5693  *
5694  *	RETURNS:
5695  *	Allocated ATA port on success, NULL on failure.
5696  *
5697  *	LOCKING:
5698  *	Inherited from calling layer (may sleep).
5699  */
5700 struct ata_port *ata_port_alloc(struct ata_host *host)
5701 {
5702 	struct ata_port *ap;
5703 
5704 	DPRINTK("ENTER\n");
5705 
5706 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5707 	if (!ap)
5708 		return NULL;
5709 
5710 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5711 	ap->lock = &host->lock;
5712 	ap->flags = ATA_FLAG_DISABLED;
5713 	ap->print_id = -1;
5714 	ap->ctl = ATA_DEVCTL_OBS;
5715 	ap->host = host;
5716 	ap->dev = host->dev;
5717 	ap->last_ctl = 0xFF;
5718 
5719 #if defined(ATA_VERBOSE_DEBUG)
5720 	/* turn on all debugging levels */
5721 	ap->msg_enable = 0x00FF;
5722 #elif defined(ATA_DEBUG)
5723 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5724 #else
5725 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5726 #endif
5727 
5728 #ifdef CONFIG_ATA_SFF
5729 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5730 #else
5731 	INIT_DELAYED_WORK(&ap->port_task, NULL);
5732 #endif
5733 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5734 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5735 	INIT_LIST_HEAD(&ap->eh_done_q);
5736 	init_waitqueue_head(&ap->eh_wait_q);
5737 	init_completion(&ap->park_req_pending);
5738 	init_timer_deferrable(&ap->fastdrain_timer);
5739 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5740 	ap->fastdrain_timer.data = (unsigned long)ap;
5741 
5742 	ap->cbl = ATA_CBL_NONE;
5743 
5744 	ata_link_init(ap, &ap->link, 0);
5745 
5746 #ifdef ATA_IRQ_TRAP
5747 	ap->stats.unhandled_irq = 1;
5748 	ap->stats.idle_irq = 1;
5749 #endif
5750 	return ap;
5751 }
5752 
5753 static void ata_host_release(struct device *gendev, void *res)
5754 {
5755 	struct ata_host *host = dev_get_drvdata(gendev);
5756 	int i;
5757 
5758 	for (i = 0; i < host->n_ports; i++) {
5759 		struct ata_port *ap = host->ports[i];
5760 
5761 		if (!ap)
5762 			continue;
5763 
5764 		if (ap->scsi_host)
5765 			scsi_host_put(ap->scsi_host);
5766 
5767 		kfree(ap->pmp_link);
5768 		kfree(ap->slave_link);
5769 		kfree(ap);
5770 		host->ports[i] = NULL;
5771 	}
5772 
5773 	dev_set_drvdata(gendev, NULL);
5774 }
5775 
5776 /**
5777  *	ata_host_alloc - allocate and init basic ATA host resources
5778  *	@dev: generic device this host is associated with
5779  *	@max_ports: maximum number of ATA ports associated with this host
5780  *
5781  *	Allocate and initialize basic ATA host resources.  An LLD calls
5782  *	this function to allocate a host, initializes it fully, and then
5783  *	attaches it using ata_host_register().
5784  *
5785  *	@max_ports ports are allocated and host->n_ports is
5786  *	initialized to @max_ports.  The caller is allowed to decrease
5787  *	host->n_ports before calling ata_host_register().  The unused
5788  *	ports will be automatically freed on registration.
5789  *
5790  *	RETURNS:
5791  *	Allocated ATA host on success, NULL on failure.
5792  *
5793  *	LOCKING:
5794  *	Inherited from calling layer (may sleep).
5795  */
5796 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5797 {
5798 	struct ata_host *host;
5799 	size_t sz;
5800 	int i;
5801 
5802 	DPRINTK("ENTER\n");
5803 
5804 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5805 		return NULL;
5806 
5807 	/* alloc a container for our list of ATA ports (buses) */
5808 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5810 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5811 	if (!host)
5812 		goto err_out;
5813 
5814 	devres_add(dev, host);
5815 	dev_set_drvdata(dev, host);
5816 
5817 	spin_lock_init(&host->lock);
5818 	host->dev = dev;
5819 	host->n_ports = max_ports;
5820 
5821 	/* allocate ports bound to this host */
5822 	for (i = 0; i < max_ports; i++) {
5823 		struct ata_port *ap;
5824 
5825 		ap = ata_port_alloc(host);
5826 		if (!ap)
5827 			goto err_out;
5828 
5829 		ap->port_no = i;
5830 		host->ports[i] = ap;
5831 	}
5832 
5833 	devres_remove_group(dev, NULL);
5834 	return host;
5835 
5836  err_out:
5837 	devres_release_group(dev, NULL);
5838 	return NULL;
5839 }
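
/*
 * Usage sketch (hypothetical names and counts): an LLD that cannot
 * determine the port count at allocation time may over-allocate and
 * shrink host->n_ports before registration; ata_host_register() frees
 * the surplus ports.
 *
 *	host = ata_host_alloc(&pdev->dev, 4);
 *	if (!host)
 *		return -ENOMEM;
 *	... probe the hardware ...
 *	host->n_ports = nr_found_ports;
 *	rc = ata_host_register(host, &my_sht);
 */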
5840 
5841 /**
5842  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5843  *	@dev: generic device this host is associated with
5844  *	@ppi: array of ATA port_info to initialize host with
5845  *	@n_ports: number of ATA ports attached to this host
5846  *
5847  *	Allocate ATA host and initialize it with info from @ppi.  If
5848  *	NULL-terminated, @ppi may contain fewer entries than @n_ports.  The
5849  *	last entry will be used for the remaining ports.
5850  *
5851  *	RETURNS:
5852  *	Allocated ATA host on success, NULL on failure.
5853  *
5854  *	LOCKING:
5855  *	Inherited from calling layer (may sleep).
5856  */
5857 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5858 				      const struct ata_port_info * const * ppi,
5859 				      int n_ports)
5860 {
5861 	const struct ata_port_info *pi;
5862 	struct ata_host *host;
5863 	int i, j;
5864 
5865 	host = ata_host_alloc(dev, n_ports);
5866 	if (!host)
5867 		return NULL;
5868 
5869 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5870 		struct ata_port *ap = host->ports[i];
5871 
5872 		if (ppi[j])
5873 			pi = ppi[j++];
5874 
5875 		ap->pio_mask = pi->pio_mask;
5876 		ap->mwdma_mask = pi->mwdma_mask;
5877 		ap->udma_mask = pi->udma_mask;
5878 		ap->flags |= pi->flags;
5879 		ap->link.flags |= pi->link_flags;
5880 		ap->ops = pi->port_ops;
5881 
5882 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5883 			host->ops = pi->port_ops;
5884 	}
5885 
5886 	return host;
5887 }
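
/*
 * Usage sketch (hypothetical port_info and ops names): since @ppi may be
 * NULL-terminated short of @n_ports, one entry can describe every port
 * of a symmetric two-port controller.
 *
 *	static const struct ata_port_info info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &info, NULL };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */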
5888 
5889 /**
5890  *	ata_slave_link_init - initialize slave link
5891  *	@ap: port to initialize slave link for
5892  *
5893  *	Create and initialize slave link for @ap.  This enables slave
5894  *	link handling on the port.
5895  *
5896  *	In libata, a port contains links and a link contains devices.
5897  *	There is a single host link, but if a PMP is attached to it,
5898  *	there can be multiple fan-out links.  On SATA, there's usually
5899  *	a single device connected to a link but PATA and SATA
5900  *	controllers emulating TF based interface can have two - master
5901  *	and slave.
5902  *
5903  *	However, there are a few controllers which don't fit into this
5904  *	abstraction too well - SATA controllers which emulate TF
5905  *	interface with both master and slave devices but also have
5906  *	separate SCR register sets for each device.  These controllers
5907  *	need separate links for physical link handling
5908  *	(e.g. onlineness, link speed) but should be treated like a
5909  *	traditional M/S controller for everything else (e.g. command
5910  *	issue, softreset).
5911  *
5912  *	slave_link is libata's way of handling this class of
5913  *	controllers without impacting core layer too much.  For
5914  *	anything other than physical link handling, the default host
5915  *	link is used for both master and slave.  For physical link
5916  *	handling, separate @ap->slave_link is used.  All dirty details
5917  *	are implemented inside libata core layer.  From LLD's POV, the
5918  *	only difference is that prereset, hardreset and postreset are
5919  *	called once more for the slave link, so the reset sequence
5920  *	looks like the following.
5921  *
5922  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5923  *	softreset(M) -> postreset(M) -> postreset(S)
5924  *
5925  *	Note that softreset is called only for the master.  Softreset
5926  *	resets both M/S by definition, so SRST on master should handle
5927  *	both (the standard method will work just fine).
5928  *
5929  *	LOCKING:
5930  *	Should be called before host is registered.
5931  *
5932  *	RETURNS:
5933  *	0 on success, -errno on failure.
5934  */
5935 int ata_slave_link_init(struct ata_port *ap)
5936 {
5937 	struct ata_link *link;
5938 
5939 	WARN_ON(ap->slave_link);
5940 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5941 
5942 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5943 	if (!link)
5944 		return -ENOMEM;
5945 
5946 	ata_link_init(ap, link, 1);
5947 	ap->slave_link = link;
5948 	return 0;
5949 }
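
/*
 * Usage sketch (hypothetical surrounding probe code): an LLD driving
 * such a controller calls this once per port during probe, before the
 * host is registered.
 *
 *	rc = ata_slave_link_init(ap);
 *	if (rc)
 *		return rc;
 */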
5950 
5951 static void ata_host_stop(struct device *gendev, void *res)
5952 {
5953 	struct ata_host *host = dev_get_drvdata(gendev);
5954 	int i;
5955 
5956 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5957 
5958 	for (i = 0; i < host->n_ports; i++) {
5959 		struct ata_port *ap = host->ports[i];
5960 
5961 		if (ap->ops->port_stop)
5962 			ap->ops->port_stop(ap);
5963 	}
5964 
5965 	if (host->ops->host_stop)
5966 		host->ops->host_stop(host);
5967 }
5968 
5969 /**
5970  *	ata_finalize_port_ops - finalize ata_port_operations
5971  *	@ops: ata_port_operations to finalize
5972  *
5973  *	An ata_port_operations can inherit from another ops and that
5974  *	ops can again inherit from another.  This can go on as many
5975  *	times as necessary as long as there is no loop in the
5976  *	inheritance chain.
5977  *
5978  *	Ops tables are finalized when the host is started.  NULL or
5979  *	unspecified entries are inherited from the closest ancestor
5980  *	which has the method and the entry is populated with it.
5981  *	After finalization, the ops table directly points to all the
5982  *	methods and ->inherits is no longer necessary and cleared.
5983  *
5984  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5985  *
5986  *	LOCKING:
5987  *	None.
5988  */
5989 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5990 {
5991 	static DEFINE_SPINLOCK(lock);
5992 	const struct ata_port_operations *cur;
5993 	void **begin = (void **)ops;
5994 	void **end = (void **)&ops->inherits;
5995 	void **pp;
5996 
5997 	if (!ops || !ops->inherits)
5998 		return;
5999 
6000 	spin_lock(&lock);
6001 
6002 	for (cur = ops->inherits; cur; cur = cur->inherits) {
6003 		void **inherit = (void **)cur;
6004 
6005 		for (pp = begin; pp < end; pp++, inherit++)
6006 			if (!*pp)
6007 				*pp = *inherit;
6008 	}
6009 
6010 	for (pp = begin; pp < end; pp++)
6011 		if (IS_ERR(*pp))
6012 			*pp = NULL;
6013 
6014 	ops->inherits = NULL;
6015 
6016 	spin_unlock(&lock);
6017 }
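
/*
 * Illustrative sketch (hypothetical LLD ops and methods): only the
 * methods an LLD overrides need to be specified; everything else is
 * filled in from the ancestor chain at finalization, and ATA_OP_NULL
 * forces a slot to stay NULL.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *	};
 */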
6018 
6019 /**
6020  *	ata_host_start - start and freeze ports of an ATA host
6021  *	@host: ATA host to start ports for
6022  *
6023  *	Start and then freeze ports of @host.  Started status is
6024  *	recorded in host->flags, so this function can be called
6025  *	multiple times.  Ports are guaranteed to get started only
6026  *	once.  If host->ops isn't initialized yet, it's set to the
6027  *	first non-dummy port ops.
6028  *
6029  *	LOCKING:
6030  *	Inherited from calling layer (may sleep).
6031  *
6032  *	RETURNS:
6033  *	0 if all ports are started successfully, -errno otherwise.
6034  */
6035 int ata_host_start(struct ata_host *host)
6036 {
6037 	int have_stop = 0;
6038 	void *start_dr = NULL;
6039 	int i, rc;
6040 
6041 	if (host->flags & ATA_HOST_STARTED)
6042 		return 0;
6043 
6044 	ata_finalize_port_ops(host->ops);
6045 
6046 	for (i = 0; i < host->n_ports; i++) {
6047 		struct ata_port *ap = host->ports[i];
6048 
6049 		ata_finalize_port_ops(ap->ops);
6050 
6051 		if (!host->ops && !ata_port_is_dummy(ap))
6052 			host->ops = ap->ops;
6053 
6054 		if (ap->ops->port_stop)
6055 			have_stop = 1;
6056 	}
6057 
6058 	if (host->ops->host_stop)
6059 		have_stop = 1;
6060 
6061 	if (have_stop) {
6062 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6063 		if (!start_dr)
6064 			return -ENOMEM;
6065 	}
6066 
6067 	for (i = 0; i < host->n_ports; i++) {
6068 		struct ata_port *ap = host->ports[i];
6069 
6070 		if (ap->ops->port_start) {
6071 			rc = ap->ops->port_start(ap);
6072 			if (rc) {
6073 				if (rc != -ENODEV)
6074 					dev_printk(KERN_ERR, host->dev,
6075 						"failed to start port %d "
6076 						"(errno=%d)\n", i, rc);
6077 				goto err_out;
6078 			}
6079 		}
6080 		ata_eh_freeze_port(ap);
6081 	}
6082 
6083 	if (start_dr)
6084 		devres_add(host->dev, start_dr);
6085 	host->flags |= ATA_HOST_STARTED;
6086 	return 0;
6087 
6088  err_out:
6089 	while (--i >= 0) {
6090 		struct ata_port *ap = host->ports[i];
6091 
6092 		if (ap->ops->port_stop)
6093 			ap->ops->port_stop(ap);
6094 	}
6095 	devres_free(start_dr);
6096 	return rc;
6097 }
6098 
6099 /**
6100  *	ata_host_init - Initialize a host struct
6101  *	@host:	host to initialize
6102  *	@dev:	device host is attached to
6103  *	@flags:	host flags
6104  *	@ops:	port_ops
6105  *
6106  *	LOCKING:
6107  *	PCI/etc. bus probe sem.
6108  *
6109  */
6110 /* KILLME - the only user left is ipr */
6111 void ata_host_init(struct ata_host *host, struct device *dev,
6112 		   unsigned long flags, struct ata_port_operations *ops)
6113 {
6114 	spin_lock_init(&host->lock);
6115 	host->dev = dev;
6116 	host->flags = flags;
6117 	host->ops = ops;
6118 }
6119 
6120 
6121 static void async_port_probe(void *data, async_cookie_t cookie)
6122 {
6123 	int rc;
6124 	struct ata_port *ap = data;
6125 
6126 	/*
6127 	 * If we're not allowed to scan this host in parallel,
6128 	 * we need to wait until all previous scans have completed
6129 	 * before going further.
6130 	 * Jeff Garzik says this is only within a controller, so we
6131 	 * don't need to wait for port 0, only for later ports.
6132 	 */
6133 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6134 		async_synchronize_cookie(cookie);
6135 
6136 	/* probe */
6137 	if (ap->ops->error_handler) {
6138 		struct ata_eh_info *ehi = &ap->link.eh_info;
6139 		unsigned long flags;
6140 
6141 		ata_port_probe(ap);
6142 
6143 		/* kick EH for boot probing */
6144 		spin_lock_irqsave(ap->lock, flags);
6145 
6146 		ehi->probe_mask |= ATA_ALL_DEVICES;
6147 		ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
6148 		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6149 
6150 		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6151 		ap->pflags |= ATA_PFLAG_LOADING;
6152 		ata_port_schedule_eh(ap);
6153 
6154 		spin_unlock_irqrestore(ap->lock, flags);
6155 
6156 		/* wait for EH to finish */
6157 		ata_port_wait_eh(ap);
6158 	} else {
6159 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6160 		rc = ata_bus_probe(ap);
6161 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6162 
6163 		if (rc) {
6164 			/* FIXME: do something useful here?
6165 			 * Current libata behavior will
6166 			 * tear down everything when
6167 			 * the module is removed
6168 			 * or the h/w is unplugged.
6169 			 */
6170 		}
6171 	}
6172 
6173 	/* in order to keep device order, we need to synchronize at this point */
6174 	async_synchronize_cookie(cookie);
6175 
6176 	ata_scsi_scan_host(ap, 1);
6177 
6178 }
6179 /**
6180  *	ata_host_register - register initialized ATA host
6181  *	@host: ATA host to register
6182  *	@sht: template for SCSI host
6183  *
6184  *	Register initialized ATA host.  @host is allocated using
6185  *	ata_host_alloc() and fully initialized by LLD.  This function
6186  *	starts ports, registers @host with ATA and SCSI layers and
6187  *	probes attached devices.
6188  *
6189  *	LOCKING:
6190  *	Inherited from calling layer (may sleep).
6191  *
6192  *	RETURNS:
6193  *	0 on success, -errno otherwise.
6194  */
6195 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6196 {
6197 	int i, rc;
6198 
6199 	/* host must have been started */
6200 	if (!(host->flags & ATA_HOST_STARTED)) {
6201 		dev_printk(KERN_ERR, host->dev,
6202 			   "BUG: trying to register unstarted host\n");
6203 		WARN_ON(1);
6204 		return -EINVAL;
6205 	}
6206 
6207 	/* Blow away unused ports.  This happens when LLD can't
6208 	 * determine the exact number of ports to allocate at
6209 	 * allocation time.
6210 	 */
6211 	for (i = host->n_ports; host->ports[i]; i++)
6212 		kfree(host->ports[i]);
6213 
6214 	/* give ports names and add SCSI hosts */
6215 	for (i = 0; i < host->n_ports; i++)
6216 		host->ports[i]->print_id = ata_print_id++;
6217 
6218 	rc = ata_scsi_add_hosts(host, sht);
6219 	if (rc)
6220 		return rc;
6221 
6222 	/* associate with ACPI nodes */
6223 	ata_acpi_associate(host);
6224 
6225 	/* set cable, sata_spd_limit and report */
6226 	for (i = 0; i < host->n_ports; i++) {
6227 		struct ata_port *ap = host->ports[i];
6228 		unsigned long xfer_mask;
6229 
6230 		/* set SATA cable type if still unset */
6231 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6232 			ap->cbl = ATA_CBL_SATA;
6233 
6234 		/* init sata_spd_limit to the current value */
6235 		sata_link_init_spd(&ap->link);
6236 		if (ap->slave_link)
6237 			sata_link_init_spd(ap->slave_link);
6238 
6239 		/* print per-port info to dmesg */
6240 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6241 					      ap->udma_mask);
6242 
6243 		if (!ata_port_is_dummy(ap)) {
6244 			ata_port_printk(ap, KERN_INFO,
6245 					"%cATA max %s %s\n",
6246 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6247 					ata_mode_string(xfer_mask),
6248 					ap->link.eh_info.desc);
6249 			ata_ehi_clear_desc(&ap->link.eh_info);
6250 		} else
6251 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6252 	}
6253 
6254 	/* perform each probe asynchronously */
6255 	for (i = 0; i < host->n_ports; i++) {
6256 		struct ata_port *ap = host->ports[i];
6257 		async_schedule(async_port_probe, ap);
6258 	}
6259 
6260 	return 0;
6261 }
6262 
6263 /**
6264  *	ata_host_activate - start host, request IRQ and register it
6265  *	@host: target ATA host
6266  *	@irq: IRQ to request
6267  *	@irq_handler: irq_handler used when requesting IRQ
6268  *	@irq_flags: irq_flags used when requesting IRQ
6269  *	@sht: scsi_host_template to use when registering the host
6270  *
6271  *	After allocating an ATA host and initializing it, most libata
6272  *	LLDs perform three steps to activate the host - start host,
6273  *	request IRQ and register it.  This helper takes the necessary
6274  *	arguments and performs the three steps in one go.
6275  *
6276  *	An invalid IRQ skips the IRQ registration and expects the host to
6277  *	have set polling mode on the port. In this case, @irq_handler
6278  *	should be NULL.
6279  *
6280  *	LOCKING:
6281  *	Inherited from calling layer (may sleep).
6282  *
6283  *	RETURNS:
6284  *	0 on success, -errno otherwise.
6285  */
6286 int ata_host_activate(struct ata_host *host, int irq,
6287 		      irq_handler_t irq_handler, unsigned long irq_flags,
6288 		      struct scsi_host_template *sht)
6289 {
6290 	int i, rc;
6291 
6292 	rc = ata_host_start(host);
6293 	if (rc)
6294 		return rc;
6295 
6296 	/* Special case for polling mode */
6297 	if (!irq) {
6298 		WARN_ON(irq_handler);
6299 		return ata_host_register(host, sht);
6300 	}
6301 
6302 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6303 			      dev_driver_string(host->dev), host);
6304 	if (rc)
6305 		return rc;
6306 
6307 	for (i = 0; i < host->n_ports; i++)
6308 		ata_port_desc(host->ports[i], "irq %d", irq);
6309 
6310 	rc = ata_host_register(host, sht);
6311 	/* if failed, just free the IRQ and leave ports alone */
6312 	if (rc)
6313 		devm_free_irq(host->dev, irq, host);
6314 
6315 	return rc;
6316 }
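
/*
 * Usage sketch (hypothetical handler and template names; assumes the
 * port I/O addresses are already set up): the single call below is
 * equivalent to doing the three steps by hand.
 *
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 *
 * which expands to roughly:
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	rc = devm_request_irq(host->dev, pdev->irq, my_interrupt,
 *			      IRQF_SHARED, dev_driver_string(host->dev),
 *			      host);
 *	if (rc)
 *		return rc;
 *	return ata_host_register(host, &my_sht);
 */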
6317 
6318 /**
6319  *	ata_port_detach - Detach ATA port in preparation for device removal
6320  *	@ap: ATA port to be detached
6321  *
6322  *	Detach all ATA devices and the associated SCSI devices of @ap;
6323  *	then, remove the associated SCSI host.  @ap is guaranteed to
6324  *	be quiescent on return from this function.
6325  *
6326  *	LOCKING:
6327  *	Kernel thread context (may sleep).
6328  */
6329 static void ata_port_detach(struct ata_port *ap)
6330 {
6331 	unsigned long flags;
6332 
6333 	if (!ap->ops->error_handler)
6334 		goto skip_eh;
6335 
6336 	/* tell EH we're leaving & flush EH */
6337 	spin_lock_irqsave(ap->lock, flags);
6338 	ap->pflags |= ATA_PFLAG_UNLOADING;
6339 	ata_port_schedule_eh(ap);
6340 	spin_unlock_irqrestore(ap->lock, flags);
6341 
6342 	/* wait till EH commits suicide */
6343 	ata_port_wait_eh(ap);
6344 
6345 	/* it better be dead now */
6346 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6347 
6348 	cancel_rearming_delayed_work(&ap->hotplug_task);
6349 
6350  skip_eh:
6351 	/* remove the associated SCSI host */
6352 	scsi_remove_host(ap->scsi_host);
6353 }
6354 
6355 /**
6356  *	ata_host_detach - Detach all ports of an ATA host
6357  *	@host: Host to detach
6358  *
6359  *	Detach all ports of @host.
6360  *
6361  *	LOCKING:
6362  *	Kernel thread context (may sleep).
6363  */
6364 void ata_host_detach(struct ata_host *host)
6365 {
6366 	int i;
6367 
6368 	for (i = 0; i < host->n_ports; i++)
6369 		ata_port_detach(host->ports[i]);
6370 
6371 	/* the host is dead now, dissociate ACPI */
6372 	ata_acpi_dissociate(host);
6373 }
6374 
6375 #ifdef CONFIG_PCI
6376 
6377 /**
6378  *	ata_pci_remove_one - PCI layer callback for device removal
6379  *	@pdev: PCI device that was removed
6380  *
6381  *	PCI layer indicates to libata via this hook that a hot-unplug or
6382  *	module unload event has occurred.  Detach all ports.  Resource
6383  *	release is handled via devres.
6384  *
6385  *	LOCKING:
6386  *	Inherited from PCI layer (may sleep).
6387  */
6388 void ata_pci_remove_one(struct pci_dev *pdev)
6389 {
6390 	struct device *dev = &pdev->dev;
6391 	struct ata_host *host = dev_get_drvdata(dev);
6392 
6393 	ata_host_detach(host);
6394 }
6395 
6396 /* move to PCI subsystem */
6397 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6398 {
6399 	unsigned long tmp = 0;
6400 
6401 	switch (bits->width) {
6402 	case 1: {
6403 		u8 tmp8 = 0;
6404 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6405 		tmp = tmp8;
6406 		break;
6407 	}
6408 	case 2: {
6409 		u16 tmp16 = 0;
6410 		pci_read_config_word(pdev, bits->reg, &tmp16);
6411 		tmp = tmp16;
6412 		break;
6413 	}
6414 	case 4: {
6415 		u32 tmp32 = 0;
6416 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6417 		tmp = tmp32;
6418 		break;
6419 	}
6420 
6421 	default:
6422 		return -EINVAL;
6423 	}
6424 
6425 	tmp &= bits->mask;
6426 
6427 	return (tmp == bits->val) ? 1 : 0;
6428 }
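
/*
 * Usage sketch (hypothetical register offset and bits): an LLD can
 * describe a channel-enable bit in PCI config space and check it before
 * touching the hardware.  The initializer order is { reg, width, mask,
 * val }.
 *
 *	static const struct pci_bits my_enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;
 */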
6429 
6430 #ifdef CONFIG_PM
6431 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6432 {
6433 	pci_save_state(pdev);
6434 	pci_disable_device(pdev);
6435 
6436 	if (mesg.event & PM_EVENT_SLEEP)
6437 		pci_set_power_state(pdev, PCI_D3hot);
6438 }
6439 
6440 int ata_pci_device_do_resume(struct pci_dev *pdev)
6441 {
6442 	int rc;
6443 
6444 	pci_set_power_state(pdev, PCI_D0);
6445 	pci_restore_state(pdev);
6446 
6447 	rc = pcim_enable_device(pdev);
6448 	if (rc) {
6449 		dev_printk(KERN_ERR, &pdev->dev,
6450 			   "failed to enable device after resume (%d)\n", rc);
6451 		return rc;
6452 	}
6453 
6454 	pci_set_master(pdev);
6455 	return 0;
6456 }
6457 
6458 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6459 {
6460 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6461 	int rc = 0;
6462 
6463 	rc = ata_host_suspend(host, mesg);
6464 	if (rc)
6465 		return rc;
6466 
6467 	ata_pci_device_do_suspend(pdev, mesg);
6468 
6469 	return 0;
6470 }
6471 
6472 int ata_pci_device_resume(struct pci_dev *pdev)
6473 {
6474 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6475 	int rc;
6476 
6477 	rc = ata_pci_device_do_resume(pdev);
6478 	if (rc == 0)
6479 		ata_host_resume(host);
6480 	return rc;
6481 }
6482 #endif /* CONFIG_PM */
6483 
6484 #endif /* CONFIG_PCI */
6485 
6486 static int __init ata_parse_force_one(char **cur,
6487 				      struct ata_force_ent *force_ent,
6488 				      const char **reason)
6489 {
6490 	/* FIXME: Currently, there's no way to tag init const data and
6491 	 * using __initdata causes build failure on some versions of
6492 	 * gcc.  Once __initdataconst is implemented, add const to the
6493 	 * following structure.
6494 	 */
6495 	static struct ata_force_param force_tbl[] __initdata = {
6496 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6497 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6498 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6499 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6500 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6501 		{ "sata",	.cbl		= ATA_CBL_SATA },
6502 		{ "1.5Gbps",	.spd_limit	= 1 },
6503 		{ "3.0Gbps",	.spd_limit	= 2 },
6504 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6505 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6506 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6507 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6508 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6509 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6510 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6511 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6512 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6513 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6514 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6515 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6516 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6517 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6518 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6519 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6520 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6521 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6522 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6523 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6524 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6525 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6526 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6527 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6528 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6529 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6530 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6531 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6532 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6533 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6534 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6535 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6536 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6537 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6538 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6539 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6540 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6541 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6542 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6543 	};
6544 	char *start = *cur, *p = *cur;
6545 	char *id, *val, *endp;
6546 	const struct ata_force_param *match_fp = NULL;
6547 	int nr_matches = 0, i;
6548 
6549 	/* find where this param ends and update *cur */
6550 	while (*p != '\0' && *p != ',')
6551 		p++;
6552 
6553 	if (*p == '\0')
6554 		*cur = p;
6555 	else
6556 		*cur = p + 1;
6557 
6558 	*p = '\0';
6559 
6560 	/* parse */
6561 	p = strchr(start, ':');
6562 	if (!p) {
6563 		val = strstrip(start);
6564 		goto parse_val;
6565 	}
6566 	*p = '\0';
6567 
6568 	id = strstrip(start);
6569 	val = strstrip(p + 1);
6570 
6571 	/* parse id */
6572 	p = strchr(id, '.');
6573 	if (p) {
6574 		*p++ = '\0';
6575 		force_ent->device = simple_strtoul(p, &endp, 10);
6576 		if (p == endp || *endp != '\0') {
6577 			*reason = "invalid device";
6578 			return -EINVAL;
6579 		}
6580 	}
6581 
6582 	force_ent->port = simple_strtoul(id, &endp, 10);
6583 	if (id == endp || *endp != '\0') {
6584 		*reason = "invalid port/link";
6585 		return -EINVAL;
6586 	}
6587 
6588  parse_val:
6589 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6590 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6591 		const struct ata_force_param *fp = &force_tbl[i];
6592 
6593 		if (strncasecmp(val, fp->name, strlen(val)))
6594 			continue;
6595 
6596 		nr_matches++;
6597 		match_fp = fp;
6598 
6599 		if (strcasecmp(val, fp->name) == 0) {
6600 			nr_matches = 1;
6601 			break;
6602 		}
6603 	}
6604 
6605 	if (!nr_matches) {
6606 		*reason = "unknown value";
6607 		return -EINVAL;
6608 	}
6609 	if (nr_matches > 1) {
6610 		*reason = "ambiguous value";
6611 		return -EINVAL;
6612 	}
6613 
6614 	force_ent->param = *match_fp;
6615 
6616 	return 0;
6617 }
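
/*
 * Illustrative libata.force strings handled by the parser above.  IDs
 * take the form port[.device] and match the ATA IDs printed on the
 * console; an entry without an ID reuses the previous one, or applies
 * everywhere if it is the first entry.
 *
 *	libata.force=noncq			disable NCQ everywhere
 *	libata.force=2:1.5Gbps			limit port 2 to 1.5Gbps
 *	libata.force=1.00:udma/33,3:norst	per-device and per-port entries
 */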
6618 
6619 static void __init ata_parse_force_param(void)
6620 {
6621 	int idx = 0, size = 1;
6622 	int last_port = -1, last_device = -1;
6623 	char *p, *cur, *next;
6624 
6625 	/* calculate maximum number of params and allocate force_tbl */
6626 	for (p = ata_force_param_buf; *p; p++)
6627 		if (*p == ',')
6628 			size++;
6629 
6630 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6631 	if (!ata_force_tbl) {
6632 		printk(KERN_WARNING "ata: failed to extend force table, "
6633 		       "libata.force ignored\n");
6634 		return;
6635 	}
6636 
6637 	/* parse and populate the table */
6638 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6639 		const char *reason = "";
6640 		struct ata_force_ent te = { .port = -1, .device = -1 };
6641 
6642 		next = cur;
6643 		if (ata_parse_force_one(&next, &te, &reason)) {
6644 			printk(KERN_WARNING "ata: failed to parse force "
6645 			       "parameter \"%s\" (%s)\n",
6646 			       cur, reason);
6647 			continue;
6648 		}
6649 
6650 		if (te.port == -1) {
6651 			te.port = last_port;
6652 			te.device = last_device;
6653 		}
6654 
6655 		ata_force_tbl[idx++] = te;
6656 
6657 		last_port = te.port;
6658 		last_device = te.device;
6659 	}
6660 
6661 	ata_force_tbl_size = idx;
6662 }
6663 
6664 static int __init ata_init(void)
6665 {
6666 	ata_parse_force_param();
6667 
6668 	/*
6669 	 * FIXME: In UP case, there is only one workqueue thread and if you
6670 	 * have more than one PIO device, latency is bloody awful, with
6671 	 * occasional multi-second "hiccups" as one PIO device waits for
6672 	 * another.  It's an ugly wart that users DO occasionally complain
6673 	 * about; luckily most users have at most one PIO polled device.
6674 	 */
6675 	ata_wq = create_workqueue("ata");
6676 	if (!ata_wq)
6677 		goto free_force_tbl;
6678 
6679 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6680 	if (!ata_aux_wq)
6681 		goto free_wq;
6682 
6683 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6684 	return 0;
6685 
6686 free_wq:
6687 	destroy_workqueue(ata_wq);
6688 free_force_tbl:
6689 	kfree(ata_force_tbl);
6690 	return -ENOMEM;
6691 }
6692 
6693 static void __exit ata_exit(void)
6694 {
6695 	kfree(ata_force_tbl);
6696 	destroy_workqueue(ata_wq);
6697 	destroy_workqueue(ata_aux_wq);
6698 }
6699 
6700 subsys_initcall(ata_init);
6701 module_exit(ata_exit);
6702 
6703 static unsigned long ratelimit_time;
6704 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6705 
6706 int ata_ratelimit(void)
6707 {
6708 	int rc;
6709 	unsigned long flags;
6710 
6711 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6712 
6713 	if (time_after(jiffies, ratelimit_time)) {
6714 		rc = 1;
6715 		ratelimit_time = jiffies + (HZ/5);
6716 	} else
6717 		rc = 0;
6718 
6719 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6720 
6721 	return rc;
6722 }
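
/*
 * Usage sketch (hypothetical message): interrupt and error paths use
 * this to cap noisy diagnostics at roughly five messages per second.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (status 0x%x)\n", status);
 */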
6723 
6724 /**
6725  *	ata_wait_register - wait until register value changes
6726  *	@reg: IO-mapped register
6727  *	@mask: Mask to apply to read register value
6728  *	@val: Wait condition
6729  *	@interval: polling interval in milliseconds
6730  *	@timeout: timeout in milliseconds
6731  *
6732  *	Waiting for some bits of register to change is a common
6733  *	operation for ATA controllers.  This function reads 32bit LE
6734  *	IO-mapped register @reg and tests for the following condition.
6735  *
6736  *	(ioread32(@reg) & @mask) != @val
6737  *
6738  *	If the condition is met, it returns; otherwise, the process is
6739  *	repeated after @interval until @timeout expires.
6740  *
6741  *	LOCKING:
6742  *	Kernel thread context (may sleep)
6743  *
6744  *	RETURNS:
6745  *	The final register value.
6746  */
6747 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6748 		      unsigned long interval, unsigned long timeout)
6749 {
6750 	unsigned long deadline;
6751 	u32 tmp;
6752 
6753 	tmp = ioread32(reg);
6754 
6755 	/* Calculate timeout _after_ the first read to make sure
6756 	 * preceding writes reach the controller before starting to
6757 	 * eat away the timeout.
6758 	 */
6759 	deadline = ata_deadline(jiffies, timeout);
6760 
6761 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6762 		msleep(interval);
6763 		tmp = ioread32(reg);
6764 	}
6765 
6766 	return tmp;
6767 }
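
/*
 * Usage sketch (hypothetical register and bit names): poll every 10ms,
 * for at most 1000ms, until a busy bit clears; the caller inspects the
 * returned value to tell success from timeout.
 *
 *	status = ata_wait_register(mmio + MY_STATUS, MY_BUSY, MY_BUSY,
 *				   10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;
 */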
6768 
6769 /*
6770  * Dummy port_ops
6771  */
6772 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6773 {
6774 	return AC_ERR_SYSTEM;
6775 }
6776 
6777 static void ata_dummy_error_handler(struct ata_port *ap)
6778 {
6779 	/* truly dummy */
6780 }
6781 
6782 struct ata_port_operations ata_dummy_port_ops = {
6783 	.qc_prep		= ata_noop_qc_prep,
6784 	.qc_issue		= ata_dummy_qc_issue,
6785 	.error_handler		= ata_dummy_error_handler,
6786 };
6787 
6788 const struct ata_port_info ata_dummy_port_info = {
6789 	.port_ops		= &ata_dummy_port_ops,
6790 };
6791 
6792 /*
6793  * libata is essentially a library of internal helper functions for
6794  * low-level ATA host controller drivers.  As such, the API/ABI is
6795  * likely to change as new drivers are added and updated.
6796  * Do not depend on ABI/API stability.
6797  */
6798 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6799 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6800 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6801 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6802 EXPORT_SYMBOL_GPL(sata_port_ops);
6803 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6804 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6805 EXPORT_SYMBOL_GPL(ata_link_next);
6806 EXPORT_SYMBOL_GPL(ata_dev_next);
6807 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6808 EXPORT_SYMBOL_GPL(ata_host_init);
6809 EXPORT_SYMBOL_GPL(ata_host_alloc);
6810 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6811 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6812 EXPORT_SYMBOL_GPL(ata_host_start);
6813 EXPORT_SYMBOL_GPL(ata_host_register);
6814 EXPORT_SYMBOL_GPL(ata_host_activate);
6815 EXPORT_SYMBOL_GPL(ata_host_detach);
6816 EXPORT_SYMBOL_GPL(ata_sg_init);
6817 EXPORT_SYMBOL_GPL(ata_qc_complete);
6818 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6819 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6820 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6821 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6822 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6823 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6824 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6825 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6826 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6827 EXPORT_SYMBOL_GPL(ata_mode_string);
6828 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6829 EXPORT_SYMBOL_GPL(ata_port_start);
6830 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6831 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6832 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6833 EXPORT_SYMBOL_GPL(ata_port_probe);
6834 EXPORT_SYMBOL_GPL(ata_dev_disable);
6835 EXPORT_SYMBOL_GPL(sata_set_spd);
6836 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6837 EXPORT_SYMBOL_GPL(sata_link_debounce);
6838 EXPORT_SYMBOL_GPL(sata_link_resume);
6839 EXPORT_SYMBOL_GPL(ata_std_prereset);
6840 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6841 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6842 EXPORT_SYMBOL_GPL(ata_std_postreset);
6843 EXPORT_SYMBOL_GPL(ata_dev_classify);
6844 EXPORT_SYMBOL_GPL(ata_dev_pair);
6845 EXPORT_SYMBOL_GPL(ata_port_disable);
6846 EXPORT_SYMBOL_GPL(ata_ratelimit);
6847 EXPORT_SYMBOL_GPL(ata_wait_register);
6848 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6849 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6850 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6851 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6852 EXPORT_SYMBOL_GPL(sata_scr_valid);
6853 EXPORT_SYMBOL_GPL(sata_scr_read);
6854 EXPORT_SYMBOL_GPL(sata_scr_write);
6855 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6856 EXPORT_SYMBOL_GPL(ata_link_online);
6857 EXPORT_SYMBOL_GPL(ata_link_offline);
6858 #ifdef CONFIG_PM
6859 EXPORT_SYMBOL_GPL(ata_host_suspend);
6860 EXPORT_SYMBOL_GPL(ata_host_resume);
6861 #endif /* CONFIG_PM */
6862 EXPORT_SYMBOL_GPL(ata_id_string);
6863 EXPORT_SYMBOL_GPL(ata_id_c_string);
6864 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6865 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6866 
6867 EXPORT_SYMBOL_GPL(ata_pio_queue_task);
6868 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6869 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6870 EXPORT_SYMBOL_GPL(ata_timing_compute);
6871 EXPORT_SYMBOL_GPL(ata_timing_merge);
6872 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6873 
6874 #ifdef CONFIG_PCI
6875 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6876 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6877 #ifdef CONFIG_PM
6878 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6879 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6880 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6881 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6882 #endif /* CONFIG_PM */
6883 #endif /* CONFIG_PCI */
6884 
6885 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6886 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6887 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6888 EXPORT_SYMBOL_GPL(ata_port_desc);
6889 #ifdef CONFIG_PCI
6890 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6891 #endif /* CONFIG_PCI */
6892 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6893 EXPORT_SYMBOL_GPL(ata_link_abort);
6894 EXPORT_SYMBOL_GPL(ata_port_abort);
6895 EXPORT_SYMBOL_GPL(ata_port_freeze);
6896 EXPORT_SYMBOL_GPL(sata_async_notification);
6897 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6898 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6899 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6900 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6901 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6902 EXPORT_SYMBOL_GPL(ata_do_eh);
6903 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6904 
6905 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6906 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6907 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6908 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6909 EXPORT_SYMBOL_GPL(ata_cable_sata);
6910