xref: /linux/drivers/ata/libata-core.c (revision f8324e20f8289dffc646d64366332e05eaacab25)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68 #include <linux/ratelimit.h>
69 
70 #include "libata.h"
71 
72 
73 /* debounce timing parameters in msecs { interval, duration, timeout } */
74 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
75 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
76 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
77 
78 const struct ata_port_operations ata_base_port_ops = {
79 	.prereset		= ata_std_prereset,
80 	.postreset		= ata_std_postreset,
81 	.error_handler		= ata_std_error_handler,
82 };
83 
84 const struct ata_port_operations sata_port_ops = {
85 	.inherits		= &ata_base_port_ops,
86 
87 	.qc_defer		= ata_std_qc_defer,
88 	.hardreset		= sata_std_hardreset,
89 };
90 
91 static unsigned int ata_dev_init_params(struct ata_device *dev,
92 					u16 heads, u16 sectors);
93 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
94 static unsigned int ata_dev_set_feature(struct ata_device *dev,
95 					u8 enable, u8 feature);
96 static void ata_dev_xfermask(struct ata_device *dev);
97 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
98 
99 unsigned int ata_print_id = 1;
100 
101 struct workqueue_struct *ata_aux_wq;
102 
103 struct ata_force_param {
104 	const char	*name;
105 	unsigned int	cbl;
106 	int		spd_limit;
107 	unsigned long	xfer_mask;
108 	unsigned int	horkage_on;
109 	unsigned int	horkage_off;
110 	unsigned int	lflags;
111 };
112 
113 struct ata_force_ent {
114 	int			port;
115 	int			device;
116 	struct ata_force_param	param;
117 };
118 
119 static struct ata_force_ent *ata_force_tbl;
120 static int ata_force_tbl_size;
121 
122 static char ata_force_param_buf[PAGE_SIZE] __initdata;
123 /* param_buf is thrown away after initialization, disallow read */
124 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
125 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
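
/*
 * Illustrative note (not part of the original file): the force parameter
 * takes a comma-separated list of [ID:]VALUE entries, where ID is a port
 * number optionally followed by ".device".  A hypothetical kernel command
 * line, assuming the syntax described in Documentation/kernel-parameters.txt:
 *
 *	libata.force=1.00:udma4,2:40c
 *
 * would limit device 0 on port 1 to UDMA4 and force a 40-wire cable type
 * on port 2.
 */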
126 
127 static int atapi_enabled = 1;
128 module_param(atapi_enabled, int, 0444);
129 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
130 
131 static int atapi_dmadir = 0;
132 module_param(atapi_dmadir, int, 0444);
133 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
134 
135 int atapi_passthru16 = 1;
136 module_param(atapi_passthru16, int, 0444);
137 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
138 
139 int libata_fua = 0;
140 module_param_named(fua, libata_fua, int, 0444);
141 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
142 
143 static int ata_ignore_hpa;
144 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
145 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
146 
147 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
148 module_param_named(dma, libata_dma_mask, int, 0444);
149 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
150 
151 static int ata_probe_timeout;
152 module_param(ata_probe_timeout, int, 0444);
153 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
154 
155 int libata_noacpi = 0;
156 module_param_named(noacpi, libata_noacpi, int, 0444);
157 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
158 
159 int libata_allow_tpm = 0;
160 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
161 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
162 
163 static int atapi_an;
164 module_param(atapi_an, int, 0444);
165 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
166 
167 MODULE_AUTHOR("Jeff Garzik");
168 MODULE_DESCRIPTION("Library module for ATA devices");
169 MODULE_LICENSE("GPL");
170 MODULE_VERSION(DRV_VERSION);
171 
172 
173 static bool ata_sstatus_online(u32 sstatus)
174 {
175 	return (sstatus & 0xf) == 0x3;
176 }
177 
178 /**
179  *	ata_link_next - link iteration helper
180  *	@link: the previous link, NULL to start
181  *	@ap: ATA port containing links to iterate
182  *	@mode: iteration mode, one of ATA_LITER_*
183  *
184  *	LOCKING:
185  *	Host lock or EH context.
186  *
187  *	RETURNS:
188  *	Pointer to the next link.
189  */
190 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
191 			       enum ata_link_iter_mode mode)
192 {
193 	BUG_ON(mode != ATA_LITER_EDGE &&
194 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
195 
196 	/* NULL link indicates start of iteration */
197 	if (!link)
198 		switch (mode) {
199 		case ATA_LITER_EDGE:
200 		case ATA_LITER_PMP_FIRST:
201 			if (sata_pmp_attached(ap))
202 				return ap->pmp_link;
203 			/* fall through */
204 		case ATA_LITER_HOST_FIRST:
205 			return &ap->link;
206 		}
207 
208 	/* we just iterated over the host link, what's next? */
209 	if (link == &ap->link)
210 		switch (mode) {
211 		case ATA_LITER_HOST_FIRST:
212 			if (sata_pmp_attached(ap))
213 				return ap->pmp_link;
214 			/* fall through */
215 		case ATA_LITER_PMP_FIRST:
216 			if (unlikely(ap->slave_link))
217 				return ap->slave_link;
218 			/* fall through */
219 		case ATA_LITER_EDGE:
220 			return NULL;
221 		}
222 
223 	/* slave_link excludes PMP */
224 	if (unlikely(link == ap->slave_link))
225 		return NULL;
226 
227 	/* we were over a PMP link */
228 	if (++link < ap->pmp_link + ap->nr_pmp_links)
229 		return link;
230 
231 	if (mode == ATA_LITER_PMP_FIRST)
232 		return &ap->link;
233 
234 	return NULL;
235 }
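
/*
 * Illustrative sketch (not part of the original file): drivers normally use
 * ata_link_next() through the ata_for_each_link() wrapper rather than
 * calling it directly, e.g. to walk the edge links of a port:
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE) {
 *		// the host link when no PMP is attached, otherwise
 *		// each PMP fan-out link
 *	}
 */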
236 
237 /**
238  *	ata_dev_next - device iteration helper
239  *	@dev: the previous device, NULL to start
240  *	@link: ATA link containing devices to iterate
241  *	@mode: iteration mode, one of ATA_DITER_*
242  *
243  *	LOCKING:
244  *	Host lock or EH context.
245  *
246  *	RETURNS:
247  *	Pointer to the next device.
248  */
249 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
250 				enum ata_dev_iter_mode mode)
251 {
252 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
253 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
254 
255 	/* NULL dev indicates start of iteration */
256 	if (!dev)
257 		switch (mode) {
258 		case ATA_DITER_ENABLED:
259 		case ATA_DITER_ALL:
260 			dev = link->device;
261 			goto check;
262 		case ATA_DITER_ENABLED_REVERSE:
263 		case ATA_DITER_ALL_REVERSE:
264 			dev = link->device + ata_link_max_devices(link) - 1;
265 			goto check;
266 		}
267 
268  next:
269 	/* move to the next one */
270 	switch (mode) {
271 	case ATA_DITER_ENABLED:
272 	case ATA_DITER_ALL:
273 		if (++dev < link->device + ata_link_max_devices(link))
274 			goto check;
275 		return NULL;
276 	case ATA_DITER_ENABLED_REVERSE:
277 	case ATA_DITER_ALL_REVERSE:
278 		if (--dev >= link->device)
279 			goto check;
280 		return NULL;
281 	}
282 
283  check:
284 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
285 	    !ata_dev_enabled(dev))
286 		goto next;
287 	return dev;
288 }
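
/*
 * Illustrative sketch (not part of the original file): as with links,
 * ata_dev_next() is normally driven through the ata_for_each_dev() wrapper,
 * e.g. to visit only the enabled devices on a link:
 *
 *	struct ata_device *dev;
 *
 *	ata_for_each_dev(dev, link, ENABLED) {
 *		// dev has passed ata_dev_enabled()
 *	}
 */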
289 
290 /**
291  *	ata_dev_phys_link - find physical link for a device
292  *	@dev: ATA device to look up physical link for
293  *
294  *	Look up physical link which @dev is attached to.  Note that
295  *	this is different from @dev->link only when @dev is on slave
296  *	link.  For all other cases, it's the same as @dev->link.
297  *
298  *	LOCKING:
299  *	Don't care.
300  *
301  *	RETURNS:
302  *	Pointer to the found physical link.
303  */
304 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
305 {
306 	struct ata_port *ap = dev->link->ap;
307 
308 	if (!ap->slave_link)
309 		return dev->link;
310 	if (!dev->devno)
311 		return &ap->link;
312 	return ap->slave_link;
313 }
314 
315 /**
316  *	ata_force_cbl - force cable type according to libata.force
317  *	@ap: ATA port of interest
318  *
319  *	Force cable type according to libata.force and whine about it.
320  *	The last entry which has matching port number is used, so it
321  *	can be specified as part of device force parameters.  For
322  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
323  *	same effect.
324  *
325  *	LOCKING:
326  *	EH context.
327  */
328 void ata_force_cbl(struct ata_port *ap)
329 {
330 	int i;
331 
332 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
333 		const struct ata_force_ent *fe = &ata_force_tbl[i];
334 
335 		if (fe->port != -1 && fe->port != ap->print_id)
336 			continue;
337 
338 		if (fe->param.cbl == ATA_CBL_NONE)
339 			continue;
340 
341 		ap->cbl = fe->param.cbl;
342 		ata_port_printk(ap, KERN_NOTICE,
343 				"FORCE: cable set to %s\n", fe->param.name);
344 		return;
345 	}
346 }
347 
348 /**
349  *	ata_force_link_limits - force link limits according to libata.force
350  *	@link: ATA link of interest
351  *
352  *	Force link flags and SATA spd limit according to libata.force
353  *	and whine about it.  When only the port part is specified
354  *	(e.g. 1:), the limit applies to all links connected to both
355  *	the host link and all fan-out ports connected via PMP.  If the
356  *	device part is specified as 0 (e.g. 1.00:), it specifies the
357  *	first fan-out link, not the host link.  Device number 15 always
358  *	points to the host link whether PMP is attached or not.  If the
359  *	controller has a slave link, device number 16 points to it.
360  *
361  *	LOCKING:
362  *	EH context.
363  */
364 static void ata_force_link_limits(struct ata_link *link)
365 {
366 	bool did_spd = false;
367 	int linkno = link->pmp;
368 	int i;
369 
370 	if (ata_is_host_link(link))
371 		linkno += 15;
372 
373 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
374 		const struct ata_force_ent *fe = &ata_force_tbl[i];
375 
376 		if (fe->port != -1 && fe->port != link->ap->print_id)
377 			continue;
378 
379 		if (fe->device != -1 && fe->device != linkno)
380 			continue;
381 
382 		/* only honor the first spd limit */
383 		if (!did_spd && fe->param.spd_limit) {
384 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
385 			ata_link_printk(link, KERN_NOTICE,
386 					"FORCE: PHY spd limit set to %s\n",
387 					fe->param.name);
388 			did_spd = true;
389 		}
390 
391 		/* let lflags stack */
392 		if (fe->param.lflags) {
393 			link->flags |= fe->param.lflags;
394 			ata_link_printk(link, KERN_NOTICE,
395 					"FORCE: link flag 0x%x forced -> 0x%x\n",
396 					fe->param.lflags, link->flags);
397 		}
398 	}
399 }
400 
401 /**
402  *	ata_force_xfermask - force xfermask according to libata.force
403  *	@dev: ATA device of interest
404  *
405  *	Force xfer_mask according to libata.force and whine about it.
406  *	For consistency with link selection, device number 15 selects
407  *	the first device connected to the host link.
408  *
409  *	LOCKING:
410  *	EH context.
411  */
412 static void ata_force_xfermask(struct ata_device *dev)
413 {
414 	int devno = dev->link->pmp + dev->devno;
415 	int alt_devno = devno;
416 	int i;
417 
418 	/* allow n.15/16 for devices attached to host port */
419 	if (ata_is_host_link(dev->link))
420 		alt_devno += 15;
421 
422 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
423 		const struct ata_force_ent *fe = &ata_force_tbl[i];
424 		unsigned long pio_mask, mwdma_mask, udma_mask;
425 
426 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
427 			continue;
428 
429 		if (fe->device != -1 && fe->device != devno &&
430 		    fe->device != alt_devno)
431 			continue;
432 
433 		if (!fe->param.xfer_mask)
434 			continue;
435 
436 		ata_unpack_xfermask(fe->param.xfer_mask,
437 				    &pio_mask, &mwdma_mask, &udma_mask);
438 		if (udma_mask)
439 			dev->udma_mask = udma_mask;
440 		else if (mwdma_mask) {
441 			dev->udma_mask = 0;
442 			dev->mwdma_mask = mwdma_mask;
443 		} else {
444 			dev->udma_mask = 0;
445 			dev->mwdma_mask = 0;
446 			dev->pio_mask = pio_mask;
447 		}
448 
449 		ata_dev_printk(dev, KERN_NOTICE,
450 			"FORCE: xfer_mask set to %s\n", fe->param.name);
451 		return;
452 	}
453 }
454 
455 /**
456  *	ata_force_horkage - force horkage according to libata.force
457  *	@dev: ATA device of interest
458  *
459  *	Force horkage according to libata.force and whine about it.
460  *	For consistency with link selection, device number 15 selects
461  *	the first device connected to the host link.
462  *
463  *	LOCKING:
464  *	EH context.
465  */
466 static void ata_force_horkage(struct ata_device *dev)
467 {
468 	int devno = dev->link->pmp + dev->devno;
469 	int alt_devno = devno;
470 	int i;
471 
472 	/* allow n.15/16 for devices attached to host port */
473 	if (ata_is_host_link(dev->link))
474 		alt_devno += 15;
475 
476 	for (i = 0; i < ata_force_tbl_size; i++) {
477 		const struct ata_force_ent *fe = &ata_force_tbl[i];
478 
479 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
480 			continue;
481 
482 		if (fe->device != -1 && fe->device != devno &&
483 		    fe->device != alt_devno)
484 			continue;
485 
486 		if (!(~dev->horkage & fe->param.horkage_on) &&
487 		    !(dev->horkage & fe->param.horkage_off))
488 			continue;
489 
490 		dev->horkage |= fe->param.horkage_on;
491 		dev->horkage &= ~fe->param.horkage_off;
492 
493 		ata_dev_printk(dev, KERN_NOTICE,
494 			"FORCE: horkage modified (%s)\n", fe->param.name);
495 	}
496 }
497 
498 /**
499  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
500  *	@opcode: SCSI opcode
501  *
502  *	Determine ATAPI command type from @opcode.
503  *
504  *	LOCKING:
505  *	None.
506  *
507  *	RETURNS:
508  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
509  */
510 int atapi_cmd_type(u8 opcode)
511 {
512 	switch (opcode) {
513 	case GPCMD_READ_10:
514 	case GPCMD_READ_12:
515 		return ATAPI_READ;
516 
517 	case GPCMD_WRITE_10:
518 	case GPCMD_WRITE_12:
519 	case GPCMD_WRITE_AND_VERIFY_10:
520 		return ATAPI_WRITE;
521 
522 	case GPCMD_READ_CD:
523 	case GPCMD_READ_CD_MSF:
524 		return ATAPI_READ_CD;
525 
526 	case ATA_16:
527 	case ATA_12:
528 		if (atapi_passthru16)
529 			return ATAPI_PASS_THRU;
530 		/* fall thru */
531 	default:
532 		return ATAPI_MISC;
533 	}
534 }
535 
536 /**
537  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
538  *	@tf: Taskfile to convert
539  *	@pmp: Port multiplier port
540  *	@is_cmd: This FIS is for command
541  *	@fis: Buffer into which data will be output
542  *
543  *	Converts a standard ATA taskfile to a Serial ATA
544  *	FIS structure (Register - Host to Device).
545  *
546  *	LOCKING:
547  *	Inherited from caller.
548  */
549 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
550 {
551 	fis[0] = 0x27;			/* Register - Host to Device FIS */
552 	fis[1] = pmp & 0xf;		/* Port multiplier number */
553 	if (is_cmd)
554 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
555 
556 	fis[2] = tf->command;
557 	fis[3] = tf->feature;
558 
559 	fis[4] = tf->lbal;
560 	fis[5] = tf->lbam;
561 	fis[6] = tf->lbah;
562 	fis[7] = tf->device;
563 
564 	fis[8] = tf->hob_lbal;
565 	fis[9] = tf->hob_lbam;
566 	fis[10] = tf->hob_lbah;
567 	fis[11] = tf->hob_feature;
568 
569 	fis[12] = tf->nsect;
570 	fis[13] = tf->hob_nsect;
571 	fis[14] = 0;
572 	fis[15] = tf->ctl;
573 
574 	fis[16] = 0;
575 	fis[17] = 0;
576 	fis[18] = 0;
577 	fis[19] = 0;
578 }
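
/*
 * Illustrative sketch (not part of the original file): a SATA LLD typically
 * uses ata_tf_to_fis() to build the 20-byte Register - Host to Device FIS
 * for a queued command, roughly as below (cmd_fis being a hypothetical
 * buffer in the controller's command table):
 *
 *	u8 cmd_fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_fis);
 */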
579 
580 /**
581  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
582  *	@fis: Buffer from which data will be input
583  *	@tf: Taskfile to output
584  *
585  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
586  *
587  *	LOCKING:
588  *	Inherited from caller.
589  */
590 
591 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
592 {
593 	tf->command	= fis[2];	/* status */
594 	tf->feature	= fis[3];	/* error */
595 
596 	tf->lbal	= fis[4];
597 	tf->lbam	= fis[5];
598 	tf->lbah	= fis[6];
599 	tf->device	= fis[7];
600 
601 	tf->hob_lbal	= fis[8];
602 	tf->hob_lbam	= fis[9];
603 	tf->hob_lbah	= fis[10];
604 
605 	tf->nsect	= fis[12];
606 	tf->hob_nsect	= fis[13];
607 }
608 
609 static const u8 ata_rw_cmds[] = {
610 	/* pio multi */
611 	ATA_CMD_READ_MULTI,
612 	ATA_CMD_WRITE_MULTI,
613 	ATA_CMD_READ_MULTI_EXT,
614 	ATA_CMD_WRITE_MULTI_EXT,
615 	0,
616 	0,
617 	0,
618 	ATA_CMD_WRITE_MULTI_FUA_EXT,
619 	/* pio */
620 	ATA_CMD_PIO_READ,
621 	ATA_CMD_PIO_WRITE,
622 	ATA_CMD_PIO_READ_EXT,
623 	ATA_CMD_PIO_WRITE_EXT,
624 	0,
625 	0,
626 	0,
627 	0,
628 	/* dma */
629 	ATA_CMD_READ,
630 	ATA_CMD_WRITE,
631 	ATA_CMD_READ_EXT,
632 	ATA_CMD_WRITE_EXT,
633 	0,
634 	0,
635 	0,
636 	ATA_CMD_WRITE_FUA_EXT
637 };
638 
639 /**
640  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
641  *	@tf: command to examine and configure
642  *	@dev: device tf belongs to
643  *
644  *	Examine the device configuration and tf->flags to calculate
645  *	the proper read/write commands and protocol to use.
646  *
647  *	LOCKING:
648  *	caller.
649  */
650 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
651 {
652 	u8 cmd;
653 
654 	int index, fua, lba48, write;
655 
656 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
657 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
658 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
659 
660 	if (dev->flags & ATA_DFLAG_PIO) {
661 		tf->protocol = ATA_PROT_PIO;
662 		index = dev->multi_count ? 0 : 8;
663 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
664 		/* Unable to use DMA due to host limitation */
665 		tf->protocol = ATA_PROT_PIO;
666 		index = dev->multi_count ? 0 : 8;
667 	} else {
668 		tf->protocol = ATA_PROT_DMA;
669 		index = 16;
670 	}
671 
672 	cmd = ata_rw_cmds[index + fua + lba48 + write];
673 	if (cmd) {
674 		tf->command = cmd;
675 		return 0;
676 	}
677 	return -1;
678 }
679 
680 /**
681  *	ata_tf_read_block - Read block address from ATA taskfile
682  *	@tf: ATA taskfile of interest
683  *	@dev: ATA device @tf belongs to
684  *
685  *	LOCKING:
686  *	None.
687  *
688  *	Read block address from @tf.  This function can handle all
689  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
690  *	flags select the address format to use.
691  *
692  *	RETURNS:
693  *	Block address read from @tf.
694  */
695 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
696 {
697 	u64 block = 0;
698 
699 	if (tf->flags & ATA_TFLAG_LBA) {
700 		if (tf->flags & ATA_TFLAG_LBA48) {
701 			block |= (u64)tf->hob_lbah << 40;
702 			block |= (u64)tf->hob_lbam << 32;
703 			block |= (u64)tf->hob_lbal << 24;
704 		} else
705 			block |= (tf->device & 0xf) << 24;
706 
707 		block |= tf->lbah << 16;
708 		block |= tf->lbam << 8;
709 		block |= tf->lbal;
710 	} else {
711 		u32 cyl, head, sect;
712 
713 		cyl = tf->lbam | (tf->lbah << 8);
714 		head = tf->device & 0xf;
715 		sect = tf->lbal;
716 
717 		if (!sect) {
718 			ata_dev_printk(dev, KERN_WARNING, "device reported "
719 				       "invalid CHS sector 0\n");
720 			sect = 1; /* oh well */
721 		}
722 
723 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
724 	}
725 
726 	return block;
727 }
728 
729 /**
730  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
731  *	@tf: Target ATA taskfile
732  *	@dev: ATA device @tf belongs to
733  *	@block: Block address
734  *	@n_block: Number of blocks
735  *	@tf_flags: RW/FUA etc...
736  *	@tag: tag
737  *
738  *	LOCKING:
739  *	None.
740  *
741  *	Build ATA taskfile @tf for read/write request described by
742  *	@block, @n_block, @tf_flags and @tag on @dev.
743  *
744  *	RETURNS:
745  *
746  *	0 on success, -ERANGE if the request is too large for @dev,
747  *	-EINVAL if the request is invalid.
748  */
749 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
750 		    u64 block, u32 n_block, unsigned int tf_flags,
751 		    unsigned int tag)
752 {
753 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
754 	tf->flags |= tf_flags;
755 
756 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
757 		/* yay, NCQ */
758 		if (!lba_48_ok(block, n_block))
759 			return -ERANGE;
760 
761 		tf->protocol = ATA_PROT_NCQ;
762 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
763 
764 		if (tf->flags & ATA_TFLAG_WRITE)
765 			tf->command = ATA_CMD_FPDMA_WRITE;
766 		else
767 			tf->command = ATA_CMD_FPDMA_READ;
768 
769 		tf->nsect = tag << 3;
770 		tf->hob_feature = (n_block >> 8) & 0xff;
771 		tf->feature = n_block & 0xff;
772 
773 		tf->hob_lbah = (block >> 40) & 0xff;
774 		tf->hob_lbam = (block >> 32) & 0xff;
775 		tf->hob_lbal = (block >> 24) & 0xff;
776 		tf->lbah = (block >> 16) & 0xff;
777 		tf->lbam = (block >> 8) & 0xff;
778 		tf->lbal = block & 0xff;
779 
780 		tf->device = 1 << 6;
781 		if (tf->flags & ATA_TFLAG_FUA)
782 			tf->device |= 1 << 7;
783 	} else if (dev->flags & ATA_DFLAG_LBA) {
784 		tf->flags |= ATA_TFLAG_LBA;
785 
786 		if (lba_28_ok(block, n_block)) {
787 			/* use LBA28 */
788 			tf->device |= (block >> 24) & 0xf;
789 		} else if (lba_48_ok(block, n_block)) {
790 			if (!(dev->flags & ATA_DFLAG_LBA48))
791 				return -ERANGE;
792 
793 			/* use LBA48 */
794 			tf->flags |= ATA_TFLAG_LBA48;
795 
796 			tf->hob_nsect = (n_block >> 8) & 0xff;
797 
798 			tf->hob_lbah = (block >> 40) & 0xff;
799 			tf->hob_lbam = (block >> 32) & 0xff;
800 			tf->hob_lbal = (block >> 24) & 0xff;
801 		} else
802 			/* request too large even for LBA48 */
803 			return -ERANGE;
804 
805 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
806 			return -EINVAL;
807 
808 		tf->nsect = n_block & 0xff;
809 
810 		tf->lbah = (block >> 16) & 0xff;
811 		tf->lbam = (block >> 8) & 0xff;
812 		tf->lbal = block & 0xff;
813 
814 		tf->device |= ATA_LBA;
815 	} else {
816 		/* CHS */
817 		u32 sect, head, cyl, track;
818 
819 		/* The request -may- be too large for CHS addressing. */
820 		if (!lba_28_ok(block, n_block))
821 			return -ERANGE;
822 
823 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
824 			return -EINVAL;
825 
826 		/* Convert LBA to CHS */
827 		track = (u32)block / dev->sectors;
828 		cyl   = track / dev->heads;
829 		head  = track % dev->heads;
830 		sect  = (u32)block % dev->sectors + 1;
831 
832 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
833 			(u32)block, track, cyl, head, sect);
834 
835 		/* Check whether the converted CHS can fit.
836 		   Cylinder: 0-65535
837 		   Head: 0-15
838 		   Sector: 1-255*/
839 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
840 			return -ERANGE;
841 
842 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
843 		tf->lbal = sect;
844 		tf->lbam = cyl;
845 		tf->lbah = cyl >> 8;
846 		tf->device |= head;
847 	}
848 
849 	return 0;
850 }
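
/*
 * Worked example for the CHS conversion above (illustrative numbers only):
 * with dev->heads == 16 and dev->sectors == 63, block 5000 converts to
 * track = 5000 / 63 = 79, cyl = 79 / 16 = 4, head = 79 % 16 = 15 and
 * sect = 5000 % 63 + 1 = 24.  Feeding these into the inverse formula in
 * ata_tf_read_block(), (4 * 16 + 15) * 63 + 24 - 1, recovers block 5000.
 */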
851 
852 /**
853  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
854  *	@pio_mask: pio_mask
855  *	@mwdma_mask: mwdma_mask
856  *	@udma_mask: udma_mask
857  *
858  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
859  *	unsigned int xfer_mask.
860  *
861  *	LOCKING:
862  *	None.
863  *
864  *	RETURNS:
865  *	Packed xfer_mask.
866  */
867 unsigned long ata_pack_xfermask(unsigned long pio_mask,
868 				unsigned long mwdma_mask,
869 				unsigned long udma_mask)
870 {
871 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
872 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
873 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
874 }
875 
876 /**
877  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
878  *	@xfer_mask: xfer_mask to unpack
879  *	@pio_mask: resulting pio_mask
880  *	@mwdma_mask: resulting mwdma_mask
881  *	@udma_mask: resulting udma_mask
882  *
883  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
884  *	Any NULL distination masks will be ignored.
885  */
886 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
887 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
888 {
889 	if (pio_mask)
890 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
891 	if (mwdma_mask)
892 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
893 	if (udma_mask)
894 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
895 }
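
/*
 * Illustrative sketch (not part of the original file): the pack/unpack pair
 * is typically used to carry a device's three per-type masks around as a
 * single value and split it again at the other end, e.g. for some
 * struct ata_device *dev:
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
 *				      dev->udma_mask);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 */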
896 
897 static const struct ata_xfer_ent {
898 	int shift, bits;
899 	u8 base;
900 } ata_xfer_tbl[] = {
901 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
902 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
903 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
904 	{ -1, },
905 };
906 
907 /**
908  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
909  *	@xfer_mask: xfer_mask of interest
910  *
911  *	Return matching XFER_* value for @xfer_mask.  Only the highest
912  *	bit of @xfer_mask is considered.
913  *
914  *	LOCKING:
915  *	None.
916  *
917  *	RETURNS:
918  *	Matching XFER_* value, 0xff if no match found.
919  */
920 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
921 {
922 	int highbit = fls(xfer_mask) - 1;
923 	const struct ata_xfer_ent *ent;
924 
925 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
926 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
927 			return ent->base + highbit - ent->shift;
928 	return 0xff;
929 }
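
/*
 * Worked example (illustrative): for a mask whose highest set bit is
 * ATA_SHIFT_UDMA + 5, the UDMA entry of ata_xfer_tbl matches and
 * ata_xfer_mask2mode() returns XFER_UDMA_0 + 5, i.e. XFER_UDMA_5.  A mask
 * with no bit inside any entry's range yields 0xff.
 */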
930 
931 /**
932  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
933  *	@xfer_mode: XFER_* of interest
934  *
935  *	Return matching xfer_mask for @xfer_mode.
936  *
937  *	LOCKING:
938  *	None.
939  *
940  *	RETURNS:
941  *	Matching xfer_mask, 0 if no match found.
942  */
943 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
944 {
945 	const struct ata_xfer_ent *ent;
946 
947 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
948 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
949 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
950 				& ~((1 << ent->shift) - 1);
951 	return 0;
952 }
953 
954 /**
955  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
956  *	@xfer_mode: XFER_* of interest
957  *
958  *	Return matching xfer_shift for @xfer_mode.
959  *
960  *	LOCKING:
961  *	None.
962  *
963  *	RETURNS:
964  *	Matching xfer_shift, -1 if no match found.
965  */
966 int ata_xfer_mode2shift(unsigned long xfer_mode)
967 {
968 	const struct ata_xfer_ent *ent;
969 
970 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
971 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
972 			return ent->shift;
973 	return -1;
974 }
975 
976 /**
977  *	ata_mode_string - convert xfer_mask to string
978  *	@xfer_mask: mask of bits supported; only highest bit counts.
979  *
980  *	Determine string which represents the highest speed
981  *	(highest bit in @xfer_mask).
982  *
983  *	LOCKING:
984  *	None.
985  *
986  *	RETURNS:
987  *	Constant C string representing highest speed listed in
988  *	@xfer_mask, or the constant C string "<n/a>".
989  */
990 const char *ata_mode_string(unsigned long xfer_mask)
991 {
992 	static const char * const xfer_mode_str[] = {
993 		"PIO0",
994 		"PIO1",
995 		"PIO2",
996 		"PIO3",
997 		"PIO4",
998 		"PIO5",
999 		"PIO6",
1000 		"MWDMA0",
1001 		"MWDMA1",
1002 		"MWDMA2",
1003 		"MWDMA3",
1004 		"MWDMA4",
1005 		"UDMA/16",
1006 		"UDMA/25",
1007 		"UDMA/33",
1008 		"UDMA/44",
1009 		"UDMA/66",
1010 		"UDMA/100",
1011 		"UDMA/133",
1012 		"UDMA7",
1013 	};
1014 	int highbit;
1015 
1016 	highbit = fls(xfer_mask) - 1;
1017 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1018 		return xfer_mode_str[highbit];
1019 	return "<n/a>";
1020 }
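
/*
 * Worked example (illustrative): combining the helpers above,
 * ata_mode_string(ata_xfer_mode2mask(XFER_UDMA_5)) takes the highest bit of
 * the resulting mask, which sits at ATA_SHIFT_UDMA + 5, and so returns
 * "UDMA/100".
 */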
1021 
1022 static const char *sata_spd_string(unsigned int spd)
1023 {
1024 	static const char * const spd_str[] = {
1025 		"1.5 Gbps",
1026 		"3.0 Gbps",
1027 		"6.0 Gbps",
1028 	};
1029 
1030 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1031 		return "<unknown>";
1032 	return spd_str[spd - 1];
1033 }
1034 
1035 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
1036 {
1037 	struct ata_link *link = dev->link;
1038 	struct ata_port *ap = link->ap;
1039 	u32 scontrol;
1040 	unsigned int err_mask;
1041 	int rc;
1042 
1043 	/*
1044 	 * disallow DIPM for drivers which haven't set
1045 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
1046 	 * phy ready will be set in the interrupt status on
1047 	 * state changes, which will cause some drivers to
1048 	 * think there are errors - additionally drivers will
1049 	 * need to disable hot plug.
1050 	 */
1051 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
1052 		ap->pm_policy = NOT_AVAILABLE;
1053 		return -EINVAL;
1054 	}
1055 
1056 	/*
1057 	 * For DIPM, we will only enable it for the
1058 	 * min_power setting.
1059 	 *
1060 	 * Why?  Because disks are too stupid to know that
1061 	 * if the host rejects a request to go to SLUMBER
1062 	 * they should retry at PARTIAL; instead they just
1063 	 * give up.  So, for medium_power to work at all,
1064 	 * we need to only allow HIPM.
1065 	 */
1066 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
1067 	if (rc)
1068 		return rc;
1069 
1070 	switch (policy) {
1071 	case MIN_POWER:
1072 		/* no restrictions on IPM transitions */
1073 		scontrol &= ~(0x3 << 8);
1074 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1075 		if (rc)
1076 			return rc;
1077 
1078 		/* enable DIPM */
1079 		if (dev->flags & ATA_DFLAG_DIPM)
1080 			err_mask = ata_dev_set_feature(dev,
1081 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
1082 		break;
1083 	case MEDIUM_POWER:
1084 		/* allow IPM to PARTIAL */
1085 		scontrol &= ~(0x1 << 8);
1086 		scontrol |= (0x2 << 8);
1087 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1088 		if (rc)
1089 			return rc;
1090 
1091 		/*
1092 		 * we don't have to disable DIPM since IPM flags
1093 		 * disallow transitions to SLUMBER, which effectively
1094 		 * disables DIPM if the device does not support PARTIAL
1095 		 */
1096 		break;
1097 	case NOT_AVAILABLE:
1098 	case MAX_PERFORMANCE:
1099 		/* disable all IPM transitions */
1100 		scontrol |= (0x3 << 8);
1101 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1102 		if (rc)
1103 			return rc;
1104 
1105 		/*
1106 		 * we don't have to disable DIPM since IPM flags
1107 		 * disallow all transitions, which effectively
1108 		 * disables DIPM anyway.
1109 		 */
1110 		break;
1111 	}
1112 
1113 	/* FIXME: handle SET FEATURES failure */
1114 	(void) err_mask;
1115 
1116 	return 0;
1117 }
1118 
1119 /**
1120  *	ata_dev_enable_pm - enable SATA interface power management
1121  *	@dev:  device to enable power management
1122  *	@policy: the link power management policy
1123  *
1124  *	Enable SATA Interface power management.  This will enable
1125  *	Device Interface Power Management (DIPM) for min_power
1126  * 	policy, and then call driver specific callbacks for
1127  *	enabling Host Initiated Power management.
1128  *
1129  *	Locking: Caller.
1130  *	Returns: nothing; on failure the port's pm_policy is set to MAX_PERFORMANCE.
1131  */
1132 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1133 {
1134 	int rc = 0;
1135 	struct ata_port *ap = dev->link->ap;
1136 
1137 	/* set HIPM first, then DIPM */
1138 	if (ap->ops->enable_pm)
1139 		rc = ap->ops->enable_pm(ap, policy);
1140 	if (rc)
1141 		goto enable_pm_out;
1142 	rc = ata_dev_set_dipm(dev, policy);
1143 
1144 enable_pm_out:
1145 	if (rc)
1146 		ap->pm_policy = MAX_PERFORMANCE;
1147 	else
1148 		ap->pm_policy = policy;
1149 	return /* rc */;	/* hopefully we can use 'rc' eventually */
1150 }
1151 
1152 #ifdef CONFIG_PM
1153 /**
1154  *	ata_dev_disable_pm - disable SATA interface power management
1155  *	@dev: device to disable power management
1156  *
1157  *	Disable SATA Interface power management.  This will disable
1158  *	Device Interface Power Management (DIPM) without changing
1159  * 	policy, and call driver specific callbacks for disabling Host
1160  * 	Initiated Power management.
1161  *
1162  *	Locking: Caller.
1163  *	Returns: void
1164  */
1165 static void ata_dev_disable_pm(struct ata_device *dev)
1166 {
1167 	struct ata_port *ap = dev->link->ap;
1168 
1169 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1170 	if (ap->ops->disable_pm)
1171 		ap->ops->disable_pm(ap);
1172 }
1173 #endif	/* CONFIG_PM */
1174 
1175 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1176 {
1177 	ap->pm_policy = policy;
1178 	ap->link.eh_info.action |= ATA_EH_LPM;
1179 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1180 	ata_port_schedule_eh(ap);
1181 }
1182 
1183 #ifdef CONFIG_PM
1184 static void ata_lpm_enable(struct ata_host *host)
1185 {
1186 	struct ata_link *link;
1187 	struct ata_port *ap;
1188 	struct ata_device *dev;
1189 	int i;
1190 
1191 	for (i = 0; i < host->n_ports; i++) {
1192 		ap = host->ports[i];
1193 		ata_for_each_link(link, ap, EDGE) {
1194 			ata_for_each_dev(dev, link, ALL)
1195 				ata_dev_disable_pm(dev);
1196 		}
1197 	}
1198 }
1199 
1200 static void ata_lpm_disable(struct ata_host *host)
1201 {
1202 	int i;
1203 
1204 	for (i = 0; i < host->n_ports; i++) {
1205 		struct ata_port *ap = host->ports[i];
1206 		ata_lpm_schedule(ap, ap->pm_policy);
1207 	}
1208 }
1209 #endif	/* CONFIG_PM */
1210 
1211 /**
1212  *	ata_dev_classify - determine device type based on ATA-spec signature
1213  *	@tf: ATA taskfile register set for device to be identified
1214  *
1215  *	Determine from taskfile register contents whether a device is
1216  *	ATA or ATAPI, as per "Signature and persistence" section
1217  *	of ATA/PI spec (volume 1, sect 5.14).
1218  *
1219  *	LOCKING:
1220  *	None.
1221  *
1222  *	RETURNS:
1223  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1224  *	%ATA_DEV_UNKNOWN in the event of failure.
1225  */
1226 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1227 {
1228 	/* Apple's open source Darwin code hints that some devices only
1229 	 * put a proper signature into the LBA mid/high registers,
1230 	 * so we only check those.  It's sufficient for uniqueness.
1231 	 *
1232 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1233 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1234 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1235 	 * spec has never mentioned using different signatures
1236 	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
1237 	 * Multiplier specification began to use 0x69/0x96 to identify
1238 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1239 	 * ATA/ATAPI-7 dropped its descriptions of 0x3c/0xc3 and
1240 	 * 0x69/0x96 shortly afterwards and described them as reserved
1241 	 * for SerialATA.
1242 	 *
1243 	 * We follow the current spec and consider that 0x69/0x96
1244 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1245 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1246 	 * SEMB signature.  This is worked around in
1247 	 * ata_dev_read_id().
1248 	 */
1249 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1250 		DPRINTK("found ATA device by sig\n");
1251 		return ATA_DEV_ATA;
1252 	}
1253 
1254 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1255 		DPRINTK("found ATAPI device by sig\n");
1256 		return ATA_DEV_ATAPI;
1257 	}
1258 
1259 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1260 		DPRINTK("found PMP device by sig\n");
1261 		return ATA_DEV_PMP;
1262 	}
1263 
1264 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1265 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1266 		return ATA_DEV_SEMB;
1267 	}
1268 
1269 	DPRINTK("unknown device\n");
1270 	return ATA_DEV_UNKNOWN;
1271 }
1272 
1273 /**
1274  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1275  *	@id: IDENTIFY DEVICE results we will examine
1276  *	@s: string into which data is output
1277  *	@ofs: offset into identify device page
1278  *	@len: length of string to return.  Must be an even number.
1279  *
1280  *	The strings in the IDENTIFY DEVICE page are broken up into
1281  *	16-bit chunks.  Run through the string, and output each
1282  *	8-bit chunk linearly, regardless of platform.
1283  *
1284  *	LOCKING:
1285  *	caller.
1286  */
1287 
1288 void ata_id_string(const u16 *id, unsigned char *s,
1289 		   unsigned int ofs, unsigned int len)
1290 {
1291 	unsigned int c;
1292 
1293 	BUG_ON(len & 1);
1294 
1295 	while (len > 0) {
1296 		c = id[ofs] >> 8;
1297 		*s = c;
1298 		s++;
1299 
1300 		c = id[ofs] & 0xff;
1301 		*s = c;
1302 		s++;
1303 
1304 		ofs++;
1305 		len -= 2;
1306 	}
1307 }
1308 
1309 /**
1310  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1311  *	@id: IDENTIFY DEVICE results we will examine
1312  *	@s: string into which data is output
1313  *	@ofs: offset into identify device page
1314  *	@len: length of string to return.  Must be an odd number.
1315  *
1316  *	This function is identical to ata_id_string except that it
1317  *	trims trailing spaces and terminates the resulting string with
1318  *	null.  @len must be actual maximum length (even number) + 1.
1319  *
1320  *	LOCKING:
1321  *	caller.
1322  */
1323 void ata_id_c_string(const u16 *id, unsigned char *s,
1324 		     unsigned int ofs, unsigned int len)
1325 {
1326 	unsigned char *p;
1327 
1328 	ata_id_string(id, s, ofs, len - 1);
1329 
1330 	p = s + strnlen(s, len - 1);
1331 	while (p > s && p[-1] == ' ')
1332 		p--;
1333 	*p = '\0';
1334 }
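
/*
 * Illustrative sketch (not part of the original file): callers typically use
 * ata_id_c_string() to pull the fixed-width strings out of IDENTIFY data,
 * e.g. the model string (assuming the ATA_ID_PROD and ATA_ID_PROD_LEN
 * offsets from <linux/ata.h>):
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */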
1335 
1336 static u64 ata_id_n_sectors(const u16 *id)
1337 {
1338 	if (ata_id_has_lba(id)) {
1339 		if (ata_id_has_lba48(id))
1340 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1341 		else
1342 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1343 	} else {
1344 		if (ata_id_current_chs_valid(id))
1345 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1346 			       id[ATA_ID_CUR_SECTORS];
1347 		else
1348 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1349 			       id[ATA_ID_SECTORS];
1350 	}
1351 }
1352 
1353 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1354 {
1355 	u64 sectors = 0;
1356 
1357 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1358 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1359 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1360 	sectors |= (tf->lbah & 0xff) << 16;
1361 	sectors |= (tf->lbam & 0xff) << 8;
1362 	sectors |= (tf->lbal & 0xff);
1363 
1364 	return sectors;
1365 }
1366 
1367 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1368 {
1369 	u64 sectors = 0;
1370 
1371 	sectors |= (tf->device & 0x0f) << 24;
1372 	sectors |= (tf->lbah & 0xff) << 16;
1373 	sectors |= (tf->lbam & 0xff) << 8;
1374 	sectors |= (tf->lbal & 0xff);
1375 
1376 	return sectors;
1377 }
1378 
1379 /**
1380  *	ata_read_native_max_address - Read native max address
1381  *	@dev: target device
1382  *	@max_sectors: out parameter for the result native max address
1383  *
1384  *	Perform an LBA48 or LBA28 native size query upon the device in
1385  *	question.
1386  *
1387  *	RETURNS:
1388  *	0 on success, -EACCES if command is aborted by the drive.
1389  *	-EIO on other errors.
1390  */
1391 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1392 {
1393 	unsigned int err_mask;
1394 	struct ata_taskfile tf;
1395 	int lba48 = ata_id_has_lba48(dev->id);
1396 
1397 	ata_tf_init(dev, &tf);
1398 
1399 	/* always clear all address registers */
1400 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1401 
1402 	if (lba48) {
1403 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1404 		tf.flags |= ATA_TFLAG_LBA48;
1405 	} else
1406 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1407 
1408 	tf.protocol |= ATA_PROT_NODATA;
1409 	tf.device |= ATA_LBA;
1410 
1411 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1412 	if (err_mask) {
1413 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1414 			       "max address (err_mask=0x%x)\n", err_mask);
1415 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1416 			return -EACCES;
1417 		return -EIO;
1418 	}
1419 
1420 	if (lba48)
1421 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1422 	else
1423 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1424 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1425 		(*max_sectors)--;
1426 	return 0;
1427 }
1428 
1429 /**
1430  *	ata_set_max_sectors - Set max sectors
1431  *	@dev: target device
1432  *	@new_sectors: new max sectors value to set for the device
1433  *
1434  *	Set max sectors of @dev to @new_sectors.
1435  *
1436  *	RETURNS:
1437  *	0 on success, -EACCES if command is aborted or denied (due to
1438  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1439  *	errors.
1440  */
1441 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1442 {
1443 	unsigned int err_mask;
1444 	struct ata_taskfile tf;
1445 	int lba48 = ata_id_has_lba48(dev->id);
1446 
1447 	new_sectors--;
1448 
1449 	ata_tf_init(dev, &tf);
1450 
1451 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1452 
1453 	if (lba48) {
1454 		tf.command = ATA_CMD_SET_MAX_EXT;
1455 		tf.flags |= ATA_TFLAG_LBA48;
1456 
1457 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1458 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1459 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1460 	} else {
1461 		tf.command = ATA_CMD_SET_MAX;
1462 
1463 		tf.device |= (new_sectors >> 24) & 0xf;
1464 	}
1465 
1466 	tf.protocol |= ATA_PROT_NODATA;
1467 	tf.device |= ATA_LBA;
1468 
1469 	tf.lbal = (new_sectors >> 0) & 0xff;
1470 	tf.lbam = (new_sectors >> 8) & 0xff;
1471 	tf.lbah = (new_sectors >> 16) & 0xff;
1472 
1473 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1474 	if (err_mask) {
1475 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1476 			       "max address (err_mask=0x%x)\n", err_mask);
1477 		if (err_mask == AC_ERR_DEV &&
1478 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1479 			return -EACCES;
1480 		return -EIO;
1481 	}
1482 
1483 	return 0;
1484 }
1485 
1486 /**
1487  *	ata_hpa_resize		-	Resize a device with an HPA set
1488  *	@dev: Device to resize
1489  *
1490  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1491  *	it if required to the full size of the media. The caller must check
1492  *	the drive has the HPA feature set enabled.
1493  *
1494  *	RETURNS:
1495  *	0 on success, -errno on failure.
1496  */
1497 static int ata_hpa_resize(struct ata_device *dev)
1498 {
1499 	struct ata_eh_context *ehc = &dev->link->eh_context;
1500 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1501 	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1502 	u64 sectors = ata_id_n_sectors(dev->id);
1503 	u64 native_sectors;
1504 	int rc;
1505 
1506 	/* do we need to do it? */
1507 	if (dev->class != ATA_DEV_ATA ||
1508 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1509 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1510 		return 0;
1511 
1512 	/* read native max address */
1513 	rc = ata_read_native_max_address(dev, &native_sectors);
1514 	if (rc) {
1515 		/* If device aborted the command or HPA isn't going to
1516 		 * be unlocked, skip HPA resizing.
1517 		 */
1518 		if (rc == -EACCES || !unlock_hpa) {
1519 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1520 				       "broken, skipping HPA handling\n");
1521 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1522 
1523 			/* we can continue if device aborted the command */
1524 			if (rc == -EACCES)
1525 				rc = 0;
1526 		}
1527 
1528 		return rc;
1529 	}
1530 	dev->n_native_sectors = native_sectors;
1531 
1532 	/* nothing to do? */
1533 	if (native_sectors <= sectors || !unlock_hpa) {
1534 		if (!print_info || native_sectors == sectors)
1535 			return 0;
1536 
1537 		if (native_sectors > sectors)
1538 			ata_dev_printk(dev, KERN_INFO,
1539 				"HPA detected: current %llu, native %llu\n",
1540 				(unsigned long long)sectors,
1541 				(unsigned long long)native_sectors);
1542 		else if (native_sectors < sectors)
1543 			ata_dev_printk(dev, KERN_WARNING,
1544 				"native sectors (%llu) is smaller than "
1545 				"sectors (%llu)\n",
1546 				(unsigned long long)native_sectors,
1547 				(unsigned long long)sectors);
1548 		return 0;
1549 	}
1550 
1551 	/* let's unlock HPA */
1552 	rc = ata_set_max_sectors(dev, native_sectors);
1553 	if (rc == -EACCES) {
1554 		/* if device aborted the command, skip HPA resizing */
1555 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1556 			       "(%llu -> %llu), skipping HPA handling\n",
1557 			       (unsigned long long)sectors,
1558 			       (unsigned long long)native_sectors);
1559 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1560 		return 0;
1561 	} else if (rc)
1562 		return rc;
1563 
1564 	/* re-read IDENTIFY data */
1565 	rc = ata_dev_reread_id(dev, 0);
1566 	if (rc) {
1567 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1568 			       "data after HPA resizing\n");
1569 		return rc;
1570 	}
1571 
1572 	if (print_info) {
1573 		u64 new_sectors = ata_id_n_sectors(dev->id);
1574 		ata_dev_printk(dev, KERN_INFO,
1575 			"HPA unlocked: %llu -> %llu, native %llu\n",
1576 			(unsigned long long)sectors,
1577 			(unsigned long long)new_sectors,
1578 			(unsigned long long)native_sectors);
1579 	}
1580 
1581 	return 0;
1582 }
1583 
1584 /**
1585  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1586  *	@id: IDENTIFY DEVICE page to dump
1587  *
1588  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1589  *	page.
1590  *
1591  *	LOCKING:
1592  *	caller.
1593  */
1594 
1595 static inline void ata_dump_id(const u16 *id)
1596 {
1597 	DPRINTK("49==0x%04x  "
1598 		"53==0x%04x  "
1599 		"63==0x%04x  "
1600 		"64==0x%04x  "
1601 		"75==0x%04x  \n",
1602 		id[49],
1603 		id[53],
1604 		id[63],
1605 		id[64],
1606 		id[75]);
1607 	DPRINTK("80==0x%04x  "
1608 		"81==0x%04x  "
1609 		"82==0x%04x  "
1610 		"83==0x%04x  "
1611 		"84==0x%04x  \n",
1612 		id[80],
1613 		id[81],
1614 		id[82],
1615 		id[83],
1616 		id[84]);
1617 	DPRINTK("88==0x%04x  "
1618 		"93==0x%04x\n",
1619 		id[88],
1620 		id[93]);
1621 }
1622 
1623 /**
1624  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1625  *	@id: IDENTIFY data to compute xfer mask from
1626  *
1627  *	Compute the xfermask for this device. This is not as trivial
1628  *	as it seems if we must consider early devices correctly.
1629  *
1630  *	FIXME: pre IDE drive timing (do we care ?).
1631  *
1632  *	LOCKING:
1633  *	None.
1634  *
1635  *	RETURNS:
1636  *	Computed xfermask
1637  */
1638 unsigned long ata_id_xfermask(const u16 *id)
1639 {
1640 	unsigned long pio_mask, mwdma_mask, udma_mask;
1641 
1642 	/* Usual case. Word 53 indicates word 64 is valid */
1643 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1644 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1645 		pio_mask <<= 3;
1646 		pio_mask |= 0x7;
1647 	} else {
1648 		/* If word 64 isn't valid then Word 51 high byte holds
1649 		 * the PIO timing number for the maximum. Turn it into
1650 		 * a mask.
1651 		 */
1652 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1653 		if (mode < 5)	/* Valid PIO range */
1654 			pio_mask = (2 << mode) - 1;
1655 		else
1656 			pio_mask = 1;
1657 
1658 		/* But wait.. there's more. Design your standards by
1659 		 * committee and you too can get a free iordy field to
1660 		 * process. However it's the speeds, not the modes, that
1661 		 * are supported... Note that drivers using the timing API
1662 		 * will get this right anyway.
1663 		 */
1664 	}
1665 
1666 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1667 
1668 	if (ata_id_is_cfa(id)) {
1669 		/*
1670 		 *	Process compact flash extended modes
1671 		 */
1672 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1673 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1674 
1675 		if (pio)
1676 			pio_mask |= (1 << 5);
1677 		if (pio > 1)
1678 			pio_mask |= (1 << 6);
1679 		if (dma)
1680 			mwdma_mask |= (1 << 3);
1681 		if (dma > 1)
1682 			mwdma_mask |= (1 << 4);
1683 	}
1684 
1685 	udma_mask = 0;
1686 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1687 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1688 
1689 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1690 }
1691 
1692 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1693 {
1694 	struct completion *waiting = qc->private_data;
1695 
1696 	complete(waiting);
1697 }
1698 
1699 /**
1700  *	ata_exec_internal_sg - execute libata internal command
1701  *	@dev: Device to which the command is sent
1702  *	@tf: Taskfile registers for the command and the result
1703  *	@cdb: CDB for packet command
1704  *	@dma_dir: Data transfer direction of the command
1705  *	@sgl: sg list for the data buffer of the command
1706  *	@n_elem: Number of sg entries
1707  *	@timeout: Timeout in msecs (0 for default)
1708  *
1709  *	Executes libata internal command with timeout.  @tf contains
1710  *	command on entry and result on return.  Timeout and error
1711  *	conditions are reported via return value.  No recovery action
1712  *	is taken after a command times out.  It's caller's duty to
1713  *	clean up after timeout.
1714  *
1715  *	LOCKING:
1716  *	None.  Should be called with kernel context, might sleep.
1717  *
1718  *	RETURNS:
1719  *	Zero on success, AC_ERR_* mask on failure
1720  */
1721 unsigned ata_exec_internal_sg(struct ata_device *dev,
1722 			      struct ata_taskfile *tf, const u8 *cdb,
1723 			      int dma_dir, struct scatterlist *sgl,
1724 			      unsigned int n_elem, unsigned long timeout)
1725 {
1726 	struct ata_link *link = dev->link;
1727 	struct ata_port *ap = link->ap;
1728 	u8 command = tf->command;
1729 	int auto_timeout = 0;
1730 	struct ata_queued_cmd *qc;
1731 	unsigned int tag, preempted_tag;
1732 	u32 preempted_sactive, preempted_qc_active;
1733 	int preempted_nr_active_links;
1734 	DECLARE_COMPLETION_ONSTACK(wait);
1735 	unsigned long flags;
1736 	unsigned int err_mask;
1737 	int rc;
1738 
1739 	spin_lock_irqsave(ap->lock, flags);
1740 
1741 	/* no internal command while frozen */
1742 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1743 		spin_unlock_irqrestore(ap->lock, flags);
1744 		return AC_ERR_SYSTEM;
1745 	}
1746 
1747 	/* initialize internal qc */
1748 
1749 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1750 	 * drivers choke if any other tag is given.  This breaks
1751 	 * ata_tag_internal() test for those drivers.  Don't use new
1752 	 * EH stuff without converting to it.
1753 	 */
1754 	if (ap->ops->error_handler)
1755 		tag = ATA_TAG_INTERNAL;
1756 	else
1757 		tag = 0;
1758 
1759 	if (test_and_set_bit(tag, &ap->qc_allocated))
1760 		BUG();
1761 	qc = __ata_qc_from_tag(ap, tag);
1762 
1763 	qc->tag = tag;
1764 	qc->scsicmd = NULL;
1765 	qc->ap = ap;
1766 	qc->dev = dev;
1767 	ata_qc_reinit(qc);
1768 
1769 	preempted_tag = link->active_tag;
1770 	preempted_sactive = link->sactive;
1771 	preempted_qc_active = ap->qc_active;
1772 	preempted_nr_active_links = ap->nr_active_links;
1773 	link->active_tag = ATA_TAG_POISON;
1774 	link->sactive = 0;
1775 	ap->qc_active = 0;
1776 	ap->nr_active_links = 0;
1777 
1778 	/* prepare & issue qc */
1779 	qc->tf = *tf;
1780 	if (cdb)
1781 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1782 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1783 	qc->dma_dir = dma_dir;
1784 	if (dma_dir != DMA_NONE) {
1785 		unsigned int i, buflen = 0;
1786 		struct scatterlist *sg;
1787 
1788 		for_each_sg(sgl, sg, n_elem, i)
1789 			buflen += sg->length;
1790 
1791 		ata_sg_init(qc, sgl, n_elem);
1792 		qc->nbytes = buflen;
1793 	}
1794 
1795 	qc->private_data = &wait;
1796 	qc->complete_fn = ata_qc_complete_internal;
1797 
1798 	ata_qc_issue(qc);
1799 
1800 	spin_unlock_irqrestore(ap->lock, flags);
1801 
1802 	if (!timeout) {
1803 		if (ata_probe_timeout)
1804 			timeout = ata_probe_timeout * 1000;
1805 		else {
1806 			timeout = ata_internal_cmd_timeout(dev, command);
1807 			auto_timeout = 1;
1808 		}
1809 	}
1810 
1811 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1812 
1813 	ata_sff_flush_pio_task(ap);
1814 
1815 	if (!rc) {
1816 		spin_lock_irqsave(ap->lock, flags);
1817 
1818 		/* We're racing with irq here.  If we lose, the
1819 		 * following test prevents us from completing the qc
1820 		 * twice.  If we win, the port is frozen and will be
1821 		 * cleaned up by ->post_internal_cmd().
1822 		 */
1823 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1824 			qc->err_mask |= AC_ERR_TIMEOUT;
1825 
1826 			if (ap->ops->error_handler)
1827 				ata_port_freeze(ap);
1828 			else
1829 				ata_qc_complete(qc);
1830 
1831 			if (ata_msg_warn(ap))
1832 				ata_dev_printk(dev, KERN_WARNING,
1833 					"qc timeout (cmd 0x%x)\n", command);
1834 		}
1835 
1836 		spin_unlock_irqrestore(ap->lock, flags);
1837 	}
1838 
1839 	/* do post_internal_cmd */
1840 	if (ap->ops->post_internal_cmd)
1841 		ap->ops->post_internal_cmd(qc);
1842 
1843 	/* perform minimal error analysis */
1844 	if (qc->flags & ATA_QCFLAG_FAILED) {
1845 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1846 			qc->err_mask |= AC_ERR_DEV;
1847 
1848 		if (!qc->err_mask)
1849 			qc->err_mask |= AC_ERR_OTHER;
1850 
1851 		if (qc->err_mask & ~AC_ERR_OTHER)
1852 			qc->err_mask &= ~AC_ERR_OTHER;
1853 	}
1854 
1855 	/* finish up */
1856 	spin_lock_irqsave(ap->lock, flags);
1857 
1858 	*tf = qc->result_tf;
1859 	err_mask = qc->err_mask;
1860 
1861 	ata_qc_free(qc);
1862 	link->active_tag = preempted_tag;
1863 	link->sactive = preempted_sactive;
1864 	ap->qc_active = preempted_qc_active;
1865 	ap->nr_active_links = preempted_nr_active_links;
1866 
1867 	spin_unlock_irqrestore(ap->lock, flags);
1868 
1869 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1870 		ata_internal_cmd_timed_out(dev, command);
1871 
1872 	return err_mask;
1873 }
1874 
1875 /**
1876  *	ata_exec_internal - execute libata internal command
1877  *	@dev: Device to which the command is sent
1878  *	@tf: Taskfile registers for the command and the result
1879  *	@cdb: CDB for packet command
1880  *	@dma_dir: Data transfer direction of the command
1881  *	@buf: Data buffer of the command
1882  *	@buflen: Length of data buffer
1883  *	@timeout: Timeout in msecs (0 for default)
1884  *
1885  *	Wrapper around ata_exec_internal_sg() which takes a simple
1886  *	buffer instead of an sg list.
1887  *
1888  *	LOCKING:
1889  *	None.  Should be called with kernel context, might sleep.
1890  *
1891  *	RETURNS:
1892  *	Zero on success, AC_ERR_* mask on failure
1893  */
1894 unsigned ata_exec_internal(struct ata_device *dev,
1895 			   struct ata_taskfile *tf, const u8 *cdb,
1896 			   int dma_dir, void *buf, unsigned int buflen,
1897 			   unsigned long timeout)
1898 {
1899 	struct scatterlist *psg = NULL, sg;
1900 	unsigned int n_elem = 0;
1901 
1902 	if (dma_dir != DMA_NONE) {
1903 		WARN_ON(!buf);
1904 		sg_init_one(&sg, buf, buflen);
1905 		psg = &sg;
1906 		n_elem++;
1907 	}
1908 
1909 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1910 				    timeout);
1911 }
1912 
1913 /**
1914  *	ata_do_simple_cmd - execute simple internal command
1915  *	@dev: Device to which the command is sent
1916  *	@cmd: Opcode to execute
1917  *
1918  *	Execute a 'simple' command that consists only of the opcode
1919  *	'cmd' itself, without filling any other registers.
1920  *
1921  *	LOCKING:
1922  *	Kernel thread context (may sleep).
1923  *
1924  *	RETURNS:
1925  *	Zero on success, AC_ERR_* mask on failure
1926  */
1927 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1928 {
1929 	struct ata_taskfile tf;
1930 
1931 	ata_tf_init(dev, &tf);
1932 
1933 	tf.command = cmd;
1934 	tf.flags |= ATA_TFLAG_DEVICE;
1935 	tf.protocol = ATA_PROT_NODATA;
1936 
1937 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1938 }
1939 
1940 /**
1941  *	ata_pio_need_iordy	-	check if iordy needed
1942  *	@adev: ATA device
1943  *
1944  *	Check if the current speed of the device requires IORDY. Used
1945  *	by various controllers for chip configuration.
1946  */
1947 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1948 {
1949 	/* Don't set IORDY if we're preparing for reset.  IORDY may
1950 	 * lead to controller lock up on certain controllers if the
1951 	 * port is not occupied.  See bko#11703 for details.
1952 	 */
1953 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1954 		return 0;
1955 	/* Controller doesn't support IORDY.  Probably a pointless
1956 	 * check as the caller should know this.
1957 	 */
1958 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1959 		return 0;
1960 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1961 	if (ata_id_is_cfa(adev->id)
1962 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1963 		return 0;
1964 	/* For PIO3 and higher it is mandatory */
1965 	if (adev->pio_mode > XFER_PIO_2)
1966 		return 1;
1967 	/* We turn it on when possible */
1968 	if (ata_id_has_iordy(adev->id))
1969 		return 1;
1970 	return 0;
1971 }
1972 
1973 /**
1974  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1975  *	@adev: ATA device
1976  *
1977  *	Compute the highest mode possible if we are not using iordy. Return
1978  *	-1 if no iordy mode is available.
1979  */
1980 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1981 {
1982 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1983 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1984 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1985 		/* Is the speed faster than the drive allows non IORDY ? */
1986 		if (pio) {
1987 			/* This is cycle times not frequency - watch the logic! */
1988 			if (pio > 240)	/* PIO2 is 240ns per cycle */
1989 				return 3 << ATA_SHIFT_PIO;
1990 			return 7 << ATA_SHIFT_PIO;
1991 		}
1992 	}
1993 	return 3 << ATA_SHIFT_PIO;
1994 }
1995 
1996 /**
1997  *	ata_do_dev_read_id		-	default ID read method
1998  *	@dev: device
1999  *	@tf: proposed taskfile
2000  *	@id: data buffer
2001  *
2002  *	Issue the identify taskfile and hand back the buffer containing
2003  *	identify data. For some RAID controllers and for pre-ATA devices
2004  *	this function is wrapped or replaced by the driver.
2005  */
2006 unsigned int ata_do_dev_read_id(struct ata_device *dev,
2007 					struct ata_taskfile *tf, u16 *id)
2008 {
2009 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
2010 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
2011 }
2012 
2013 /**
2014  *	ata_dev_read_id - Read ID data from the specified device
2015  *	@dev: target device
2016  *	@p_class: pointer to class of the target device (may be changed)
2017  *	@flags: ATA_READID_* flags
2018  *	@id: buffer to read IDENTIFY data into
2019  *
2020  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
2021  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
2022  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
2023  *	for pre-ATA4 drives.
2024  *
2025  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2026  *	now we abort if we hit that case.
2027  *
2028  *	LOCKING:
2029  *	Kernel thread context (may sleep)
2030  *
2031  *	RETURNS:
2032  *	0 on success, -errno otherwise.
2033  */
2034 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2035 		    unsigned int flags, u16 *id)
2036 {
2037 	struct ata_port *ap = dev->link->ap;
2038 	unsigned int class = *p_class;
2039 	struct ata_taskfile tf;
2040 	unsigned int err_mask = 0;
2041 	const char *reason;
2042 	bool is_semb = class == ATA_DEV_SEMB;
2043 	int may_fallback = 1, tried_spinup = 0;
2044 	int rc;
2045 
2046 	if (ata_msg_ctl(ap))
2047 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2048 
2049 retry:
2050 	ata_tf_init(dev, &tf);
2051 
2052 	switch (class) {
2053 	case ATA_DEV_SEMB:
2054 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
2055 	case ATA_DEV_ATA:
2056 		tf.command = ATA_CMD_ID_ATA;
2057 		break;
2058 	case ATA_DEV_ATAPI:
2059 		tf.command = ATA_CMD_ID_ATAPI;
2060 		break;
2061 	default:
2062 		rc = -ENODEV;
2063 		reason = "unsupported class";
2064 		goto err_out;
2065 	}
2066 
2067 	tf.protocol = ATA_PROT_PIO;
2068 
2069 	/* Some devices choke if TF registers contain garbage.  Make
2070 	 * sure those are properly initialized.
2071 	 */
2072 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2073 
2074 	/* Device presence detection is unreliable on some
2075 	 * controllers.  Always poll IDENTIFY if available.
2076 	 */
2077 	tf.flags |= ATA_TFLAG_POLLING;
2078 
2079 	if (ap->ops->read_id)
2080 		err_mask = ap->ops->read_id(dev, &tf, id);
2081 	else
2082 		err_mask = ata_do_dev_read_id(dev, &tf, id);
2083 
2084 	if (err_mask) {
2085 		if (err_mask & AC_ERR_NODEV_HINT) {
2086 			ata_dev_printk(dev, KERN_DEBUG,
2087 				       "NODEV after polling detection\n");
2088 			return -ENOENT;
2089 		}
2090 
2091 		if (is_semb) {
2092 			ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
2093 				       "device w/ SEMB sig, disabled\n");
2094 			/* SEMB is not supported yet */
2095 			*p_class = ATA_DEV_SEMB_UNSUP;
2096 			return 0;
2097 		}
2098 
2099 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2100 			/* Device or controller might have reported
2101 			 * the wrong device class.  Give a shot at the
2102 			 * other IDENTIFY if the current one is
2103 			 * aborted by the device.
2104 			 */
2105 			if (may_fallback) {
2106 				may_fallback = 0;
2107 
2108 				if (class == ATA_DEV_ATA)
2109 					class = ATA_DEV_ATAPI;
2110 				else
2111 					class = ATA_DEV_ATA;
2112 				goto retry;
2113 			}
2114 
2115 			/* Control reaches here iff the device aborted
2116 			 * both flavors of IDENTIFYs which happens
2117 			 * sometimes with phantom devices.
2118 			 */
2119 			ata_dev_printk(dev, KERN_DEBUG,
2120 				       "both IDENTIFYs aborted, assuming NODEV\n");
2121 			return -ENOENT;
2122 		}
2123 
2124 		rc = -EIO;
2125 		reason = "I/O error";
2126 		goto err_out;
2127 	}
2128 
2129 	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
2130 		ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
2131 			       "class=%d may_fallback=%d tried_spinup=%d\n",
2132 			       class, may_fallback, tried_spinup);
2133 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
2134 			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
2135 	}
2136 
2137 	/* Falling back doesn't make sense if ID data was read
2138 	 * successfully at least once.
2139 	 */
2140 	may_fallback = 0;
2141 
2142 	swap_buf_le16(id, ATA_ID_WORDS);
2143 
2144 	/* sanity check */
2145 	rc = -EINVAL;
2146 	reason = "device reports invalid type";
2147 
2148 	if (class == ATA_DEV_ATA) {
2149 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2150 			goto err_out;
2151 	} else {
2152 		if (ata_id_is_ata(id))
2153 			goto err_out;
2154 	}
2155 
2156 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2157 		tried_spinup = 1;
2158 		/*
2159 		 * Drive powered-up in standby mode, and requires a specific
2160 		 * SET_FEATURES spin-up subcommand before it will accept
2161 		 * anything other than the original IDENTIFY command.
2162 		 */
2163 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2164 		if (err_mask && id[2] != 0x738c) {
2165 			rc = -EIO;
2166 			reason = "SPINUP failed";
2167 			goto err_out;
2168 		}
2169 		/*
2170 		 * If the drive initially returned incomplete IDENTIFY info,
2171 		 * we now must reissue the IDENTIFY command.
2172 		 */
2173 		if (id[2] == 0x37c8)
2174 			goto retry;
2175 	}
2176 
2177 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2178 		/*
2179 		 * The exact sequence expected by certain pre-ATA4 drives is:
2180 		 * SRST RESET
2181 		 * IDENTIFY (optional in early ATA)
2182 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2183 		 * anything else..
2184 		 * Some drives were very specific about that exact sequence.
2185 		 *
2186 		 * Note that ATA4 says lba is mandatory so the second check
2187 		 * should never trigger.
2188 		 */
2189 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2190 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2191 			if (err_mask) {
2192 				rc = -EIO;
2193 				reason = "INIT_DEV_PARAMS failed";
2194 				goto err_out;
2195 			}
2196 
2197 			/* current CHS translation info (id[53-58]) might be
2198 			 * changed. reread the identify device info.
2199 			 */
2200 			flags &= ~ATA_READID_POSTRESET;
2201 			goto retry;
2202 		}
2203 	}
2204 
2205 	*p_class = class;
2206 
2207 	return 0;
2208 
2209  err_out:
2210 	if (ata_msg_warn(ap))
2211 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2212 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2213 	return rc;
2214 }
2215 
2216 static int ata_do_link_spd_horkage(struct ata_device *dev)
2217 {
2218 	struct ata_link *plink = ata_dev_phys_link(dev);
2219 	u32 target, target_limit;
2220 
2221 	if (!sata_scr_valid(plink))
2222 		return 0;
2223 
2224 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2225 		target = 1;
2226 	else
2227 		return 0;
2228 
2229 	target_limit = (1 << target) - 1;
2230 
2231 	/* if already on stricter limit, no need to push further */
2232 	if (plink->sata_spd_limit <= target_limit)
2233 		return 0;
2234 
2235 	plink->sata_spd_limit = target_limit;
2236 
2237 	/* Request another EH round by returning -EAGAIN if link is
2238 	 * going faster than the target speed.  Forward progress is
2239 	 * guaranteed by setting sata_spd_limit to target_limit above.
2240 	 */
2241 	if (plink->sata_spd > target) {
2242 		ata_dev_printk(dev, KERN_INFO,
2243 			       "applying link speed limit horkage to %s\n",
2244 			       sata_spd_string(target));
2245 		return -EAGAIN;
2246 	}
2247 	return 0;
2248 }
2249 
2250 static inline u8 ata_dev_knobble(struct ata_device *dev)
2251 {
2252 	struct ata_port *ap = dev->link->ap;
2253 
2254 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2255 		return 0;
2256 
2257 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2258 }
2259 
2260 static int ata_dev_config_ncq(struct ata_device *dev,
2261 			       char *desc, size_t desc_sz)
2262 {
2263 	struct ata_port *ap = dev->link->ap;
2264 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2265 	unsigned int err_mask;
2266 	char *aa_desc = "";
2267 
2268 	if (!ata_id_has_ncq(dev->id)) {
2269 		desc[0] = '\0';
2270 		return 0;
2271 	}
2272 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2273 		snprintf(desc, desc_sz, "NCQ (not used)");
2274 		return 0;
2275 	}
2276 	if (ap->flags & ATA_FLAG_NCQ) {
2277 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2278 		dev->flags |= ATA_DFLAG_NCQ;
2279 	}
2280 
2281 	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2282 		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2283 		ata_id_has_fpdma_aa(dev->id)) {
2284 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2285 			SATA_FPDMA_AA);
2286 		if (err_mask) {
2287 			ata_dev_printk(dev, KERN_ERR, "failed to enable AA "
2288 				"(error_mask=0x%x)\n", err_mask);
2289 			if (err_mask != AC_ERR_DEV) {
2290 				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2291 				return -EIO;
2292 			}
2293 		} else
2294 			aa_desc = ", AA";
2295 	}
2296 
2297 	if (hdepth >= ddepth)
2298 		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2299 	else
2300 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2301 			ddepth, aa_desc);
2302 	return 0;
2303 }
2304 
2305 /**
2306  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2307  *	@dev: Target device to configure
2308  *
2309  *	Configure @dev according to @dev->id.  Generic and low-level
2310  *	driver specific fixups are also applied.
2311  *
2312  *	LOCKING:
2313  *	Kernel thread context (may sleep)
2314  *
2315  *	RETURNS:
2316  *	0 on success, -errno otherwise
2317  */
2318 int ata_dev_configure(struct ata_device *dev)
2319 {
2320 	struct ata_port *ap = dev->link->ap;
2321 	struct ata_eh_context *ehc = &dev->link->eh_context;
2322 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2323 	const u16 *id = dev->id;
2324 	unsigned long xfer_mask;
2325 	char revbuf[7];		/* XYZ-99\0 */
2326 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2327 	char modelbuf[ATA_ID_PROD_LEN+1];
2328 	int rc;
2329 
2330 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2331 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2332 			       __func__);
2333 		return 0;
2334 	}
2335 
2336 	if (ata_msg_probe(ap))
2337 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2338 
2339 	/* set horkage */
2340 	dev->horkage |= ata_dev_blacklisted(dev);
2341 	ata_force_horkage(dev);
2342 
2343 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2344 		ata_dev_printk(dev, KERN_INFO,
2345 			       "unsupported device, disabling\n");
2346 		ata_dev_disable(dev);
2347 		return 0;
2348 	}
2349 
2350 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2351 	    dev->class == ATA_DEV_ATAPI) {
2352 		ata_dev_printk(dev, KERN_WARNING,
2353 			"WARNING: ATAPI is %s, device ignored.\n",
2354 			atapi_enabled ? "not supported with this driver"
2355 				      : "disabled");
2356 		ata_dev_disable(dev);
2357 		return 0;
2358 	}
2359 
2360 	rc = ata_do_link_spd_horkage(dev);
2361 	if (rc)
2362 		return rc;
2363 
2364 	/* let ACPI work its magic */
2365 	rc = ata_acpi_on_devcfg(dev);
2366 	if (rc)
2367 		return rc;
2368 
2369 	/* massage HPA, do it early as it might change IDENTIFY data */
2370 	rc = ata_hpa_resize(dev);
2371 	if (rc)
2372 		return rc;
2373 
2374 	/* print device capabilities */
2375 	if (ata_msg_probe(ap))
2376 		ata_dev_printk(dev, KERN_DEBUG,
2377 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2378 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2379 			       __func__,
2380 			       id[49], id[82], id[83], id[84],
2381 			       id[85], id[86], id[87], id[88]);
2382 
2383 	/* initialize to-be-configured parameters */
2384 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2385 	dev->max_sectors = 0;
2386 	dev->cdb_len = 0;
2387 	dev->n_sectors = 0;
2388 	dev->cylinders = 0;
2389 	dev->heads = 0;
2390 	dev->sectors = 0;
2391 	dev->multi_count = 0;
2392 
2393 	/*
2394 	 * common ATA, ATAPI feature tests
2395 	 */
2396 
2397 	/* find max transfer mode; for printk only */
2398 	xfer_mask = ata_id_xfermask(id);
2399 
2400 	if (ata_msg_probe(ap))
2401 		ata_dump_id(id);
2402 
2403 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2404 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2405 			sizeof(fwrevbuf));
2406 
2407 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2408 			sizeof(modelbuf));
2409 
2410 	/* ATA-specific feature tests */
2411 	if (dev->class == ATA_DEV_ATA) {
2412 		if (ata_id_is_cfa(id)) {
2413 			/* CPRM may make this media unusable */
2414 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2415 				ata_dev_printk(dev, KERN_WARNING,
2416 					       "supports DRM functions and may "
2417 					       "not be fully accessible.\n");
2418 			snprintf(revbuf, 7, "CFA");
2419 		} else {
2420 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2421 			/* Warn the user if the device has TPM extensions */
2422 			if (ata_id_has_tpm(id))
2423 				ata_dev_printk(dev, KERN_WARNING,
2424 					       "supports DRM functions and may "
2425 					       "not be fully accessible.\n");
2426 		}
2427 
2428 		dev->n_sectors = ata_id_n_sectors(id);
2429 
2430 		/* get current R/W Multiple count setting */
2431 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2432 			unsigned int max = dev->id[47] & 0xff;
2433 			unsigned int cnt = dev->id[59] & 0xff;
2434 			/* only recognize/allow powers of two here */
2435 			if (is_power_of_2(max) && is_power_of_2(cnt))
2436 				if (cnt <= max)
2437 					dev->multi_count = cnt;
2438 		}
2439 
2440 		if (ata_id_has_lba(id)) {
2441 			const char *lba_desc;
2442 			char ncq_desc[24];
2443 
2444 			lba_desc = "LBA";
2445 			dev->flags |= ATA_DFLAG_LBA;
2446 			if (ata_id_has_lba48(id)) {
2447 				dev->flags |= ATA_DFLAG_LBA48;
2448 				lba_desc = "LBA48";
2449 
2450 				if (dev->n_sectors >= (1UL << 28) &&
2451 				    ata_id_has_flush_ext(id))
2452 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2453 			}
2454 
2455 			/* config NCQ */
2456 			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2457 			if (rc)
2458 				return rc;
2459 
2460 			/* print device info to dmesg */
2461 			if (ata_msg_drv(ap) && print_info) {
2462 				ata_dev_printk(dev, KERN_INFO,
2463 					"%s: %s, %s, max %s\n",
2464 					revbuf, modelbuf, fwrevbuf,
2465 					ata_mode_string(xfer_mask));
2466 				ata_dev_printk(dev, KERN_INFO,
2467 					"%Lu sectors, multi %u: %s %s\n",
2468 					(unsigned long long)dev->n_sectors,
2469 					dev->multi_count, lba_desc, ncq_desc);
2470 			}
2471 		} else {
2472 			/* CHS */
2473 
2474 			/* Default translation */
2475 			dev->cylinders	= id[1];
2476 			dev->heads	= id[3];
2477 			dev->sectors	= id[6];
2478 
2479 			if (ata_id_current_chs_valid(id)) {
2480 				/* Current CHS translation is valid. */
2481 				dev->cylinders = id[54];
2482 				dev->heads     = id[55];
2483 				dev->sectors   = id[56];
2484 			}
2485 
2486 			/* print device info to dmesg */
2487 			if (ata_msg_drv(ap) && print_info) {
2488 				ata_dev_printk(dev, KERN_INFO,
2489 					"%s: %s, %s, max %s\n",
2490 					revbuf,	modelbuf, fwrevbuf,
2491 					ata_mode_string(xfer_mask));
2492 				ata_dev_printk(dev, KERN_INFO,
2493 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2494 					(unsigned long long)dev->n_sectors,
2495 					dev->multi_count, dev->cylinders,
2496 					dev->heads, dev->sectors);
2497 			}
2498 		}
2499 
2500 		dev->cdb_len = 16;
2501 	}
2502 
2503 	/* ATAPI-specific feature tests */
2504 	else if (dev->class == ATA_DEV_ATAPI) {
2505 		const char *cdb_intr_string = "";
2506 		const char *atapi_an_string = "";
2507 		const char *dma_dir_string = "";
2508 		u32 sntf;
2509 
2510 		rc = atapi_cdb_len(id);
2511 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2512 			if (ata_msg_warn(ap))
2513 				ata_dev_printk(dev, KERN_WARNING,
2514 					       "unsupported CDB len\n");
2515 			rc = -EINVAL;
2516 			goto err_out_nosup;
2517 		}
2518 		dev->cdb_len = (unsigned int) rc;
2519 
2520 		/* Enable ATAPI AN if both the host and device have
2521 		 * the support.  If PMP is attached, SNTF is required
2522 		 * to enable ATAPI AN to discern between PHY status
2523 		 * changed notifications and ATAPI ANs.
2524 		 */
2525 		if (atapi_an &&
2526 		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2527 		    (!sata_pmp_attached(ap) ||
2528 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2529 			unsigned int err_mask;
2530 
2531 			/* issue SET feature command to turn this on */
2532 			err_mask = ata_dev_set_feature(dev,
2533 					SETFEATURES_SATA_ENABLE, SATA_AN);
2534 			if (err_mask)
2535 				ata_dev_printk(dev, KERN_ERR,
2536 					"failed to enable ATAPI AN "
2537 					"(err_mask=0x%x)\n", err_mask);
2538 			else {
2539 				dev->flags |= ATA_DFLAG_AN;
2540 				atapi_an_string = ", ATAPI AN";
2541 			}
2542 		}
2543 
2544 		if (ata_id_cdb_intr(dev->id)) {
2545 			dev->flags |= ATA_DFLAG_CDB_INTR;
2546 			cdb_intr_string = ", CDB intr";
2547 		}
2548 
2549 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2550 			dev->flags |= ATA_DFLAG_DMADIR;
2551 			dma_dir_string = ", DMADIR";
2552 		}
2553 
2554 		/* print device info to dmesg */
2555 		if (ata_msg_drv(ap) && print_info)
2556 			ata_dev_printk(dev, KERN_INFO,
2557 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2558 				       modelbuf, fwrevbuf,
2559 				       ata_mode_string(xfer_mask),
2560 				       cdb_intr_string, atapi_an_string,
2561 				       dma_dir_string);
2562 	}
2563 
2564 	/* determine max_sectors */
2565 	dev->max_sectors = ATA_MAX_SECTORS;
2566 	if (dev->flags & ATA_DFLAG_LBA48)
2567 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2568 
2569 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2570 		if (ata_id_has_hipm(dev->id))
2571 			dev->flags |= ATA_DFLAG_HIPM;
2572 		if (ata_id_has_dipm(dev->id))
2573 			dev->flags |= ATA_DFLAG_DIPM;
2574 	}
2575 
2576 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2577 	   200 sectors */
2578 	if (ata_dev_knobble(dev)) {
2579 		if (ata_msg_drv(ap) && print_info)
2580 			ata_dev_printk(dev, KERN_INFO,
2581 				       "applying bridge limits\n");
2582 		dev->udma_mask &= ATA_UDMA5;
2583 		dev->max_sectors = ATA_MAX_SECTORS;
2584 	}
2585 
2586 	if ((dev->class == ATA_DEV_ATAPI) &&
2587 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2588 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2589 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2590 	}
2591 
2592 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2593 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2594 					 dev->max_sectors);
2595 
2596 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2597 		dev->horkage |= ATA_HORKAGE_IPM;
2598 
2599 		/* reset link pm_policy for this port to no pm */
2600 		ap->pm_policy = MAX_PERFORMANCE;
2601 	}
2602 
2603 	if (ap->ops->dev_config)
2604 		ap->ops->dev_config(dev);
2605 
2606 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2607 		/* Let the user know. We don't want to disallow opens for
2608 		   rescue purposes, or in case the vendor is just a blithering
2609 		   idiot. Do this after the dev_config call as some controllers
2610 		   with buggy firmware may want to avoid reporting false device
2611 		   bugs */
2612 
2613 		if (print_info) {
2614 			ata_dev_printk(dev, KERN_WARNING,
2615 "Drive reports diagnostics failure. This may indicate a drive\n");
2616 			ata_dev_printk(dev, KERN_WARNING,
2617 "fault or invalid emulation. Contact drive vendor for information.\n");
2618 		}
2619 	}
2620 
2621 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2622 		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2623 			       "firmware update to be fully functional.\n");
2624 		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
2625 			       "or visit http://ata.wiki.kernel.org.\n");
2626 	}
2627 
2628 	return 0;
2629 
2630 err_out_nosup:
2631 	if (ata_msg_probe(ap))
2632 		ata_dev_printk(dev, KERN_DEBUG,
2633 			       "%s: EXIT, err\n", __func__);
2634 	return rc;
2635 }
2636 
2637 /**
2638  *	ata_cable_40wire	-	return 40 wire cable type
2639  *	@ap: port
2640  *
2641  *	Helper method for drivers which want to hardwire 40 wire cable
2642  *	detection.
2643  */
2644 
2645 int ata_cable_40wire(struct ata_port *ap)
2646 {
2647 	return ATA_CBL_PATA40;
2648 }
2649 
2650 /**
2651  *	ata_cable_80wire	-	return 80 wire cable type
2652  *	@ap: port
2653  *
2654  *	Helper method for drivers which want to hardwire 80 wire cable
2655  *	detection.
2656  */
2657 
2658 int ata_cable_80wire(struct ata_port *ap)
2659 {
2660 	return ATA_CBL_PATA80;
2661 }
2662 
2663 /**
2664  *	ata_cable_unknown	-	return unknown PATA cable.
2665  *	@ap: port
2666  *
2667  *	Helper method for drivers which have no PATA cable detection.
2668  */
2669 
2670 int ata_cable_unknown(struct ata_port *ap)
2671 {
2672 	return ATA_CBL_PATA_UNK;
2673 }
2674 
2675 /**
2676  *	ata_cable_ignore	-	return ignored PATA cable.
2677  *	@ap: port
2678  *
2679  *	Helper method for drivers which don't use cable type to limit
2680  *	transfer mode.
2681  */
2682 int ata_cable_ignore(struct ata_port *ap)
2683 {
2684 	return ATA_CBL_PATA_IGN;
2685 }
2686 
2687 /**
2688  *	ata_cable_sata	-	return SATA cable type
2689  *	@ap: port
2690  *
2691  *	Helper method for drivers which have SATA cables
2692  */
2693 
2694 int ata_cable_sata(struct ata_port *ap)
2695 {
2696 	return ATA_CBL_SATA;
2697 }
2698 
2699 /**
2700  *	ata_bus_probe - Reset and probe ATA bus
2701  *	@ap: Bus to probe
2702  *
2703  *	Master ATA bus probing function.  Initiates a hardware-dependent
2704  *	bus reset, then attempts to identify any devices found on
2705  *	the bus.
2706  *
2707  *	LOCKING:
2708  *	PCI/etc. bus probe sem.
2709  *
2710  *	RETURNS:
2711  *	Zero on success, negative errno otherwise.
2712  */
2713 
2714 int ata_bus_probe(struct ata_port *ap)
2715 {
2716 	unsigned int classes[ATA_MAX_DEVICES];
2717 	int tries[ATA_MAX_DEVICES];
2718 	int rc;
2719 	struct ata_device *dev;
2720 
2721 	ata_for_each_dev(dev, &ap->link, ALL)
2722 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2723 
2724  retry:
2725 	ata_for_each_dev(dev, &ap->link, ALL) {
2726 		/* If we issue an SRST then an ATA drive (not ATAPI)
2727 		 * may change configuration and be in PIO0 timing. If
2728 		 * we do a hard reset (or are coming from power on)
2729 		 * this is true for ATA or ATAPI. Until we've set a
2730 		 * suitable controller mode we should not touch the
2731 		 * bus as we may be talking too fast.
2732 		 */
2733 		dev->pio_mode = XFER_PIO_0;
2734 
2735 		/* If the controller has a pio mode setup function
2736 		 * then use it to set the chipset to rights. Don't
2737 		 * touch the DMA setup as that will be dealt with when
2738 		 * configuring devices.
2739 		 */
2740 		if (ap->ops->set_piomode)
2741 			ap->ops->set_piomode(ap, dev);
2742 	}
2743 
2744 	/* reset and determine device classes */
2745 	ap->ops->phy_reset(ap);
2746 
2747 	ata_for_each_dev(dev, &ap->link, ALL) {
2748 		if (dev->class != ATA_DEV_UNKNOWN)
2749 			classes[dev->devno] = dev->class;
2750 		else
2751 			classes[dev->devno] = ATA_DEV_NONE;
2752 
2753 		dev->class = ATA_DEV_UNKNOWN;
2754 	}
2755 
2756 	/* read IDENTIFY page and configure devices. We have to do the identify
2757 	   specific sequence bass-ackwards so that PDIAG- is released by
2758 	   the slave device */
2759 
2760 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2761 		if (tries[dev->devno])
2762 			dev->class = classes[dev->devno];
2763 
2764 		if (!ata_dev_enabled(dev))
2765 			continue;
2766 
2767 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2768 				     dev->id);
2769 		if (rc)
2770 			goto fail;
2771 	}
2772 
2773 	/* Now ask for the cable type as PDIAG- should have been released */
2774 	if (ap->ops->cable_detect)
2775 		ap->cbl = ap->ops->cable_detect(ap);
2776 
2777 	/* We may have SATA bridge glue hiding here irrespective of
2778 	 * the reported cable types and sensed types.  When SATA
2779 	 * drives indicate we have a bridge, we don't know which end
2780 	 * of the link the bridge is on, which is a problem.
2781 	 */
2782 	ata_for_each_dev(dev, &ap->link, ENABLED)
2783 		if (ata_id_is_sata(dev->id))
2784 			ap->cbl = ATA_CBL_SATA;
2785 
2786 	/* After the identify sequence we can now set up the devices. We do
2787 	   this in the normal order so that the user doesn't get confused */
2788 
2789 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2790 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2791 		rc = ata_dev_configure(dev);
2792 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2793 		if (rc)
2794 			goto fail;
2795 	}
2796 
2797 	/* configure transfer mode */
2798 	rc = ata_set_mode(&ap->link, &dev);
2799 	if (rc)
2800 		goto fail;
2801 
2802 	ata_for_each_dev(dev, &ap->link, ENABLED)
2803 		return 0;
2804 
2805 	return -ENODEV;
2806 
2807  fail:
2808 	tries[dev->devno]--;
2809 
2810 	switch (rc) {
2811 	case -EINVAL:
2812 		/* eeek, something went very wrong, give up */
2813 		tries[dev->devno] = 0;
2814 		break;
2815 
2816 	case -ENODEV:
2817 		/* give it just one more chance */
2818 		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
2819 	case -EIO:
2820 		if (tries[dev->devno] == 1) {
2821 			/* This is the last chance, better to slow
2822 			 * down than lose it.
2823 			 */
2824 			sata_down_spd_limit(&ap->link, 0);
2825 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2826 		}
2827 	}
2828 
2829 	if (!tries[dev->devno])
2830 		ata_dev_disable(dev);
2831 
2832 	goto retry;
2833 }
2834 
2835 /**
2836  *	sata_print_link_status - Print SATA link status
2837  *	@link: SATA link to printk link status about
2838  *
2839  *	This function prints link speed and status of a SATA link.
2840  *
2841  *	LOCKING:
2842  *	None.
2843  */
2844 static void sata_print_link_status(struct ata_link *link)
2845 {
2846 	u32 sstatus, scontrol, tmp;
2847 
2848 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2849 		return;
2850 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2851 
2852 	if (ata_phys_link_online(link)) {
2853 		tmp = (sstatus >> 4) & 0xf;
2854 		ata_link_printk(link, KERN_INFO,
2855 				"SATA link up %s (SStatus %X SControl %X)\n",
2856 				sata_spd_string(tmp), sstatus, scontrol);
2857 	} else {
2858 		ata_link_printk(link, KERN_INFO,
2859 				"SATA link down (SStatus %X SControl %X)\n",
2860 				sstatus, scontrol);
2861 	}
2862 }
2863 
2864 /**
2865  *	ata_dev_pair		-	return other device on cable
2866  *	@adev: device
2867  *
2868  *	Obtain the other device on the same cable; if none is
2869  *	present, NULL is returned.
2870  */
2871 
2872 struct ata_device *ata_dev_pair(struct ata_device *adev)
2873 {
2874 	struct ata_link *link = adev->link;
2875 	struct ata_device *pair = &link->device[1 - adev->devno];
2876 	if (!ata_dev_enabled(pair))
2877 		return NULL;
2878 	return pair;
2879 }
2880 
2881 /**
2882  *	sata_down_spd_limit - adjust SATA spd limit downward
2883  *	@link: Link to adjust SATA spd limit for
2884  *	@spd_limit: Additional limit
2885  *
2886  *	Adjust SATA spd limit of @link downward.  Note that this
2887  *	function only adjusts the limit.  The change must be applied
2888  *	using sata_set_spd().
2889  *
2890  *	If @spd_limit is non-zero, the speed is limited to one equal
2891  *	to or lower than @spd_limit if such a speed is supported.  If
2892  *	@spd_limit is slower than any supported speed, only the lowest
2893  *	supported speed is allowed.
2894  *
2895  *	LOCKING:
2896  *	Inherited from caller.
2897  *
2898  *	RETURNS:
2899  *	0 on success, negative errno on failure
2900  */
2901 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2902 {
2903 	u32 sstatus, spd, mask;
2904 	int rc, bit;
2905 
2906 	if (!sata_scr_valid(link))
2907 		return -EOPNOTSUPP;
2908 
2909 	/* If SCR can be read, use it to determine the current SPD.
2910 	 * If not, use cached value in link->sata_spd.
2911 	 */
2912 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2913 	if (rc == 0 && ata_sstatus_online(sstatus))
2914 		spd = (sstatus >> 4) & 0xf;
2915 	else
2916 		spd = link->sata_spd;
2917 
2918 	mask = link->sata_spd_limit;
2919 	if (mask <= 1)
2920 		return -EINVAL;
2921 
2922 	/* unconditionally mask off the highest bit */
2923 	bit = fls(mask) - 1;
2924 	mask &= ~(1 << bit);
2925 
2926 	/* Mask off all speeds higher than or equal to the current
2927 	 * one.  Force 1.5Gbps if current SPD is not available.
2928 	 */
2929 	if (spd > 1)
2930 		mask &= (1 << (spd - 1)) - 1;
2931 	else
2932 		mask &= 1;
2933 
2934 	/* were we already at the bottom? */
2935 	if (!mask)
2936 		return -EINVAL;
2937 
2938 	if (spd_limit) {
2939 		if (mask & ((1 << spd_limit) - 1))
2940 			mask &= (1 << spd_limit) - 1;
2941 		else {
2942 			bit = ffs(mask) - 1;
2943 			mask = 1 << bit;
2944 		}
2945 	}
2946 
2947 	link->sata_spd_limit = mask;
2948 
2949 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2950 			sata_spd_string(fls(mask)));
2951 
2952 	return 0;
2953 }
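/*
 * Worked example (illustrative, based on the code above): bit 0 of
 * sata_spd_limit stands for 1.5 Gbps, bit 1 for 3.0 Gbps and bit 2 for
 * 6.0 Gbps, while SStatus reports the current speed as 1, 2 or 3.  With
 * sata_spd_limit == 0x7 and the link currently at 6.0 Gbps (spd == 3),
 * the highest bit is dropped first (mask == 0x3) and masking off speeds
 * at or above the current one keeps mask == 0x3, so the new limit
 * allows up to 3.0 Gbps and the "limiting SATA link speed to 3.0 Gbps"
 * message is printed.  A further call in the same situation would end
 * up with mask == 0x1, i.e. 1.5 Gbps only.
 */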
2954 
2955 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2956 {
2957 	struct ata_link *host_link = &link->ap->link;
2958 	u32 limit, target, spd;
2959 
2960 	limit = link->sata_spd_limit;
2961 
2962 	/* Don't configure downstream link faster than upstream link.
2963 	 * It doesn't speed up anything and some PMPs choke on such
2964 	 * configuration.
2965 	 */
2966 	if (!ata_is_host_link(link) && host_link->sata_spd)
2967 		limit &= (1 << host_link->sata_spd) - 1;
2968 
2969 	if (limit == UINT_MAX)
2970 		target = 0;
2971 	else
2972 		target = fls(limit);
2973 
2974 	spd = (*scontrol >> 4) & 0xf;
2975 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2976 
2977 	return spd != target;
2978 }
2979 
2980 /**
2981  *	sata_set_spd_needed - is SATA spd configuration needed
2982  *	@link: Link in question
2983  *
2984  *	Test whether the spd limit in SControl matches
2985  *	@link->sata_spd_limit.  This function is used to determine
2986  *	whether hardreset is necessary to apply SATA spd
2987  *	configuration.
2988  *
2989  *	LOCKING:
2990  *	Inherited from caller.
2991  *
2992  *	RETURNS:
2993  *	1 if SATA spd configuration is needed, 0 otherwise.
2994  */
2995 static int sata_set_spd_needed(struct ata_link *link)
2996 {
2997 	u32 scontrol;
2998 
2999 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3000 		return 1;
3001 
3002 	return __sata_set_spd_needed(link, &scontrol);
3003 }
3004 
3005 /**
3006  *	sata_set_spd - set SATA spd according to spd limit
3007  *	@link: Link to set SATA spd for
3008  *
3009  *	Set SATA spd of @link according to sata_spd_limit.
3010  *
3011  *	LOCKING:
3012  *	Inherited from caller.
3013  *
3014  *	RETURNS:
3015  *	0 if spd doesn't need to be changed, 1 if spd has been
3016  *	changed.  Negative errno if SCR registers are inaccessible.
3017  */
3018 int sata_set_spd(struct ata_link *link)
3019 {
3020 	u32 scontrol;
3021 	int rc;
3022 
3023 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3024 		return rc;
3025 
3026 	if (!__sata_set_spd_needed(link, &scontrol))
3027 		return 0;
3028 
3029 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3030 		return rc;
3031 
3032 	return 1;
3033 }
3034 
3035 /*
3036  * This mode timing computation functionality is ported over from
3037  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3038  */
3039 /*
3040  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3041  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3042  * for UDMA6, which is currently supported only by Maxtor drives.
3043  *
3044  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3045  */
3046 
3047 static const struct ata_timing ata_timing[] = {
3048 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
3049 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
3050 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
3051 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
3052 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
3053 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
3054 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
3055 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
3056 
3057 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
3058 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
3059 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
3060 
3061 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
3062 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
3063 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
3064 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
3065 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
3066 
3067 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
3068 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
3069 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
3070 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
3071 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
3072 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
3073 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
3074 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
3075 
3076 	{ 0xFF }
3077 };
3078 
/* ENOUGH() rounds a timing value up to the next whole clock period,
 * EZ() additionally keeps unspecified (zero) timings at zero.
 */
3079 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3080 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
3081 
3082 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3083 {
3084 	q->setup	= EZ(t->setup      * 1000,  T);
3085 	q->act8b	= EZ(t->act8b      * 1000,  T);
3086 	q->rec8b	= EZ(t->rec8b      * 1000,  T);
3087 	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
3088 	q->active	= EZ(t->active     * 1000,  T);
3089 	q->recover	= EZ(t->recover    * 1000,  T);
3090 	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
3091 	q->cycle	= EZ(t->cycle      * 1000,  T);
3092 	q->udma		= EZ(t->udma       * 1000, UT);
3093 }
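/*
 * Worked example (illustrative): callers such as the PATA drivers
 * usually pass T as the bus clock period expressed in picoseconds
 * (e.g. T = 1000000000 / 33333 ~= 30000 for a 33.3 MHz clock), which is
 * why each nanosecond value above is multiplied by 1000 first.  For
 * XFER_PIO_4 (active 70 ns, cycle 120 ns) this quantizes to
 * ENOUGH(70000, 30000) == 3 active clocks and ENOUGH(120000, 30000) == 4
 * clocks per cycle; zero entries stay zero thanks to EZ().
 */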
3094 
3095 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3096 		      struct ata_timing *m, unsigned int what)
3097 {
3098 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3099 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3100 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3101 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3102 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3103 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3104 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3105 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3106 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3107 }
3108 
3109 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3110 {
3111 	const struct ata_timing *t = ata_timing;
3112 
3113 	while (xfer_mode > t->mode)
3114 		t++;
3115 
3116 	if (xfer_mode == t->mode)
3117 		return t;
3118 	return NULL;
3119 }
3120 
3121 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3122 		       struct ata_timing *t, int T, int UT)
3123 {
3124 	const u16 *id = adev->id;
3125 	const struct ata_timing *s;
3126 	struct ata_timing p;
3127 
3128 	/*
3129 	 * Find the mode.
3130 	 */
3131 
3132 	if (!(s = ata_timing_find_mode(speed)))
3133 		return -EINVAL;
3134 
3135 	memcpy(t, s, sizeof(*s));
3136 
3137 	/*
3138 	 * If the drive is an EIDE drive, it can tell us it needs extended
3139 	 * PIO/MW_DMA cycle timing.
3140 	 */
3141 
3142 	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3143 		memset(&p, 0, sizeof(p));
3144 
3145 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3146 			if (speed <= XFER_PIO_2)
3147 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3148 			else if ((speed <= XFER_PIO_4) ||
3149 				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3150 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3151 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3152 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3153 
3154 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3155 	}
3156 
3157 	/*
3158 	 * Convert the timing to bus clock counts.
3159 	 */
3160 
3161 	ata_timing_quantize(t, t, T, UT);
3162 
3163 	/*
3164 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3165 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3166 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3167 	 */
3168 
3169 	if (speed > XFER_PIO_6) {
3170 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3171 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3172 	}
3173 
3174 	/*
3175 	 * Lengthen active & recovery time so that cycle time is correct.
3176 	 */
3177 
3178 	if (t->act8b + t->rec8b < t->cyc8b) {
3179 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3180 		t->rec8b = t->cyc8b - t->act8b;
3181 	}
3182 
3183 	if (t->active + t->recover < t->cycle) {
3184 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3185 		t->recover = t->cycle - t->active;
3186 	}
3187 
3188 	/* In a few cases quantisation may produce enough errors to
3189 	   leave t->cycle too low for the sum of active and recovery;
3190 	   if so, we must correct this */
3191 	if (t->active + t->recover > t->cycle)
3192 		t->cycle = t->active + t->recover;
3193 
3194 	return 0;
3195 }
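/*
 * Usage sketch (illustrative only): a PATA controller driver might
 * compute the clock counts to program for the currently selected PIO
 * mode along these lines, with T the clock period in picoseconds (as in
 * the quantization example above) and UT an example UDMA strobe period
 * derived from the same clock:
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;
 *	int UT = T / 2;
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, T, UT) == 0)
 *		chip_program_pio(ap, adev, t.setup, t.active, t.recover);
 *
 * chip_program_pio() is a hypothetical helper standing in for whatever
 * register writes the particular controller needs.
 */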
3196 
3197 /**
3198  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3199  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3200  *	@cycle: cycle duration in ns
3201  *
3202  *	Return matching xfer mode for @cycle.  The returned mode is of
3203  *	the transfer type specified by @xfer_shift.  If @cycle is too
3204  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3205  *	than the fastest known mode, the fastest mode is returned.
3206  *
3207  *	LOCKING:
3208  *	None.
3209  *
3210  *	RETURNS:
3211  *	Matching xfer_mode, 0xff if no match found.
3212  */
3213 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3214 {
3215 	u8 base_mode = 0xff, last_mode = 0xff;
3216 	const struct ata_xfer_ent *ent;
3217 	const struct ata_timing *t;
3218 
3219 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3220 		if (ent->shift == xfer_shift)
3221 			base_mode = ent->base;
3222 
3223 	for (t = ata_timing_find_mode(base_mode);
3224 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3225 		unsigned short this_cycle;
3226 
3227 		switch (xfer_shift) {
3228 		case ATA_SHIFT_PIO:
3229 		case ATA_SHIFT_MWDMA:
3230 			this_cycle = t->cycle;
3231 			break;
3232 		case ATA_SHIFT_UDMA:
3233 			this_cycle = t->udma;
3234 			break;
3235 		default:
3236 			return 0xff;
3237 		}
3238 
3239 		if (cycle > this_cycle)
3240 			break;
3241 
3242 		last_mode = t->mode;
3243 	}
3244 
3245 	return last_mode;
3246 }
3247 
3248 /**
3249  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3250  *	@dev: Device to adjust xfer masks
3251  *	@sel: ATA_DNXFER_* selector
3252  *
3253  *	Adjust xfer masks of @dev downward.  Note that this function
3254  *	does not apply the change.  Invoking ata_set_mode() afterwards
3255  *	will apply the limit.
3256  *
3257  *	LOCKING:
3258  *	Inherited from caller.
3259  *
3260  *	RETURNS:
3261  *	0 on success, negative errno on failure
3262  */
3263 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3264 {
3265 	char buf[32];
3266 	unsigned long orig_mask, xfer_mask;
3267 	unsigned long pio_mask, mwdma_mask, udma_mask;
3268 	int quiet, highbit;
3269 
3270 	quiet = !!(sel & ATA_DNXFER_QUIET);
3271 	sel &= ~ATA_DNXFER_QUIET;
3272 
3273 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3274 						  dev->mwdma_mask,
3275 						  dev->udma_mask);
3276 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3277 
3278 	switch (sel) {
3279 	case ATA_DNXFER_PIO:
3280 		highbit = fls(pio_mask) - 1;
3281 		pio_mask &= ~(1 << highbit);
3282 		break;
3283 
3284 	case ATA_DNXFER_DMA:
3285 		if (udma_mask) {
3286 			highbit = fls(udma_mask) - 1;
3287 			udma_mask &= ~(1 << highbit);
3288 			if (!udma_mask)
3289 				return -ENOENT;
3290 		} else if (mwdma_mask) {
3291 			highbit = fls(mwdma_mask) - 1;
3292 			mwdma_mask &= ~(1 << highbit);
3293 			if (!mwdma_mask)
3294 				return -ENOENT;
3295 		}
3296 		break;
3297 
3298 	case ATA_DNXFER_40C:
3299 		udma_mask &= ATA_UDMA_MASK_40C;
3300 		break;
3301 
3302 	case ATA_DNXFER_FORCE_PIO0:
3303 		pio_mask &= 1;
		/* fall through */
3304 	case ATA_DNXFER_FORCE_PIO:
3305 		mwdma_mask = 0;
3306 		udma_mask = 0;
3307 		break;
3308 
3309 	default:
3310 		BUG();
3311 	}
3312 
3313 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3314 
3315 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3316 		return -ENOENT;
3317 
3318 	if (!quiet) {
3319 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3320 			snprintf(buf, sizeof(buf), "%s:%s",
3321 				 ata_mode_string(xfer_mask),
3322 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3323 		else
3324 			snprintf(buf, sizeof(buf), "%s",
3325 				 ata_mode_string(xfer_mask));
3326 
3327 		ata_dev_printk(dev, KERN_WARNING,
3328 			       "limiting speed to %s\n", buf);
3329 	}
3330 
3331 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3332 			    &dev->udma_mask);
3333 
3334 	return 0;
3335 }
3336 
3337 static int ata_dev_set_mode(struct ata_device *dev)
3338 {
3339 	struct ata_port *ap = dev->link->ap;
3340 	struct ata_eh_context *ehc = &dev->link->eh_context;
3341 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3342 	const char *dev_err_whine = "";
3343 	int ign_dev_err = 0;
3344 	unsigned int err_mask = 0;
3345 	int rc;
3346 
3347 	dev->flags &= ~ATA_DFLAG_PIO;
3348 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3349 		dev->flags |= ATA_DFLAG_PIO;
3350 
3351 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3352 		dev_err_whine = " (SET_XFERMODE skipped)";
3353 	else {
3354 		if (nosetxfer)
3355 			ata_dev_printk(dev, KERN_WARNING,
3356 				       "NOSETXFER but PATA detected - can't "
3357 				       "skip SETXFER, might malfunction\n");
3358 		err_mask = ata_dev_set_xfermode(dev);
3359 	}
3360 
3361 	if (err_mask & ~AC_ERR_DEV)
3362 		goto fail;
3363 
3364 	/* revalidate */
3365 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3366 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3367 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3368 	if (rc)
3369 		return rc;
3370 
3371 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3372 		/* Old CFA may refuse this command, which is just fine */
3373 		if (ata_id_is_cfa(dev->id))
3374 			ign_dev_err = 1;
3375 		/* Catch several broken garbage emulations plus some pre
3376 		   ATA devices */
3377 		if (ata_id_major_version(dev->id) == 0 &&
3378 					dev->pio_mode <= XFER_PIO_2)
3379 			ign_dev_err = 1;
3380 		/* Some very old devices and some bad newer ones fail
3381 		   any kind of SET_XFERMODE request but support PIO0-2
3382 		   timings and no IORDY */
3383 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3384 			ign_dev_err = 1;
3385 	}
3386 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3387 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3388 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3389 	    dev->dma_mode == XFER_MW_DMA_0 &&
3390 	    (dev->id[63] >> 8) & 1)
3391 		ign_dev_err = 1;
3392 
3393 	/* if the device is actually configured correctly, ignore dev err */
3394 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3395 		ign_dev_err = 1;
3396 
3397 	if (err_mask & AC_ERR_DEV) {
3398 		if (!ign_dev_err)
3399 			goto fail;
3400 		else
3401 			dev_err_whine = " (device error ignored)";
3402 	}
3403 
3404 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3405 		dev->xfer_shift, (int)dev->xfer_mode);
3406 
3407 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3408 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3409 		       dev_err_whine);
3410 
3411 	return 0;
3412 
3413  fail:
3414 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3415 		       "(err_mask=0x%x)\n", err_mask);
3416 	return -EIO;
3417 }
3418 
3419 /**
3420  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3421  *	@link: link on which timings will be programmed
3422  *	@r_failed_dev: out parameter for failed device
3423  *
3424  *	Standard implementation of the function used to tune and set
3425  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3426  *	ata_dev_set_mode() fails, pointer to the failing device is
3427  *	returned in @r_failed_dev.
3428  *
3429  *	LOCKING:
3430  *	PCI/etc. bus probe sem.
3431  *
3432  *	RETURNS:
3433  *	0 on success, negative errno otherwise
3434  */
3435 
3436 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3437 {
3438 	struct ata_port *ap = link->ap;
3439 	struct ata_device *dev;
3440 	int rc = 0, used_dma = 0, found = 0;
3441 
3442 	/* step 1: calculate xfer_mask */
3443 	ata_for_each_dev(dev, link, ENABLED) {
3444 		unsigned long pio_mask, dma_mask;
3445 		unsigned int mode_mask;
3446 
3447 		mode_mask = ATA_DMA_MASK_ATA;
3448 		if (dev->class == ATA_DEV_ATAPI)
3449 			mode_mask = ATA_DMA_MASK_ATAPI;
3450 		else if (ata_id_is_cfa(dev->id))
3451 			mode_mask = ATA_DMA_MASK_CFA;
3452 
3453 		ata_dev_xfermask(dev);
3454 		ata_force_xfermask(dev);
3455 
3456 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3457 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3458 
3459 		if (libata_dma_mask & mode_mask)
3460 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3461 		else
3462 			dma_mask = 0;
3463 
3464 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3465 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3466 
3467 		found = 1;
3468 		if (ata_dma_enabled(dev))
3469 			used_dma = 1;
3470 	}
3471 	if (!found)
3472 		goto out;
3473 
3474 	/* step 2: always set host PIO timings */
3475 	ata_for_each_dev(dev, link, ENABLED) {
3476 		if (dev->pio_mode == 0xff) {
3477 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3478 			rc = -EINVAL;
3479 			goto out;
3480 		}
3481 
3482 		dev->xfer_mode = dev->pio_mode;
3483 		dev->xfer_shift = ATA_SHIFT_PIO;
3484 		if (ap->ops->set_piomode)
3485 			ap->ops->set_piomode(ap, dev);
3486 	}
3487 
3488 	/* step 3: set host DMA timings */
3489 	ata_for_each_dev(dev, link, ENABLED) {
3490 		if (!ata_dma_enabled(dev))
3491 			continue;
3492 
3493 		dev->xfer_mode = dev->dma_mode;
3494 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3495 		if (ap->ops->set_dmamode)
3496 			ap->ops->set_dmamode(ap, dev);
3497 	}
3498 
3499 	/* step 4: update devices' xfer mode */
3500 	ata_for_each_dev(dev, link, ENABLED) {
3501 		rc = ata_dev_set_mode(dev);
3502 		if (rc)
3503 			goto out;
3504 	}
3505 
3506 	/* Record simplex status. If we selected DMA then the other
3507 	 * host channels are not permitted to do so.
3508 	 */
3509 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3510 		ap->host->simplex_claimed = ap;
3511 
3512  out:
3513 	if (rc)
3514 		*r_failed_dev = dev;
3515 	return rc;
3516 }
3517 
3518 /**
3519  *	ata_wait_ready - wait for link to become ready
3520  *	@link: link to be waited on
3521  *	@deadline: deadline jiffies for the operation
3522  *	@check_ready: callback to check link readiness
3523  *
3524  *	Wait for @link to become ready.  @check_ready should return
3525  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3526  *	link doesn't seem to be occupied, other errno for other error
3527  *	conditions.
3528  *
3529  *	Transient -ENODEV conditions are allowed for
3530  *	ATA_TMOUT_FF_WAIT.
3531  *
3532  *	LOCKING:
3533  *	EH context.
3534  *
3535  *	RETURNS:
3536  *	0 if @link is ready before @deadline; otherwise, -errno.
3537  */
3538 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3539 		   int (*check_ready)(struct ata_link *link))
3540 {
3541 	unsigned long start = jiffies;
3542 	unsigned long nodev_deadline;
3543 	int warned = 0;
3544 
3545 	/* choose which 0xff timeout to use, read comment in libata.h */
3546 	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3547 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3548 	else
3549 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3550 
3551 	/* Slave readiness can't be tested separately from master.  On
3552 	 * M/S emulation configuration, this function should be called
3553 	 * only on the master and it will handle both master and slave.
3554 	 */
3555 	WARN_ON(link == link->ap->slave_link);
3556 
3557 	if (time_after(nodev_deadline, deadline))
3558 		nodev_deadline = deadline;
3559 
3560 	while (1) {
3561 		unsigned long now = jiffies;
3562 		int ready, tmp;
3563 
3564 		ready = tmp = check_ready(link);
3565 		if (ready > 0)
3566 			return 0;
3567 
3568 		/*
3569 		 * -ENODEV could be transient.  Ignore -ENODEV if link
3570 		 * is online.  Also, some SATA devices take a long
3571 		 * time to clear 0xff after reset.  Wait for
3572 		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3573 		 * offline.
3574 		 *
3575 		 * Note that some PATA controllers (pata_ali) explode
3576 		 * if status register is read more than once when
3577 		 * there's no device attached.
3578 		 */
3579 		if (ready == -ENODEV) {
3580 			if (ata_link_online(link))
3581 				ready = 0;
3582 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3583 				 !ata_link_offline(link) &&
3584 				 time_before(now, nodev_deadline))
3585 				ready = 0;
3586 		}
3587 
3588 		if (ready)
3589 			return ready;
3590 		if (time_after(now, deadline))
3591 			return -EBUSY;
3592 
3593 		if (!warned && time_after(now, start + 5 * HZ) &&
3594 		    (deadline - now > 3 * HZ)) {
3595 			ata_link_printk(link, KERN_WARNING,
3596 				"link is slow to respond, please be patient "
3597 				"(ready=%d)\n", tmp);
3598 			warned = 1;
3599 		}
3600 
3601 		msleep(50);
3602 	}
3603 }
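/*
 * Illustrative sketch only: for an SFF-style controller a minimal
 * @check_ready callback could be built on ata_check_ready(), which maps
 * a taskfile status byte to the convention described above (positive
 * when BSY is clear, 0 while the device is still busy, -ENODEV for an
 * 0xff "nothing attached" status):
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		u8 status = ioread8(link->ap->ioaddr.status_addr);
 *
 *		return ata_check_ready(status);
 *	}
 *
 * my_check_ready() here is a hypothetical example, not a libata export;
 * SFF drivers normally rely on ata_sff_wait_ready() instead.
 */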
3604 
3605 /**
3606  *	ata_wait_after_reset - wait for link to become ready after reset
3607  *	@link: link to be waited on
3608  *	@deadline: deadline jiffies for the operation
3609  *	@check_ready: callback to check link readiness
3610  *
3611  *	Wait for @link to become ready after reset.
3612  *
3613  *	LOCKING:
3614  *	EH context.
3615  *
3616  *	RETURNS:
3617  *	0 if @link is ready before @deadline; otherwise, -errno.
3618  */
3619 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3620 				int (*check_ready)(struct ata_link *link))
3621 {
3622 	msleep(ATA_WAIT_AFTER_RESET);
3623 
3624 	return ata_wait_ready(link, deadline, check_ready);
3625 }
3626 
3627 /**
3628  *	sata_link_debounce - debounce SATA phy status
3629  *	@link: ATA link to debounce SATA phy status for
3630  *	@params: timing parameters { interval, duration, timeout } in msec
3631  *	@deadline: deadline jiffies for the operation
3632  *
3633  *	Make sure SStatus of @link reaches a stable state, determined by
3634  *	holding the same value where DET is not 1 for @duration polled
3635  *	every @interval, before @timeout.  Timeout constrains the
3636  *	beginning of the stable state.  Because DET gets stuck at 1 on
3637  *	some controllers after hot unplugging, this function waits
3638  *	until timeout and then returns 0 if DET is stable at 1.
3639  *
3640  *	@timeout is further limited by @deadline.  The sooner of the
3641  *	two is used.
3642  *
3643  *	LOCKING:
3644  *	Kernel thread context (may sleep)
3645  *
3646  *	RETURNS:
3647  *	0 on success, -errno on failure.
3648  */
3649 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3650 		       unsigned long deadline)
3651 {
3652 	unsigned long interval = params[0];
3653 	unsigned long duration = params[1];
3654 	unsigned long last_jiffies, t;
3655 	u32 last, cur;
3656 	int rc;
3657 
3658 	t = ata_deadline(jiffies, params[2]);
3659 	if (time_before(t, deadline))
3660 		deadline = t;
3661 
3662 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3663 		return rc;
3664 	cur &= 0xf;
3665 
3666 	last = cur;
3667 	last_jiffies = jiffies;
3668 
3669 	while (1) {
3670 		msleep(interval);
3671 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3672 			return rc;
3673 		cur &= 0xf;
3674 
3675 		/* DET stable? */
3676 		if (cur == last) {
3677 			if (cur == 1 && time_before(jiffies, deadline))
3678 				continue;
3679 			if (time_after(jiffies,
3680 				       ata_deadline(last_jiffies, duration)))
3681 				return 0;
3682 			continue;
3683 		}
3684 
3685 		/* unstable, start over */
3686 		last = cur;
3687 		last_jiffies = jiffies;
3688 
3689 		/* Check deadline.  If debouncing failed, return
3690 		 * -EPIPE to tell upper layer to lower link speed.
3691 		 */
3692 		if (time_after(jiffies, deadline))
3693 			return -EPIPE;
3694 	}
3695 }
3696 
3697 /**
3698  *	sata_link_resume - resume SATA link
3699  *	@link: ATA link to resume SATA
3700  *	@params: timing parameters { interval, duration, timeout } in msec
3701  *	@deadline: deadline jiffies for the operation
3702  *
3703  *	Resume SATA phy @link and debounce it.
3704  *
3705  *	LOCKING:
3706  *	Kernel thread context (may sleep)
3707  *
3708  *	RETURNS:
3709  *	0 on success, -errno on failure.
3710  */
3711 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3712 		     unsigned long deadline)
3713 {
3714 	int tries = ATA_LINK_RESUME_TRIES;
3715 	u32 scontrol, serror;
3716 	int rc;
3717 
3718 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3719 		return rc;
3720 
3721 	/*
3722 	 * Writes to SControl sometimes get ignored under certain
3723 	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3724 	 * cleared.
3725 	 */
3726 	do {
3727 		scontrol = (scontrol & 0x0f0) | 0x300;
3728 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3729 			return rc;
3730 		/*
3731 		 * Some PHYs react badly if SStatus is pounded
3732 		 * immediately after resuming.  Delay 200ms before
3733 		 * debouncing.
3734 		 */
3735 		msleep(200);
3736 
3737 		/* is SControl restored correctly? */
3738 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3739 			return rc;
3740 	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3741 
3742 	if ((scontrol & 0xf0f) != 0x300) {
3743 		ata_link_printk(link, KERN_ERR,
3744 				"failed to resume link (SControl %X)\n",
3745 				scontrol);
3746 		return 0;
3747 	}
3748 
3749 	if (tries < ATA_LINK_RESUME_TRIES)
3750 		ata_link_printk(link, KERN_WARNING,
3751 				"link resume succeeded after %d retries\n",
3752 				ATA_LINK_RESUME_TRIES - tries);
3753 
3754 	if ((rc = sata_link_debounce(link, params, deadline)))
3755 		return rc;
3756 
3757 	/* clear SError, some PHYs require this even for SRST to work */
3758 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3759 		rc = sata_scr_write(link, SCR_ERROR, serror);
3760 
3761 	return rc != -EINVAL ? rc : 0;
3762 }
3763 
3764 /**
3765  *	ata_std_prereset - prepare for reset
3766  *	@link: ATA link to be reset
3767  *	@deadline: deadline jiffies for the operation
3768  *
3769  *	@link is about to be reset.  Initialize it.  Failure from
3770  *	prereset makes libata abort the whole reset sequence and give up
3771  *	that port, so prereset should be best-effort.  It does its
3772  *	best to prepare for the reset sequence but if things go wrong, it
3773  *	should just whine, not fail.
3774  *
3775  *	LOCKING:
3776  *	Kernel thread context (may sleep)
3777  *
3778  *	RETURNS:
3779  *	0 on success, -errno otherwise.
3780  */
3781 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3782 {
3783 	struct ata_port *ap = link->ap;
3784 	struct ata_eh_context *ehc = &link->eh_context;
3785 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3786 	int rc;
3787 
3788 	/* if we're about to do hardreset, nothing more to do */
3789 	if (ehc->i.action & ATA_EH_HARDRESET)
3790 		return 0;
3791 
3792 	/* if SATA, resume link */
3793 	if (ap->flags & ATA_FLAG_SATA) {
3794 		rc = sata_link_resume(link, timing, deadline);
3795 		/* whine about phy resume failure but proceed */
3796 		if (rc && rc != -EOPNOTSUPP)
3797 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3798 					"link for reset (errno=%d)\n", rc);
3799 	}
3800 
3801 	/* no point in trying softreset on offline link */
3802 	if (ata_phys_link_offline(link))
3803 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3804 
3805 	return 0;
3806 }
3807 
3808 /**
3809  *	sata_link_hardreset - reset link via SATA phy reset
3810  *	@link: link to reset
3811  *	@timing: timing parameters { interval, duration, timeout } in msec
3812  *	@deadline: deadline jiffies for the operation
3813  *	@online: optional out parameter indicating link onlineness
3814  *	@check_ready: optional callback to check link readiness
3815  *
3816  *	SATA phy-reset @link using DET bits of SControl register.
3817  *	After hardreset, link readiness is waited upon using
3818  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3819  *	allowed to not specify @check_ready and wait themselves after this
3820  *	function returns.  Device classification is LLD's
3821  *	responsibility.
3822  *
3823  *	*@online is set to one iff reset succeeded and @link is online
3824  *	after reset.
3825  *
3826  *	LOCKING:
3827  *	Kernel thread context (may sleep)
3828  *
3829  *	RETURNS:
3830  *	0 on success, -errno otherwise.
3831  */
3832 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3833 			unsigned long deadline,
3834 			bool *online, int (*check_ready)(struct ata_link *))
3835 {
3836 	u32 scontrol;
3837 	int rc;
3838 
3839 	DPRINTK("ENTER\n");
3840 
3841 	if (online)
3842 		*online = false;
3843 
3844 	if (sata_set_spd_needed(link)) {
3845 		/* SATA spec says nothing about how to reconfigure
3846 		 * spd.  To be on the safe side, turn off phy during
3847 		 * reconfiguration.  This works for at least ICH7 AHCI
3848 		 * and Sil3124.
3849 		 */
3850 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3851 			goto out;
3852 
3853 		scontrol = (scontrol & 0x0f0) | 0x304;
3854 
3855 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3856 			goto out;
3857 
3858 		sata_set_spd(link);
3859 	}
3860 
3861 	/* issue phy wake/reset */
3862 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3863 		goto out;
3864 
3865 	scontrol = (scontrol & 0x0f0) | 0x301;
3866 
3867 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3868 		goto out;
3869 
3870 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3871 	 * 10.4.2 says at least 1 ms.
3872 	 */
3873 	msleep(1);
3874 
3875 	/* bring link back */
3876 	rc = sata_link_resume(link, timing, deadline);
3877 	if (rc)
3878 		goto out;
3879 	/* if link is offline nothing more to do */
3880 	if (ata_phys_link_offline(link))
3881 		goto out;
3882 
3883 	/* Link is online.  From this point, -ENODEV too is an error. */
3884 	if (online)
3885 		*online = true;
3886 
3887 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3888 		/* If PMP is supported, we have to do follow-up SRST.
3889 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3890 		 * the first port is empty.  Wait only for
3891 		 * ATA_TMOUT_PMP_SRST_WAIT.
3892 		 */
3893 		if (check_ready) {
3894 			unsigned long pmp_deadline;
3895 
3896 			pmp_deadline = ata_deadline(jiffies,
3897 						    ATA_TMOUT_PMP_SRST_WAIT);
3898 			if (time_after(pmp_deadline, deadline))
3899 				pmp_deadline = deadline;
3900 			ata_wait_ready(link, pmp_deadline, check_ready);
3901 		}
3902 		rc = -EAGAIN;
3903 		goto out;
3904 	}
3905 
3906 	rc = 0;
3907 	if (check_ready)
3908 		rc = ata_wait_ready(link, deadline, check_ready);
3909  out:
3910 	if (rc && rc != -EAGAIN) {
3911 		/* online is set iff link is online && reset succeeded */
3912 		if (online)
3913 			*online = false;
3914 		ata_link_printk(link, KERN_ERR,
3915 				"COMRESET failed (errno=%d)\n", rc);
3916 	}
3917 	DPRINTK("EXIT, rc=%d\n", rc);
3918 	return rc;
3919 }
3920 
3921 /**
3922  *	sata_std_hardreset - COMRESET w/o waiting or classification
3923  *	@link: link to reset
3924  *	@class: resulting class of attached device
3925  *	@deadline: deadline jiffies for the operation
3926  *
3927  *	Standard SATA COMRESET w/o waiting or classification.
3928  *
3929  *	LOCKING:
3930  *	Kernel thread context (may sleep)
3931  *
3932  *	RETURNS:
3933  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3934  */
3935 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3936 		       unsigned long deadline)
3937 {
3938 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3939 	bool online;
3940 	int rc;
3941 
3942 	/* do hardreset */
3943 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3944 	return online ? -EAGAIN : rc;
3945 }
3946 
3947 /**
3948  *	ata_std_postreset - standard postreset callback
3949  *	@link: the target ata_link
3950  *	@classes: classes of attached devices
3951  *
3952  *	This function is invoked after a successful reset.  Note that
3953  *	the device might have been reset more than once using
3954  *	different reset methods before postreset is invoked.
3955  *
3956  *	LOCKING:
3957  *	Kernel thread context (may sleep)
3958  */
3959 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3960 {
3961 	u32 serror;
3962 
3963 	DPRINTK("ENTER\n");
3964 
3965 	/* reset complete, clear SError */
3966 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3967 		sata_scr_write(link, SCR_ERROR, serror);
3968 
3969 	/* print link status */
3970 	sata_print_link_status(link);
3971 
3972 	DPRINTK("EXIT\n");
3973 }
3974 
3975 /**
3976  *	ata_dev_same_device - Determine whether new ID matches configured device
3977  *	@dev: device to compare against
3978  *	@new_class: class of the new device
3979  *	@new_id: IDENTIFY page of the new device
3980  *
3981  *	Compare @new_class and @new_id against @dev and determine
3982  *	whether @dev is the device indicated by @new_class and
3983  *	@new_id.
3984  *
3985  *	LOCKING:
3986  *	None.
3987  *
3988  *	RETURNS:
3989  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3990  */
3991 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3992 			       const u16 *new_id)
3993 {
3994 	const u16 *old_id = dev->id;
3995 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3996 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3997 
3998 	if (dev->class != new_class) {
3999 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4000 			       dev->class, new_class);
4001 		return 0;
4002 	}
4003 
4004 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4005 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4006 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4007 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4008 
4009 	if (strcmp(model[0], model[1])) {
4010 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4011 			       "'%s' != '%s'\n", model[0], model[1]);
4012 		return 0;
4013 	}
4014 
4015 	if (strcmp(serial[0], serial[1])) {
4016 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4017 			       "'%s' != '%s'\n", serial[0], serial[1]);
4018 		return 0;
4019 	}
4020 
4021 	return 1;
4022 }
4023 
4024 /**
4025  *	ata_dev_reread_id - Re-read IDENTIFY data
4026  *	@dev: target ATA device
4027  *	@readid_flags: read ID flags
4028  *
4029  *	Re-read IDENTIFY page and make sure @dev is still attached to
4030  *	the port.
4031  *
4032  *	LOCKING:
4033  *	Kernel thread context (may sleep)
4034  *
4035  *	RETURNS:
4036  *	0 on success, negative errno otherwise
4037  */
4038 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4039 {
4040 	unsigned int class = dev->class;
4041 	u16 *id = (void *)dev->link->ap->sector_buf;
4042 	int rc;
4043 
4044 	/* read ID data */
4045 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4046 	if (rc)
4047 		return rc;
4048 
4049 	/* is the device still there? */
4050 	if (!ata_dev_same_device(dev, class, id))
4051 		return -ENODEV;
4052 
4053 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4054 	return 0;
4055 }
4056 
4057 /**
4058  *	ata_dev_revalidate - Revalidate ATA device
4059  *	@dev: device to revalidate
4060  *	@new_class: new class code
4061  *	@readid_flags: read ID flags
4062  *
4063  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4064  *	port and reconfigure it according to the new IDENTIFY page.
4065  *
4066  *	LOCKING:
4067  *	Kernel thread context (may sleep)
4068  *
4069  *	RETURNS:
4070  *	0 on success, negative errno otherwise
4071  */
4072 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4073 		       unsigned int readid_flags)
4074 {
4075 	u64 n_sectors = dev->n_sectors;
4076 	u64 n_native_sectors = dev->n_native_sectors;
4077 	int rc;
4078 
4079 	if (!ata_dev_enabled(dev))
4080 		return -ENODEV;
4081 
4082 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4083 	if (ata_class_enabled(new_class) &&
4084 	    new_class != ATA_DEV_ATA &&
4085 	    new_class != ATA_DEV_ATAPI &&
4086 	    new_class != ATA_DEV_SEMB) {
4087 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4088 			       dev->class, new_class);
4089 		rc = -ENODEV;
4090 		goto fail;
4091 	}
4092 
4093 	/* re-read ID */
4094 	rc = ata_dev_reread_id(dev, readid_flags);
4095 	if (rc)
4096 		goto fail;
4097 
4098 	/* configure device according to the new ID */
4099 	rc = ata_dev_configure(dev);
4100 	if (rc)
4101 		goto fail;
4102 
4103 	/* verify n_sectors hasn't changed */
4104 	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4105 	    dev->n_sectors == n_sectors)
4106 		return 0;
4107 
4108 	/* n_sectors has changed */
4109 	ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
4110 		       (unsigned long long)n_sectors,
4111 		       (unsigned long long)dev->n_sectors);
4112 
4113 	/*
4114 	 * Something could have caused HPA to be unlocked
4115 	 * involuntarily.  If n_native_sectors hasn't changed and the
4116 	 * new size matches it, keep the device.
4117 	 */
4118 	if (dev->n_native_sectors == n_native_sectors &&
4119 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4120 		ata_dev_printk(dev, KERN_WARNING,
4121 			       "new n_sectors matches native, probably "
4122 			       "late HPA unlock, n_sectors updated\n");
4123 		/* use the larger n_sectors */
4124 		return 0;
4125 	}
4126 
4127 	/*
4128 	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4129 	 * unlocking HPA in those cases.
4130 	 *
4131 	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4132 	 */
4133 	if (dev->n_native_sectors == n_native_sectors &&
4134 	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4135 	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4136 		ata_dev_printk(dev, KERN_WARNING,
4137 			       "old n_sectors matches native, probably "
4138 			       "late HPA lock, will try to unlock HPA\n");
4139 		/* try unlocking HPA */
4140 		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4141 		rc = -EIO;
4142 	} else
4143 		rc = -ENODEV;
4144 
4145 	/* restore original n_[native_]sectors and fail */
4146 	dev->n_native_sectors = n_native_sectors;
4147 	dev->n_sectors = n_sectors;
4148  fail:
4149 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4150 	return rc;
4151 }
4152 
4153 struct ata_blacklist_entry {
4154 	const char *model_num;
4155 	const char *model_rev;
4156 	unsigned long horkage;
4157 };
4158 
4159 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4160 	/* Devices with DMA related problems under Linux */
4161 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4162 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4163 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4164 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4165 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4166 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4167 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4168 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4169 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4170 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
4171 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
4172 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4173 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4174 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4175 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4176 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4177 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
4178 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
4179 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4180 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4181 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4182 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4183 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4184 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4185 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4186 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4187 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4188 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4189 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4190 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4191 	/* Odd clown on sil3726/4726 PMPs */
4192 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4193 
4194 	/* Weird ATAPI devices */
4195 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4196 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4197 
4198 	/* Devices we expect to fail diagnostics */
4199 
4200 	/* Devices where NCQ should be avoided */
4201 	/* NCQ is slow */
4202 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4203 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4204 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4205 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4206 	/* NCQ is broken */
4207 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4208 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4209 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4210 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4211 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4212 
4213 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4214 	{ "ST31500341AS",	"SD15",		ATA_HORKAGE_NONCQ |
4215 						ATA_HORKAGE_FIRMWARE_WARN },
4216 	{ "ST31500341AS",	"SD16",		ATA_HORKAGE_NONCQ |
4217 						ATA_HORKAGE_FIRMWARE_WARN },
4218 	{ "ST31500341AS",	"SD17",		ATA_HORKAGE_NONCQ |
4219 						ATA_HORKAGE_FIRMWARE_WARN },
4220 	{ "ST31500341AS",	"SD18",		ATA_HORKAGE_NONCQ |
4221 						ATA_HORKAGE_FIRMWARE_WARN },
4222 	{ "ST31500341AS",	"SD19",		ATA_HORKAGE_NONCQ |
4223 						ATA_HORKAGE_FIRMWARE_WARN },
4224 
4225 	{ "ST31000333AS",	"SD15",		ATA_HORKAGE_NONCQ |
4226 						ATA_HORKAGE_FIRMWARE_WARN },
4227 	{ "ST31000333AS",	"SD16",		ATA_HORKAGE_NONCQ |
4228 						ATA_HORKAGE_FIRMWARE_WARN },
4229 	{ "ST31000333AS",	"SD17",		ATA_HORKAGE_NONCQ |
4230 						ATA_HORKAGE_FIRMWARE_WARN },
4231 	{ "ST31000333AS",	"SD18",		ATA_HORKAGE_NONCQ |
4232 						ATA_HORKAGE_FIRMWARE_WARN },
4233 	{ "ST31000333AS",	"SD19",		ATA_HORKAGE_NONCQ |
4234 						ATA_HORKAGE_FIRMWARE_WARN },
4235 
4236 	{ "ST3640623AS",	"SD15",		ATA_HORKAGE_NONCQ |
4237 						ATA_HORKAGE_FIRMWARE_WARN },
4238 	{ "ST3640623AS",	"SD16",		ATA_HORKAGE_NONCQ |
4239 						ATA_HORKAGE_FIRMWARE_WARN },
4240 	{ "ST3640623AS",	"SD17",		ATA_HORKAGE_NONCQ |
4241 						ATA_HORKAGE_FIRMWARE_WARN },
4242 	{ "ST3640623AS",	"SD18",		ATA_HORKAGE_NONCQ |
4243 						ATA_HORKAGE_FIRMWARE_WARN },
4244 	{ "ST3640623AS",	"SD19",		ATA_HORKAGE_NONCQ |
4245 						ATA_HORKAGE_FIRMWARE_WARN },
4246 
4247 	{ "ST3640323AS",	"SD15",		ATA_HORKAGE_NONCQ |
4248 						ATA_HORKAGE_FIRMWARE_WARN },
4249 	{ "ST3640323AS",	"SD16",		ATA_HORKAGE_NONCQ |
4250 						ATA_HORKAGE_FIRMWARE_WARN },
4251 	{ "ST3640323AS",	"SD17",		ATA_HORKAGE_NONCQ |
4252 						ATA_HORKAGE_FIRMWARE_WARN },
4253 	{ "ST3640323AS",	"SD18",		ATA_HORKAGE_NONCQ |
4254 						ATA_HORKAGE_FIRMWARE_WARN },
4255 	{ "ST3640323AS",	"SD19",		ATA_HORKAGE_NONCQ |
4256 						ATA_HORKAGE_FIRMWARE_WARN },
4257 
4258 	{ "ST3320813AS",	"SD15",		ATA_HORKAGE_NONCQ |
4259 						ATA_HORKAGE_FIRMWARE_WARN },
4260 	{ "ST3320813AS",	"SD16",		ATA_HORKAGE_NONCQ |
4261 						ATA_HORKAGE_FIRMWARE_WARN },
4262 	{ "ST3320813AS",	"SD17",		ATA_HORKAGE_NONCQ |
4263 						ATA_HORKAGE_FIRMWARE_WARN },
4264 	{ "ST3320813AS",	"SD18",		ATA_HORKAGE_NONCQ |
4265 						ATA_HORKAGE_FIRMWARE_WARN },
4266 	{ "ST3320813AS",	"SD19",		ATA_HORKAGE_NONCQ |
4267 						ATA_HORKAGE_FIRMWARE_WARN },
4268 
4269 	{ "ST3320613AS",	"SD15",		ATA_HORKAGE_NONCQ |
4270 						ATA_HORKAGE_FIRMWARE_WARN },
4271 	{ "ST3320613AS",	"SD16",		ATA_HORKAGE_NONCQ |
4272 						ATA_HORKAGE_FIRMWARE_WARN },
4273 	{ "ST3320613AS",	"SD17",		ATA_HORKAGE_NONCQ |
4274 						ATA_HORKAGE_FIRMWARE_WARN },
4275 	{ "ST3320613AS",	"SD18",		ATA_HORKAGE_NONCQ |
4276 						ATA_HORKAGE_FIRMWARE_WARN },
4277 	{ "ST3320613AS",	"SD19",		ATA_HORKAGE_NONCQ |
4278 						ATA_HORKAGE_FIRMWARE_WARN },
4279 
4280 	/* Blacklist entries taken from Silicon Image 3124/3132
4281 	   Windows driver .inf file - also several Linux problem reports */
4282 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4283 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4284 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4285 
4286 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4287 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4288 
4289 	/* devices which puke on READ_NATIVE_MAX */
4290 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4291 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4292 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4293 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4294 
4295 	/* this one allows HPA unlocking but fails IOs on the area */
4296 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4297 
4298 	/* Devices which report 1 sector over size HPA */
4299 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4300 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4301 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4302 
4303 	/* Devices which get the IVB wrong */
4304 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4305 	/* Maybe we should just blacklist TSSTcorp... */
4306 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
4307 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
4308 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4309 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4310 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4311 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
4312 
4313 	/* Devices that do not need bridging limits applied */
4314 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4315 
4316 	/* Devices which aren't very happy with higher link speeds */
4317 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4318 
4319 	/*
4320 	 * Devices which choke on SETXFER.  Applies only if both the
4321 	 * device and controller are SATA.
4322 	 */
4323 	{ "PIONEER DVD-RW  DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER },
4324 
4325 	/* End Marker */
4326 	{ }
4327 };
4328 
4329 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4330 {
4331 	const char *p;
4332 	int len;
4333 
4334 	/*
4335 	 * check for trailing wildcard: *\0
4336 	 */
4337 	p = strchr(patt, wildchar);
4338 	if (p && ((*(p + 1)) == 0))
4339 		len = p - patt;
4340 	else {
4341 		len = strlen(name);
4342 		if (!len) {
4343 			if (!*patt)
4344 				return 0;
4345 			return -1;
4346 		}
4347 	}
4348 
4349 	return strncmp(patt, name, len);
4350 }
4351 
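/*
 * Worked examples (illustrative; the model/firmware strings below are
 * arbitrary): a pattern ending in the wildcard character is a prefix
 * match, otherwise the whole @name length is compared.
 *
 *	strn_pattern_cmp("Maxtor *",  "Maxtor 7V300F0", '*') == 0
 *	strn_pattern_cmp("BANC*",     "BANC1G10",       '*') == 0
 *	strn_pattern_cmp("ST340823A", "ST340823A",      '*') == 0
 *	strn_pattern_cmp("ST340823A", "ST340824A",      '*') != 0
 */
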
4352 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4353 {
4354 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4355 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4356 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4357 
4358 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4359 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4360 
4361 	while (ad->model_num) {
4362 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4363 			if (ad->model_rev == NULL)
4364 				return ad->horkage;
4365 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4366 				return ad->horkage;
4367 		}
4368 		ad++;
4369 	}
4370 	return 0;
4371 }
4372 
4373 static int ata_dma_blacklisted(const struct ata_device *dev)
4374 {
4375 	/* We don't support polling DMA.
4376 	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4377 	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4378 	 */
4379 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4380 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4381 		return 1;
4382 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4383 }
4384 
4385 /**
4386  *	ata_is_40wire		-	check drive side detection
4387  *	@dev: device
4388  *
4389  *	Perform drive side detection decoding, allowing for device vendors
4390  *	who can't follow the documentation.
4391  */
4392 
4393 static int ata_is_40wire(struct ata_device *dev)
4394 {
4395 	if (dev->horkage & ATA_HORKAGE_IVB)
4396 		return ata_drive_40wire_relaxed(dev->id);
4397 	return ata_drive_40wire(dev->id);
4398 }
4399 
4400 /**
4401  *	cable_is_40wire		-	40/80/SATA decider
4402  *	@ap: port to consider
4403  *
4404  *	This function encapsulates the policy for speed management
4405  *	in one place. At the moment we don't cache the result but
4406  *	there is a good case for setting ap->cbl to the result when
4407  *	we are called with unknown cables (and figuring out if it
4408  *	impacts hotplug at all).
4409  *
4410  *	Return 1 if the cable appears to be 40 wire.
4411  */
4412 
4413 static int cable_is_40wire(struct ata_port *ap)
4414 {
4415 	struct ata_link *link;
4416 	struct ata_device *dev;
4417 
4418 	/* If the controller thinks we are 40 wire, we are. */
4419 	if (ap->cbl == ATA_CBL_PATA40)
4420 		return 1;
4421 
4422 	/* If the controller thinks we are 80 wire, we are. */
4423 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4424 		return 0;
4425 
4426 	/* If the system is known to be 40 wire short cable (eg
4427 	 * laptop), then we allow 80 wire modes even if the drive
4428 	 * isn't sure.
4429 	 */
4430 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4431 		return 0;
4432 
4433 	/* If the controller doesn't know, we scan.
4434 	 *
4435 	 * Note: We look for all 40 wire detects at this point.  Any
4436 	 *       80 wire detect is taken to be 80 wire cable because
4437 	 * - in many setups only the one drive (slave if present) will
4438 	 *   give a valid detect
4439 	 * - if you have a non detect capable drive you don't want it
4440 	 *   to colour the choice
4441 	 */
4442 	ata_for_each_link(link, ap, EDGE) {
4443 		ata_for_each_dev(dev, link, ENABLED) {
4444 			if (!ata_is_40wire(dev))
4445 				return 0;
4446 		}
4447 	}
4448 	return 1;
4449 }
4450 
4451 /**
4452  *	ata_dev_xfermask - Compute supported xfermask of the given device
4453  *	@dev: Device to compute xfermask for
4454  *
4455  *	Compute supported xfermask of @dev and store it in
4456  *	dev->*_mask.  This function is responsible for applying all
4457  *	known limits including host controller limits, device
4458  *	blacklist, etc...
4459  *
4460  *	LOCKING:
4461  *	None.
4462  */
4463 static void ata_dev_xfermask(struct ata_device *dev)
4464 {
4465 	struct ata_link *link = dev->link;
4466 	struct ata_port *ap = link->ap;
4467 	struct ata_host *host = ap->host;
4468 	unsigned long xfer_mask;
4469 
4470 	/* controller modes available */
4471 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4472 				      ap->mwdma_mask, ap->udma_mask);
4473 
4474 	/* drive modes available */
4475 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4476 				       dev->mwdma_mask, dev->udma_mask);
4477 	xfer_mask &= ata_id_xfermask(dev->id);
4478 
4479 	/*
4480 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4481 	 *	cable
4482 	 */
4483 	if (ata_dev_pair(dev)) {
4484 		/* No PIO5 or PIO6 */
4485 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4486 		/* No MWDMA3 or MWDMA 4 */
4487 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4488 	}
4489 
4490 	if (ata_dma_blacklisted(dev)) {
4491 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4492 		ata_dev_printk(dev, KERN_WARNING,
4493 			       "device is on DMA blacklist, disabling DMA\n");
4494 	}
4495 
4496 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4497 	    host->simplex_claimed && host->simplex_claimed != ap) {
4498 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4499 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4500 			       "other device, disabling DMA\n");
4501 	}
4502 
4503 	if (ap->flags & ATA_FLAG_NO_IORDY)
4504 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4505 
4506 	if (ap->ops->mode_filter)
4507 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4508 
4509 	/* Apply cable rule here.  Don't apply it early because when
4510 	 * we handle hot plug the cable type can itself change.
4511 	 * Check this last so that we know if the transfer rate was
4512 	 * solely limited by the cable.
4513 	 * Unknown or 80 wire cables reported host side are checked
4514 	 * drive side as well. Cases where we know a 40wire cable
4515 	 * is used safely for 80 are not checked here.
4516 	 */
4517 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4518 		/* UDMA/44 or higher would be available */
4519 		if (cable_is_40wire(ap)) {
4520 			ata_dev_printk(dev, KERN_WARNING,
4521 				 "limited to UDMA/33 due to 40-wire cable\n");
4522 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4523 		}
4524 
4525 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4526 			    &dev->mwdma_mask, &dev->udma_mask);
4527 }
4528 
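/*
 * Mask math sketch (illustrative only, assuming the cumulative mode
 * masks from <linux/ata.h>): 0xF8 covers bits 3-7, so
 * "0xF8 << ATA_SHIFT_UDMA" selects UDMA3 (UDMA/44) and above in a
 * packed xfermask.  Clearing it, as the 40-wire cable rule above does,
 * caps the device at UDMA/33.
 *
 *	unsigned long pio, mwdma, udma;
 *	unsigned long mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2,
 *					       ATA_UDMA6);
 *
 *	mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 *	ata_unpack_xfermask(mask, &pio, &mwdma, &udma);
 *
 * after which udma equals ATA_UDMA2 (UDMA/33 and below).
 */
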
4529 /**
4530  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4531  *	@dev: Device to which command will be sent
4532  *
4533  *	Issue SET FEATURES - XFER MODE command to device @dev
4534  *	on port @ap.
4535  *
4536  *	LOCKING:
4537  *	PCI/etc. bus probe sem.
4538  *
4539  *	RETURNS:
4540  *	0 on success, AC_ERR_* mask otherwise.
4541  */
4542 
4543 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4544 {
4545 	struct ata_taskfile tf;
4546 	unsigned int err_mask;
4547 
4548 	/* set up set-features taskfile */
4549 	DPRINTK("set features - xfer mode\n");
4550 
4551 	/* Some controllers and ATAPI devices show flaky interrupt
4552 	 * behavior after setting xfer mode.  Use polling instead.
4553 	 */
4554 	ata_tf_init(dev, &tf);
4555 	tf.command = ATA_CMD_SET_FEATURES;
4556 	tf.feature = SETFEATURES_XFER;
4557 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4558 	tf.protocol = ATA_PROT_NODATA;
4559 	/* If we are using IORDY we must send the mode setting command */
4560 	if (ata_pio_need_iordy(dev))
4561 		tf.nsect = dev->xfer_mode;
4562 	/* If the device has IORDY and the controller does not - turn it off */
4563 	else if (ata_id_has_iordy(dev->id))
4564 		tf.nsect = 0x01;
4565 	else /* In the ancient relic department - skip all of this */
4566 		return 0;
4567 
4568 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4569 
4570 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4571 	return err_mask;
4572 }
4573 /**
4574  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4575  *	@dev: Device to which command will be sent
4576  *	@enable: Whether to enable or disable the feature
4577  *	@feature: The feature to set, passed in the sector count field
4578  *
4579  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4580  *	on port @ap with the sector count set to @feature.
4581  *
4582  *	LOCKING:
4583  *	PCI/etc. bus probe sem.
4584  *
4585  *	RETURNS:
4586  *	0 on success, AC_ERR_* mask otherwise.
4587  */
4588 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4589 					u8 feature)
4590 {
4591 	struct ata_taskfile tf;
4592 	unsigned int err_mask;
4593 
4594 	/* set up set-features taskfile */
4595 	DPRINTK("set features - SATA features\n");
4596 
4597 	ata_tf_init(dev, &tf);
4598 	tf.command = ATA_CMD_SET_FEATURES;
4599 	tf.feature = enable;
4600 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4601 	tf.protocol = ATA_PROT_NODATA;
4602 	tf.nsect = feature;
4603 
4604 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4605 
4606 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4607 	return err_mask;
4608 }
4609 
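/*
 * Usage sketch (hedged; assumes the SETFEATURES_SATA_ENABLE and SATA_AN
 * constants from <linux/ata.h>): the subcommand goes into the FEATURE
 * register via @enable and the SATA feature number into the sector
 * count via @feature, e.g. enabling asynchronous notification:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
 */
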
4610 /**
4611  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4612  *	@dev: Device to which command will be sent
4613  *	@heads: Number of heads (taskfile parameter)
4614  *	@sectors: Number of sectors (taskfile parameter)
4615  *
4616  *	LOCKING:
4617  *	Kernel thread context (may sleep)
4618  *
4619  *	RETURNS:
4620  *	0 on success, AC_ERR_* mask otherwise.
4621  */
4622 static unsigned int ata_dev_init_params(struct ata_device *dev,
4623 					u16 heads, u16 sectors)
4624 {
4625 	struct ata_taskfile tf;
4626 	unsigned int err_mask;
4627 
4628 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4629 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4630 		return AC_ERR_INVALID;
4631 
4632 	/* set up init dev params taskfile */
4633 	DPRINTK("init dev params \n");
4634 
4635 	ata_tf_init(dev, &tf);
4636 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4637 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4638 	tf.protocol = ATA_PROT_NODATA;
4639 	tf.nsect = sectors;
4640 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4641 
4642 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4643 	/* A clean abort indicates an original or just-out-of-spec drive
4644 	   and we should continue as we issue the setup based on the
4645 	   drive's reported working geometry */
4646 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4647 		err_mask = 0;
4648 
4649 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4650 	return err_mask;
4651 }
4652 
4653 /**
4654  *	ata_sg_clean - Unmap DMA memory associated with command
4655  *	@qc: Command containing DMA memory to be released
4656  *
4657  *	Unmap all mapped DMA memory associated with this command.
4658  *
4659  *	LOCKING:
4660  *	spin_lock_irqsave(host lock)
4661  */
4662 void ata_sg_clean(struct ata_queued_cmd *qc)
4663 {
4664 	struct ata_port *ap = qc->ap;
4665 	struct scatterlist *sg = qc->sg;
4666 	int dir = qc->dma_dir;
4667 
4668 	WARN_ON_ONCE(sg == NULL);
4669 
4670 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4671 
4672 	if (qc->n_elem)
4673 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4674 
4675 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4676 	qc->sg = NULL;
4677 }
4678 
4679 /**
4680  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4681  *	@qc: Metadata associated with taskfile to check
4682  *
4683  *	Allow low-level driver to filter ATA PACKET commands, returning
4684  *	a status indicating whether or not it is OK to use DMA for the
4685  *	supplied PACKET command.
4686  *
4687  *	LOCKING:
4688  *	spin_lock_irqsave(host lock)
4689  *
4690  *	RETURNS: 0 when ATAPI DMA can be used
4691  *               nonzero otherwise
4692  */
4693 int atapi_check_dma(struct ata_queued_cmd *qc)
4694 {
4695 	struct ata_port *ap = qc->ap;
4696 
4697 	/* Don't allow DMA if the transfer length isn't a multiple of 16
4698 	 * bytes.  Quite a few ATAPI devices choke on such DMA requests.
4699 	 */
4700 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4701 	    unlikely(qc->nbytes & 15))
4702 		return 1;
4703 
4704 	if (ap->ops->check_atapi_dma)
4705 		return ap->ops->check_atapi_dma(qc);
4706 
4707 	return 0;
4708 }
4709 
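/*
 * Worked example: a 510-byte ATAPI transfer has (510 & 15) == 14, so it
 * is refused DMA above unless the device carries
 * ATA_HORKAGE_ATAPI_MOD16_DMA; a 512-byte transfer (512 & 15 == 0) may
 * use DMA, subject to the LLD's optional ->check_atapi_dma() filter.
 */
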
4710 /**
4711  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4712  *	@qc: ATA command in question
4713  *
4714  *	Non-NCQ commands cannot run with any other command, NCQ or
4715  *	not.  As the upper layer only knows the queue depth, we are
4716  *	responsible for maintaining exclusion.  This function checks
4717  *	whether a new command @qc can be issued.
4718  *
4719  *	LOCKING:
4720  *	spin_lock_irqsave(host lock)
4721  *
4722  *	RETURNS:
4723  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4724  */
4725 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4726 {
4727 	struct ata_link *link = qc->dev->link;
4728 
4729 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4730 		if (!ata_tag_valid(link->active_tag))
4731 			return 0;
4732 	} else {
4733 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4734 			return 0;
4735 	}
4736 
4737 	return ATA_DEFER_LINK;
4738 }
4739 
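/*
 * Illustration: with an NCQ command in flight (link->sactive != 0,
 * link->active_tag invalid) another NCQ command may issue, but a
 * non-NCQ command gets ATA_DEFER_LINK; conversely, while a non-NCQ
 * command owns link->active_tag, everything else is deferred.
 */
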
4740 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4741 
4742 /**
4743  *	ata_sg_init - Associate command with scatter-gather table.
4744  *	@qc: Command to be associated
4745  *	@sg: Scatter-gather table.
4746  *	@n_elem: Number of elements in s/g table.
4747  *
4748  *	Initialize the data-related elements of queued_cmd @qc
4749  *	to point to a scatter-gather table @sg, containing @n_elem
4750  *	elements.
4751  *
4752  *	LOCKING:
4753  *	spin_lock_irqsave(host lock)
4754  */
4755 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4756 		 unsigned int n_elem)
4757 {
4758 	qc->sg = sg;
4759 	qc->n_elem = n_elem;
4760 	qc->cursg = qc->sg;
4761 }
4762 
4763 /**
4764  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4765  *	@qc: Command with scatter-gather table to be mapped.
4766  *
4767  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4768  *
4769  *	LOCKING:
4770  *	spin_lock_irqsave(host lock)
4771  *
4772  *	RETURNS:
4773  *	Zero on success, negative on error.
4774  *
4775  */
4776 static int ata_sg_setup(struct ata_queued_cmd *qc)
4777 {
4778 	struct ata_port *ap = qc->ap;
4779 	unsigned int n_elem;
4780 
4781 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4782 
4783 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4784 	if (n_elem < 1)
4785 		return -1;
4786 
4787 	DPRINTK("%d sg elements mapped\n", n_elem);
4788 	qc->orig_n_elem = qc->n_elem;
4789 	qc->n_elem = n_elem;
4790 	qc->flags |= ATA_QCFLAG_DMAMAP;
4791 
4792 	return 0;
4793 }
4794 
4795 /**
4796  *	swap_buf_le16 - swap halves of 16-bit words in place
4797  *	@buf:  Buffer to swap
4798  *	@buf_words:  Number of 16-bit words in buffer.
4799  *
4800  *	Swap halves of 16-bit words if needed to convert from
4801  *	little-endian byte order to native cpu byte order, or
4802  *	vice-versa.
4803  *
4804  *	LOCKING:
4805  *	Inherited from caller.
4806  */
4807 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4808 {
4809 #ifdef __BIG_ENDIAN
4810 	unsigned int i;
4811 
4812 	for (i = 0; i < buf_words; i++)
4813 		buf[i] = le16_to_cpu(buf[i]);
4814 #endif /* __BIG_ENDIAN */
4815 }
4816 
4817 /**
4818  *	ata_qc_new - Request an available ATA command, for queueing
4819  *	@ap: target port
4820  *
4821  *	LOCKING:
4822  *	None.
4823  */
4824 
4825 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4826 {
4827 	struct ata_queued_cmd *qc = NULL;
4828 	unsigned int i;
4829 
4830 	/* no command while frozen */
4831 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4832 		return NULL;
4833 
4834 	/* the last tag is reserved for internal command. */
4835 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4836 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4837 			qc = __ata_qc_from_tag(ap, i);
4838 			break;
4839 		}
4840 
4841 	if (qc)
4842 		qc->tag = i;
4843 
4844 	return qc;
4845 }
4846 
4847 /**
4848  *	ata_qc_new_init - Request an available ATA command, and initialize it
4849  *	@dev: Device from whom we request an available command structure
4850  *
4851  *	LOCKING:
4852  *	None.
4853  */
4854 
4855 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4856 {
4857 	struct ata_port *ap = dev->link->ap;
4858 	struct ata_queued_cmd *qc;
4859 
4860 	qc = ata_qc_new(ap);
4861 	if (qc) {
4862 		qc->scsicmd = NULL;
4863 		qc->ap = ap;
4864 		qc->dev = dev;
4865 
4866 		ata_qc_reinit(qc);
4867 	}
4868 
4869 	return qc;
4870 }
4871 
4872 /**
4873  *	ata_qc_free - free unused ata_queued_cmd
4874  *	@qc: Command to complete
4875  *
4876  *	Designed to free unused ata_queued_cmd object
4877  *	in case something prevents using it.
4878  *
4879  *	LOCKING:
4880  *	spin_lock_irqsave(host lock)
4881  */
4882 void ata_qc_free(struct ata_queued_cmd *qc)
4883 {
4884 	struct ata_port *ap;
4885 	unsigned int tag;
4886 
4887 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4888 	ap = qc->ap;
4889 
4890 	qc->flags = 0;
4891 	tag = qc->tag;
4892 	if (likely(ata_tag_valid(tag))) {
4893 		qc->tag = ATA_TAG_POISON;
4894 		clear_bit(tag, &ap->qc_allocated);
4895 	}
4896 }
4897 
4898 void __ata_qc_complete(struct ata_queued_cmd *qc)
4899 {
4900 	struct ata_port *ap;
4901 	struct ata_link *link;
4902 
4903 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4904 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4905 	ap = qc->ap;
4906 	link = qc->dev->link;
4907 
4908 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4909 		ata_sg_clean(qc);
4910 
4911 	/* command should be marked inactive atomically with qc completion */
4912 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4913 		link->sactive &= ~(1 << qc->tag);
4914 		if (!link->sactive)
4915 			ap->nr_active_links--;
4916 	} else {
4917 		link->active_tag = ATA_TAG_POISON;
4918 		ap->nr_active_links--;
4919 	}
4920 
4921 	/* clear exclusive status */
4922 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4923 		     ap->excl_link == link))
4924 		ap->excl_link = NULL;
4925 
4926 	/* atapi: mark qc as inactive to prevent the interrupt handler
4927 	 * from completing the command twice later, before the error handler
4928 	 * is called. (when rc != 0 and atapi request sense is needed)
4929 	 */
4930 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4931 	ap->qc_active &= ~(1 << qc->tag);
4932 
4933 	/* call completion callback */
4934 	qc->complete_fn(qc);
4935 }
4936 
4937 static void fill_result_tf(struct ata_queued_cmd *qc)
4938 {
4939 	struct ata_port *ap = qc->ap;
4940 
4941 	qc->result_tf.flags = qc->tf.flags;
4942 	ap->ops->qc_fill_rtf(qc);
4943 }
4944 
4945 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4946 {
4947 	struct ata_device *dev = qc->dev;
4948 
4949 	if (ata_tag_internal(qc->tag))
4950 		return;
4951 
4952 	if (ata_is_nodata(qc->tf.protocol))
4953 		return;
4954 
4955 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4956 		return;
4957 
4958 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4959 }
4960 
4961 /**
4962  *	ata_qc_complete - Complete an active ATA command
4963  *	@qc: Command to complete
4964  *
4965  *	Indicate to the mid and upper layers that an ATA
4966  *	command has completed, with either an ok or not-ok status.
4967  *
4968  *	LOCKING:
4969  *	spin_lock_irqsave(host lock)
4970  */
4971 void ata_qc_complete(struct ata_queued_cmd *qc)
4972 {
4973 	struct ata_port *ap = qc->ap;
4974 
4975 	/* XXX: New EH and old EH use different mechanisms to
4976 	 * synchronize EH with regular execution path.
4977 	 *
4978 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4979 	 * Normal execution path is responsible for not accessing a
4980 	 * failed qc.  libata core enforces the rule by returning NULL
4981 	 * from ata_qc_from_tag() for failed qcs.
4982 	 *
4983 	 * Old EH depends on ata_qc_complete() nullifying completion
4984 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4985 	 * not synchronize with interrupt handler.  Only PIO task is
4986 	 * taken care of.
4987 	 */
4988 	if (ap->ops->error_handler) {
4989 		struct ata_device *dev = qc->dev;
4990 		struct ata_eh_info *ehi = &dev->link->eh_info;
4991 
4992 		if (unlikely(qc->err_mask))
4993 			qc->flags |= ATA_QCFLAG_FAILED;
4994 
4995 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4996 			/* always fill result TF for failed qc */
4997 			fill_result_tf(qc);
4998 
4999 			if (!ata_tag_internal(qc->tag))
5000 				ata_qc_schedule_eh(qc);
5001 			else
5002 				__ata_qc_complete(qc);
5003 			return;
5004 		}
5005 
5006 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5007 
5008 		/* read result TF if requested */
5009 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5010 			fill_result_tf(qc);
5011 
5012 		/* Some commands need post-processing after successful
5013 		 * completion.
5014 		 */
5015 		switch (qc->tf.command) {
5016 		case ATA_CMD_SET_FEATURES:
5017 			if (qc->tf.feature != SETFEATURES_WC_ON &&
5018 			    qc->tf.feature != SETFEATURES_WC_OFF)
5019 				break;
5020 			/* fall through */
5021 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5022 		case ATA_CMD_SET_MULTI: /* multi_count changed */
5023 			/* revalidate device */
5024 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5025 			ata_port_schedule_eh(ap);
5026 			break;
5027 
5028 		case ATA_CMD_SLEEP:
5029 			dev->flags |= ATA_DFLAG_SLEEPING;
5030 			break;
5031 		}
5032 
5033 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5034 			ata_verify_xfer(qc);
5035 
5036 		__ata_qc_complete(qc);
5037 	} else {
5038 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5039 			return;
5040 
5041 		/* read result TF if failed or requested */
5042 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5043 			fill_result_tf(qc);
5044 
5045 		__ata_qc_complete(qc);
5046 	}
5047 }
5048 
5049 /**
5050  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5051  *	@ap: port in question
5052  *	@qc_active: new qc_active mask
5053  *
5054  *	Complete in-flight commands.  This function is meant to be
5055  *	called from the low-level driver's interrupt routine to complete
5056  *	requests normally.  ap->qc_active and @qc_active are compared
5057  *	and commands are completed accordingly.
5058  *
5059  *	LOCKING:
5060  *	spin_lock_irqsave(host lock)
5061  *
5062  *	RETURNS:
5063  *	Number of completed commands on success, -errno otherwise.
5064  */
5065 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5066 {
5067 	int nr_done = 0;
5068 	u32 done_mask;
5069 
5070 	done_mask = ap->qc_active ^ qc_active;
5071 
5072 	if (unlikely(done_mask & qc_active)) {
5073 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5074 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5075 		return -EINVAL;
5076 	}
5077 
5078 	while (done_mask) {
5079 		struct ata_queued_cmd *qc;
5080 		unsigned int tag = __ffs(done_mask);
5081 
5082 		qc = ata_qc_from_tag(ap, tag);
5083 		if (qc) {
5084 			ata_qc_complete(qc);
5085 			nr_done++;
5086 		}
5087 		done_mask &= ~(1 << tag);
5088 	}
5089 
5090 	return nr_done;
5091 }
5092 
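/*
 * Worked example: with tags 0-3 in flight (ap->qc_active == 0x0f), an
 * interrupt reporting qc_active == 0x05 (tags 0 and 2 still busy)
 * yields done_mask == 0x0a, so tags 1 and 3 are completed and 2 is
 * returned.  Reporting a tag libata never issued (e.g. qc_active ==
 * 0x15) makes "done_mask & qc_active" nonzero and fails with -EINVAL.
 */
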
5093 /**
5094  *	ata_qc_issue - issue taskfile to device
5095  *	@qc: command to issue to device
5096  *
5097  *	Prepare an ATA command for submission to the device.
5098  *	This includes mapping the data into a DMA-able
5099  *	area, filling in the S/G table, and finally
5100  *	writing the taskfile to hardware, starting the command.
5101  *
5102  *	LOCKING:
5103  *	spin_lock_irqsave(host lock)
5104  */
5105 void ata_qc_issue(struct ata_queued_cmd *qc)
5106 {
5107 	struct ata_port *ap = qc->ap;
5108 	struct ata_link *link = qc->dev->link;
5109 	u8 prot = qc->tf.protocol;
5110 
5111 	/* Make sure only one non-NCQ command is outstanding.  The
5112 	 * check is skipped for old EH because it reuses active qc to
5113 	 * request ATAPI sense.
5114 	 */
5115 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5116 
5117 	if (ata_is_ncq(prot)) {
5118 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5119 
5120 		if (!link->sactive)
5121 			ap->nr_active_links++;
5122 		link->sactive |= 1 << qc->tag;
5123 	} else {
5124 		WARN_ON_ONCE(link->sactive);
5125 
5126 		ap->nr_active_links++;
5127 		link->active_tag = qc->tag;
5128 	}
5129 
5130 	qc->flags |= ATA_QCFLAG_ACTIVE;
5131 	ap->qc_active |= 1 << qc->tag;
5132 
5133 	/* We guarantee to LLDs that they will have at least one
5134 	 * non-zero sg if the command is a data command.
5135 	 */
5136 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
5137 
5138 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5139 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5140 		if (ata_sg_setup(qc))
5141 			goto sg_err;
5142 
5143 	/* if device is sleeping, schedule reset and abort the link */
5144 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5145 		link->eh_info.action |= ATA_EH_RESET;
5146 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5147 		ata_link_abort(link);
5148 		return;
5149 	}
5150 
5151 	ap->ops->qc_prep(qc);
5152 
5153 	qc->err_mask |= ap->ops->qc_issue(qc);
5154 	if (unlikely(qc->err_mask))
5155 		goto err;
5156 	return;
5157 
5158 sg_err:
5159 	qc->err_mask |= AC_ERR_SYSTEM;
5160 err:
5161 	ata_qc_complete(qc);
5162 }
5163 
5164 /**
5165  *	sata_scr_valid - test whether SCRs are accessible
5166  *	@link: ATA link to test SCR accessibility for
5167  *
5168  *	Test whether SCRs are accessible for @link.
5169  *
5170  *	LOCKING:
5171  *	None.
5172  *
5173  *	RETURNS:
5174  *	1 if SCRs are accessible, 0 otherwise.
5175  */
5176 int sata_scr_valid(struct ata_link *link)
5177 {
5178 	struct ata_port *ap = link->ap;
5179 
5180 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5181 }
5182 
5183 /**
5184  *	sata_scr_read - read SCR register of the specified port
5185  *	@link: ATA link to read SCR for
5186  *	@reg: SCR to read
5187  *	@val: Place to store read value
5188  *
5189  *	Read SCR register @reg of @link into *@val.  This function is
5190  *	guaranteed to succeed if @link is ap->link, the cable type of
5191  *	the port is SATA and the port implements ->scr_read.
5192  *
5193  *	LOCKING:
5194  *	None if @link is ap->link.  Kernel thread context otherwise.
5195  *
5196  *	RETURNS:
5197  *	0 on success, negative errno on failure.
5198  */
5199 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5200 {
5201 	if (ata_is_host_link(link)) {
5202 		if (sata_scr_valid(link))
5203 			return link->ap->ops->scr_read(link, reg, val);
5204 		return -EOPNOTSUPP;
5205 	}
5206 
5207 	return sata_pmp_scr_read(link, reg, val);
5208 }
5209 
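/*
 * Usage sketch (hypothetical caller): read SStatus and extract the DET
 * field, as done throughout this file.
 *
 *	u32 sstatus, det;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 *		det = sstatus & 0xf;
 *
 * A DET value of 0x3 indicates an attached device with established
 * communication.
 */
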
5210 /**
5211  *	sata_scr_write - write SCR register of the specified port
5212  *	@link: ATA link to write SCR for
5213  *	@reg: SCR to write
5214  *	@val: value to write
5215  *
5216  *	Write @val to SCR register @reg of @link.  This function is
5217  *	guaranteed to succeed if @link is ap->link, the cable type of
5218  *	the port is SATA and the port implements ->scr_read.
5219  *
5220  *	LOCKING:
5221  *	None if @link is ap->link.  Kernel thread context otherwise.
5222  *
5223  *	RETURNS:
5224  *	0 on success, negative errno on failure.
5225  */
5226 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5227 {
5228 	if (ata_is_host_link(link)) {
5229 		if (sata_scr_valid(link))
5230 			return link->ap->ops->scr_write(link, reg, val);
5231 		return -EOPNOTSUPP;
5232 	}
5233 
5234 	return sata_pmp_scr_write(link, reg, val);
5235 }
5236 
5237 /**
5238  *	sata_scr_write_flush - write SCR register of the specified port and flush
5239  *	@link: ATA link to write SCR for
5240  *	@reg: SCR to write
5241  *	@val: value to write
5242  *
5243  *	This function is identical to sata_scr_write() except that this
5244  *	function performs a flush after writing to the register.
5245  *
5246  *	LOCKING:
5247  *	None if @link is ap->link.  Kernel thread context otherwise.
5248  *
5249  *	RETURNS:
5250  *	0 on success, negative errno on failure.
5251  */
5252 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5253 {
5254 	if (ata_is_host_link(link)) {
5255 		int rc;
5256 
5257 		if (sata_scr_valid(link)) {
5258 			rc = link->ap->ops->scr_write(link, reg, val);
5259 			if (rc == 0)
5260 				rc = link->ap->ops->scr_read(link, reg, &val);
5261 			return rc;
5262 		}
5263 		return -EOPNOTSUPP;
5264 	}
5265 
5266 	return sata_pmp_scr_write(link, reg, val);
5267 }
5268 
5269 /**
5270  *	ata_phys_link_online - test whether the given link is online
5271  *	@link: ATA link to test
5272  *
5273  *	Test whether @link is online.  Note that this function returns
5274  *	0 if online status of @link cannot be obtained, so
5275  *	ata_link_online(link) != !ata_link_offline(link).
5276  *
5277  *	LOCKING:
5278  *	None.
5279  *
5280  *	RETURNS:
5281  *	True if the port online status is available and online.
5282  */
5283 bool ata_phys_link_online(struct ata_link *link)
5284 {
5285 	u32 sstatus;
5286 
5287 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5288 	    ata_sstatus_online(sstatus))
5289 		return true;
5290 	return false;
5291 }
5292 
5293 /**
5294  *	ata_phys_link_offline - test whether the given link is offline
5295  *	@link: ATA link to test
5296  *
5297  *	Test whether @link is offline.  Note that this function
5298  *	returns 0 if offline status of @link cannot be obtained, so
5299  *	ata_link_online(link) != !ata_link_offline(link).
5300  *
5301  *	LOCKING:
5302  *	None.
5303  *
5304  *	RETURNS:
5305  *	True if the port offline status is available and offline.
5306  */
5307 bool ata_phys_link_offline(struct ata_link *link)
5308 {
5309 	u32 sstatus;
5310 
5311 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5312 	    !ata_sstatus_online(sstatus))
5313 		return true;
5314 	return false;
5315 }
5316 
5317 /**
5318  *	ata_link_online - test whether the given link is online
5319  *	@link: ATA link to test
5320  *
5321  *	Test whether @link is online.  This is identical to
5322  *	ata_phys_link_online() when there's no slave link.  When
5323  *	there's a slave link, this function should only be called on
5324  *	the master link and will return true if any of M/S links is
5325  *	online.
5326  *
5327  *	LOCKING:
5328  *	None.
5329  *
5330  *	RETURNS:
5331  *	True if the port online status is available and online.
5332  */
5333 bool ata_link_online(struct ata_link *link)
5334 {
5335 	struct ata_link *slave = link->ap->slave_link;
5336 
5337 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5338 
5339 	return ata_phys_link_online(link) ||
5340 		(slave && ata_phys_link_online(slave));
5341 }
5342 
5343 /**
5344  *	ata_link_offline - test whether the given link is offline
5345  *	@link: ATA link to test
5346  *
5347  *	Test whether @link is offline.  This is identical to
5348  *	ata_phys_link_offline() when there's no slave link.  When
5349  *	there's a slave link, this function should only be called on
5350  *	the master link and will return true if both M/S links are
5351  *	offline.
5352  *
5353  *	LOCKING:
5354  *	None.
5355  *
5356  *	RETURNS:
5357  *	True if the port offline status is available and offline.
5358  */
5359 bool ata_link_offline(struct ata_link *link)
5360 {
5361 	struct ata_link *slave = link->ap->slave_link;
5362 
5363 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5364 
5365 	return ata_phys_link_offline(link) &&
5366 		(!slave || ata_phys_link_offline(slave));
5367 }
5368 
5369 #ifdef CONFIG_PM
5370 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5371 			       unsigned int action, unsigned int ehi_flags,
5372 			       int wait)
5373 {
5374 	unsigned long flags;
5375 	int i, rc;
5376 
5377 	for (i = 0; i < host->n_ports; i++) {
5378 		struct ata_port *ap = host->ports[i];
5379 		struct ata_link *link;
5380 
5381 		/* Previous resume operation might still be in
5382 		 * progress.  Wait for PM_PENDING to clear.
5383 		 */
5384 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5385 			ata_port_wait_eh(ap);
5386 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5387 		}
5388 
5389 		/* request PM ops to EH */
5390 		spin_lock_irqsave(ap->lock, flags);
5391 
5392 		ap->pm_mesg = mesg;
5393 		if (wait) {
5394 			rc = 0;
5395 			ap->pm_result = &rc;
5396 		}
5397 
5398 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5399 		ata_for_each_link(link, ap, HOST_FIRST) {
5400 			link->eh_info.action |= action;
5401 			link->eh_info.flags |= ehi_flags;
5402 		}
5403 
5404 		ata_port_schedule_eh(ap);
5405 
5406 		spin_unlock_irqrestore(ap->lock, flags);
5407 
5408 		/* wait and check result */
5409 		if (wait) {
5410 			ata_port_wait_eh(ap);
5411 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5412 			if (rc)
5413 				return rc;
5414 		}
5415 	}
5416 
5417 	return 0;
5418 }
5419 
5420 /**
5421  *	ata_host_suspend - suspend host
5422  *	@host: host to suspend
5423  *	@mesg: PM message
5424  *
5425  *	Suspend @host.  Actual operation is performed by EH.  This
5426  *	function requests EH to perform PM operations and waits for EH
5427  *	to finish.
5428  *
5429  *	LOCKING:
5430  *	Kernel thread context (may sleep).
5431  *
5432  *	RETURNS:
5433  *	0 on success, -errno on failure.
5434  */
5435 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5436 {
5437 	int rc;
5438 
5439 	/*
5440 	 * disable link pm on all ports before requesting
5441 	 * any pm activity
5442 	 */
5443 	ata_lpm_enable(host);
5444 
5445 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5446 	if (rc == 0)
5447 		host->dev->power.power_state = mesg;
5448 	return rc;
5449 }
5450 
5451 /**
5452  *	ata_host_resume - resume host
5453  *	@host: host to resume
5454  *
5455  *	Resume @host.  Actual operation is performed by EH.  This
5456  *	function requests EH to perform PM operations and returns.
5457  *	Note that all resume operations are performed in parallel.
5458  *
5459  *	LOCKING:
5460  *	Kernel thread context (may sleep).
5461  */
5462 void ata_host_resume(struct ata_host *host)
5463 {
5464 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5465 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5466 	host->dev->power.power_state = PMSG_ON;
5467 
5468 	/* reenable link pm */
5469 	ata_lpm_disable(host);
5470 }
5471 #endif
5472 
5473 /**
5474  *	ata_dev_init - Initialize an ata_device structure
5475  *	@dev: Device structure to initialize
5476  *
5477  *	Initialize @dev in preparation for probing.
5478  *
5479  *	LOCKING:
5480  *	Inherited from caller.
5481  */
5482 void ata_dev_init(struct ata_device *dev)
5483 {
5484 	struct ata_link *link = ata_dev_phys_link(dev);
5485 	struct ata_port *ap = link->ap;
5486 	unsigned long flags;
5487 
5488 	/* SATA spd limit is bound to the attached device, reset together */
5489 	link->sata_spd_limit = link->hw_sata_spd_limit;
5490 	link->sata_spd = 0;
5491 
5492 	/* High bits of dev->flags are used to record warm plug
5493 	 * requests which occur asynchronously.  Synchronize using
5494 	 * host lock.
5495 	 */
5496 	spin_lock_irqsave(ap->lock, flags);
5497 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5498 	dev->horkage = 0;
5499 	spin_unlock_irqrestore(ap->lock, flags);
5500 
5501 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5502 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5503 	dev->pio_mask = UINT_MAX;
5504 	dev->mwdma_mask = UINT_MAX;
5505 	dev->udma_mask = UINT_MAX;
5506 }
5507 
5508 /**
5509  *	ata_link_init - Initialize an ata_link structure
5510  *	@ap: ATA port link is attached to
5511  *	@link: Link structure to initialize
5512  *	@pmp: Port multiplier port number
5513  *
5514  *	Initialize @link.
5515  *
5516  *	LOCKING:
5517  *	Kernel thread context (may sleep)
5518  */
5519 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5520 {
5521 	int i;
5522 
5523 	/* clear everything except for devices */
5524 	memset(link, 0, offsetof(struct ata_link, device[0]));
5525 
5526 	link->ap = ap;
5527 	link->pmp = pmp;
5528 	link->active_tag = ATA_TAG_POISON;
5529 	link->hw_sata_spd_limit = UINT_MAX;
5530 
5531 	/* can't use iterator, ap isn't initialized yet */
5532 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5533 		struct ata_device *dev = &link->device[i];
5534 
5535 		dev->link = link;
5536 		dev->devno = dev - link->device;
5537 #ifdef CONFIG_ATA_ACPI
5538 		dev->gtf_filter = ata_acpi_gtf_filter;
5539 #endif
5540 		ata_dev_init(dev);
5541 	}
5542 }
5543 
5544 /**
5545  *	sata_link_init_spd - Initialize link->sata_spd_limit
5546  *	@link: Link to configure sata_spd_limit for
5547  *
5548  *	Initialize @link->[hw_]sata_spd_limit to the currently
5549  *	configured value.
5550  *
5551  *	LOCKING:
5552  *	Kernel thread context (may sleep).
5553  *
5554  *	RETURNS:
5555  *	0 on success, -errno on failure.
5556  */
5557 int sata_link_init_spd(struct ata_link *link)
5558 {
5559 	u8 spd;
5560 	int rc;
5561 
5562 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5563 	if (rc)
5564 		return rc;
5565 
5566 	spd = (link->saved_scontrol >> 4) & 0xf;
5567 	if (spd)
5568 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5569 
5570 	ata_force_link_limits(link);
5571 
5572 	link->sata_spd_limit = link->hw_sata_spd_limit;
5573 
5574 	return 0;
5575 }
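
/*
 * Worked example for the SControl arithmetic above: if SControl reads
 * 0x321, bits 7:4 give spd == 2 (limit to gen 2, 3.0 Gbps), so
 * hw_sata_spd_limit &= (1 << 2) - 1 == 0x3, leaving only the gen 1
 * (1.5 Gbps) and gen 2 (3.0 Gbps) bits set.  spd == 0 means "no speed
 * restriction" and the limit mask is left untouched.
 */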
5576 
5577 /**
5578  *	ata_port_alloc - allocate and initialize basic ATA port resources
5579  *	@host: ATA host this allocated port belongs to
5580  *
5581  *	Allocate and initialize basic ATA port resources.
5582  *
5583  *	RETURNS:
5584  *	Allocated ATA port on success, NULL on failure.
5585  *
5586  *	LOCKING:
5587  *	Inherited from calling layer (may sleep).
5588  */
5589 struct ata_port *ata_port_alloc(struct ata_host *host)
5590 {
5591 	struct ata_port *ap;
5592 
5593 	DPRINTK("ENTER\n");
5594 
5595 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5596 	if (!ap)
5597 		return NULL;
5598 
5599 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5600 	ap->lock = &host->lock;
5601 	ap->print_id = -1;
5602 	ap->host = host;
5603 	ap->dev = host->dev;
5604 
5605 #if defined(ATA_VERBOSE_DEBUG)
5606 	/* turn on all debugging levels */
5607 	ap->msg_enable = 0x00FF;
5608 #elif defined(ATA_DEBUG)
5609 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5610 #else
5611 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5612 #endif
5613 
5614 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5615 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5616 	INIT_LIST_HEAD(&ap->eh_done_q);
5617 	init_waitqueue_head(&ap->eh_wait_q);
5618 	init_completion(&ap->park_req_pending);
5619 	init_timer_deferrable(&ap->fastdrain_timer);
5620 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5621 	ap->fastdrain_timer.data = (unsigned long)ap;
5622 
5623 	ap->cbl = ATA_CBL_NONE;
5624 
5625 	ata_link_init(ap, &ap->link, 0);
5626 
5627 #ifdef ATA_IRQ_TRAP
5628 	ap->stats.unhandled_irq = 1;
5629 	ap->stats.idle_irq = 1;
5630 #endif
5631 	ata_sff_port_init(ap);
5632 
5633 	return ap;
5634 }
5635 
5636 static void ata_host_release(struct device *gendev, void *res)
5637 {
5638 	struct ata_host *host = dev_get_drvdata(gendev);
5639 	int i;
5640 
5641 	for (i = 0; i < host->n_ports; i++) {
5642 		struct ata_port *ap = host->ports[i];
5643 
5644 		if (!ap)
5645 			continue;
5646 
5647 		if (ap->scsi_host)
5648 			scsi_host_put(ap->scsi_host);
5649 
5650 		kfree(ap->pmp_link);
5651 		kfree(ap->slave_link);
5652 		kfree(ap);
5653 		host->ports[i] = NULL;
5654 	}
5655 
5656 	dev_set_drvdata(gendev, NULL);
5657 }
5658 
5659 /**
5660  *	ata_host_alloc - allocate and init basic ATA host resources
5661  *	@dev: generic device this host is associated with
5662  *	@max_ports: maximum number of ATA ports associated with this host
5663  *
5664  *	Allocate and initialize basic ATA host resources.  An LLD calls
5665  *	this function to allocate a host, initializes it fully, and
5666  *	attaches it using ata_host_register().
5667  *
5668  *	@max_ports ports are allocated and host->n_ports is
5669  *	initialized to @max_ports.  The caller is allowed to decrease
5670  *	host->n_ports before calling ata_host_register().  The unused
5671  *	ports will be automatically freed on registration.
5672  *
5673  *	RETURNS:
5674  *	Allocated ATA host on success, NULL on failure.
5675  *
5676  *	LOCKING:
5677  *	Inherited from calling layer (may sleep).
5678  */
5679 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5680 {
5681 	struct ata_host *host;
5682 	size_t sz;
5683 	int i;
5684 
5685 	DPRINTK("ENTER\n");
5686 
5687 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5688 		return NULL;
5689 
5690 	/* alloc a container for our list of ATA ports (buses) */
5691 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5693 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5694 	if (!host)
5695 		goto err_out;
5696 
5697 	devres_add(dev, host);
5698 	dev_set_drvdata(dev, host);
5699 
5700 	spin_lock_init(&host->lock);
5701 	host->dev = dev;
5702 	host->n_ports = max_ports;
5703 
5704 	/* allocate ports bound to this host */
5705 	for (i = 0; i < max_ports; i++) {
5706 		struct ata_port *ap;
5707 
5708 		ap = ata_port_alloc(host);
5709 		if (!ap)
5710 			goto err_out;
5711 
5712 		ap->port_no = i;
5713 		host->ports[i] = ap;
5714 	}
5715 
5716 	devres_remove_group(dev, NULL);
5717 	return host;
5718 
5719  err_out:
5720 	devres_release_group(dev, NULL);
5721 	return NULL;
5722 }
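
/*
 * Illustrative sketch (hypothetical LLD, "foo" names are placeholders):
 * allocate for the maximum possible number of ports and shrink
 * host->n_ports once the real count is known; ata_host_register()
 * frees the unused ports.
 *
 *	host = ata_host_alloc(&pdev->dev, FOO_MAX_PORTS);
 *	if (!host)
 *		return -ENOMEM;
 *	...
 *	if (foo_only_first_port_wired(pdev))
 *		host->n_ports = 1;
 */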
5723 
5724 /**
5725  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5726  *	@dev: generic device this host is associated with
5727  *	@ppi: array of ATA port_info to initialize host with
5728  *	@n_ports: number of ATA ports attached to this host
5729  *
5730  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5731  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5732  *	last entry will be used for the remaining ports.
5733  *
5734  *	RETURNS:
5735  *	Allocated ATA host on success, NULL on failure.
5736  *
5737  *	LOCKING:
5738  *	Inherited from calling layer (may sleep).
5739  */
5740 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5741 				      const struct ata_port_info * const * ppi,
5742 				      int n_ports)
5743 {
5744 	const struct ata_port_info *pi;
5745 	struct ata_host *host;
5746 	int i, j;
5747 
5748 	host = ata_host_alloc(dev, n_ports);
5749 	if (!host)
5750 		return NULL;
5751 
5752 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5753 		struct ata_port *ap = host->ports[i];
5754 
5755 		if (ppi[j])
5756 			pi = ppi[j++];
5757 
5758 		ap->pio_mask = pi->pio_mask;
5759 		ap->mwdma_mask = pi->mwdma_mask;
5760 		ap->udma_mask = pi->udma_mask;
5761 		ap->flags |= pi->flags;
5762 		ap->link.flags |= pi->link_flags;
5763 		ap->ops = pi->port_ops;
5764 
5765 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5766 			host->ops = pi->port_ops;
5767 	}
5768 
5769 	return host;
5770 }
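
/*
 * Illustrative sketch (hypothetical LLD): a NULL-terminated @ppi array
 * shorter than @n_ports; the last entry is reused for the remaining
 * ports as described above.  foo_port_ops is a placeholder.
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */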
5771 
5772 /**
5773  *	ata_slave_link_init - initialize slave link
5774  *	@ap: port to initialize slave link for
5775  *
5776  *	Create and initialize slave link for @ap.  This enables slave
5777  *	link handling on the port.
5778  *
5779  *	In libata, a port contains links and a link contains devices.
5780  *	There is a single host link but if a PMP is attached to it,
5781  *	there can be multiple fan-out links.  On SATA, there's usually
5782  *	a single device connected to a link but PATA and SATA
5783  *	controllers emulating a TF based interface can have two - master
5784  *	and slave.
5785  *
5786  *	However, there are a few controllers which don't fit into this
5787  *	abstraction too well - SATA controllers which emulate TF
5788  *	interface with both master and slave devices but also have
5789  *	separate SCR register sets for each device.  These controllers
5790  *	need separate links for physical link handling
5791  *	(e.g. onlineness, link speed) but should be treated like a
5792  *	traditional M/S controller for everything else (e.g. command
5793  *	issue, softreset).
5794  *
5795  *	slave_link is libata's way of handling this class of
5796  *	controllers without impacting core layer too much.  For
5797  *	anything other than physical link handling, the default host
5798  *	link is used for both master and slave.  For physical link
5799  *	handling, separate @ap->slave_link is used.  All dirty details
5800  *	are implemented inside libata core layer.  From LLD's POV, the
5801  *	only difference is that prereset, hardreset and postreset are
5802  *	called once more for the slave link, so the reset sequence
5803  *	looks like the following.
5804  *
5805  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5806  *	softreset(M) -> postreset(M) -> postreset(S)
5807  *
5808  *	Note that softreset is called only for the master.  Softreset
5809  *	resets both M/S by definition, so SRST on master should handle
5810  *	both (the standard method will work just fine).
5811  *
5812  *	LOCKING:
5813  *	Should be called before host is registered.
5814  *
5815  *	RETURNS:
5816  *	0 on success, -errno on failure.
5817  */
5818 int ata_slave_link_init(struct ata_port *ap)
5819 {
5820 	struct ata_link *link;
5821 
5822 	WARN_ON(ap->slave_link);
5823 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5824 
5825 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5826 	if (!link)
5827 		return -ENOMEM;
5828 
5829 	ata_link_init(ap, link, 1);
5830 	ap->slave_link = link;
5831 	return 0;
5832 }
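
/*
 * Illustrative sketch (hypothetical LLD): slave link handling is
 * enabled per port, after the host is allocated and before it is
 * registered.
 *
 *	struct ata_port *ap = host->ports[i];
 *	int rc = ata_slave_link_init(ap);
 *
 *	if (rc)
 *		return rc;
 */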
5833 
5834 static void ata_host_stop(struct device *gendev, void *res)
5835 {
5836 	struct ata_host *host = dev_get_drvdata(gendev);
5837 	int i;
5838 
5839 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5840 
5841 	for (i = 0; i < host->n_ports; i++) {
5842 		struct ata_port *ap = host->ports[i];
5843 
5844 		if (ap->ops->port_stop)
5845 			ap->ops->port_stop(ap);
5846 	}
5847 
5848 	if (host->ops->host_stop)
5849 		host->ops->host_stop(host);
5850 }
5851 
5852 /**
5853  *	ata_finalize_port_ops - finalize ata_port_operations
5854  *	@ops: ata_port_operations to finalize
5855  *
5856  *	An ata_port_operations can inherit from another ops and that
5857  *	ops can again inherit from another.  This can go on as many
5858  *	times as necessary as long as there is no loop in the
5859  *	inheritance chain.
5860  *
5861  *	Ops tables are finalized when the host is started.  NULL or
5862  *	unspecified entries are inherited from the closest ancestor
5863  *	which has the method and the entry is populated with it.
5864  *	After finalization, the ops table directly points to all the
5865  *	methods and ->inherits is no longer necessary and cleared.
5866  *
5867  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5868  *
5869  *	LOCKING:
5870  *	None.
5871  */
5872 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5873 {
5874 	static DEFINE_SPINLOCK(lock);
5875 	const struct ata_port_operations *cur;
5876 	void **begin = (void **)ops;
5877 	void **end = (void **)&ops->inherits;
5878 	void **pp;
5879 
5880 	if (!ops || !ops->inherits)
5881 		return;
5882 
5883 	spin_lock(&lock);
5884 
5885 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5886 		void **inherit = (void **)cur;
5887 
5888 		for (pp = begin; pp < end; pp++, inherit++)
5889 			if (!*pp)
5890 				*pp = *inherit;
5891 	}
5892 
5893 	for (pp = begin; pp < end; pp++)
5894 		if (IS_ERR(*pp))
5895 			*pp = NULL;
5896 
5897 	ops->inherits = NULL;
5898 
5899 	spin_unlock(&lock);
5900 }
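
/*
 * Illustrative sketch (hypothetical driver, "foo_" names are
 * placeholders): an ops table that inherits everything from
 * sata_port_ops and overrides a few methods.  Unset slots are filled
 * in from the ancestor chain here at host start time; ATA_OP_NULL in
 * an inheriting table forces a slot back to NULL.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= foo_hardreset,
 *		.scr_read	= foo_scr_read,
 *		.scr_write	= foo_scr_write,
 *	};
 */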
5901 
5902 /**
5903  *	ata_host_start - start and freeze ports of an ATA host
5904  *	@host: ATA host to start ports for
5905  *
5906  *	Start and then freeze ports of @host.  Started status is
5907  *	recorded in host->flags, so this function can be called
5908  *	multiple times.  Ports are guaranteed to get started only
5909  *	once.  If host->ops isn't initialized yet, it's set to the
5910  *	first non-dummy port ops.
5911  *
5912  *	LOCKING:
5913  *	Inherited from calling layer (may sleep).
5914  *
5915  *	RETURNS:
5916  *	0 if all ports are started successfully, -errno otherwise.
5917  */
5918 int ata_host_start(struct ata_host *host)
5919 {
5920 	int have_stop = 0;
5921 	void *start_dr = NULL;
5922 	int i, rc;
5923 
5924 	if (host->flags & ATA_HOST_STARTED)
5925 		return 0;
5926 
5927 	ata_finalize_port_ops(host->ops);
5928 
5929 	for (i = 0; i < host->n_ports; i++) {
5930 		struct ata_port *ap = host->ports[i];
5931 
5932 		ata_finalize_port_ops(ap->ops);
5933 
5934 		if (!host->ops && !ata_port_is_dummy(ap))
5935 			host->ops = ap->ops;
5936 
5937 		if (ap->ops->port_stop)
5938 			have_stop = 1;
5939 	}
5940 
5941 	if (host->ops->host_stop)
5942 		have_stop = 1;
5943 
5944 	if (have_stop) {
5945 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5946 		if (!start_dr)
5947 			return -ENOMEM;
5948 	}
5949 
5950 	for (i = 0; i < host->n_ports; i++) {
5951 		struct ata_port *ap = host->ports[i];
5952 
5953 		if (ap->ops->port_start) {
5954 			rc = ap->ops->port_start(ap);
5955 			if (rc) {
5956 				if (rc != -ENODEV)
5957 					dev_printk(KERN_ERR, host->dev,
5958 						"failed to start port %d "
5959 						"(errno=%d)\n", i, rc);
5960 				goto err_out;
5961 			}
5962 		}
5963 		ata_eh_freeze_port(ap);
5964 	}
5965 
5966 	if (start_dr)
5967 		devres_add(host->dev, start_dr);
5968 	host->flags |= ATA_HOST_STARTED;
5969 	return 0;
5970 
5971  err_out:
5972 	while (--i >= 0) {
5973 		struct ata_port *ap = host->ports[i];
5974 
5975 		if (ap->ops->port_stop)
5976 			ap->ops->port_stop(ap);
5977 	}
5978 	devres_free(start_dr);
5979 	return rc;
5980 }
5981 
5982 /**
5983  *	ata_host_init - Initialize a host struct
5984  *	@host:	host to initialize
5985  *	@dev:	device host is attached to
5986  *	@flags:	host flags
5987  *	@ops:	port_ops
5988  *
5989  *	LOCKING:
5990  *	PCI/etc. bus probe sem.
5991  *
5992  */
5993 /* KILLME - the only user left is ipr */
5994 void ata_host_init(struct ata_host *host, struct device *dev,
5995 		   unsigned long flags, struct ata_port_operations *ops)
5996 {
5997 	spin_lock_init(&host->lock);
5998 	host->dev = dev;
5999 	host->flags = flags;
6000 	host->ops = ops;
6001 }
6002 
6003 
6004 static void async_port_probe(void *data, async_cookie_t cookie)
6005 {
6006 	int rc;
6007 	struct ata_port *ap = data;
6008 
6009 	/*
6010 	 * If we're not allowed to scan this host in parallel,
6011 	 * we need to wait until all previous scans have completed
6012 	 * before going further.
6013 	 * Jeff Garzik says this is only within a controller, so we
6014 	 * don't need to wait for port 0, only for later ports.
6015 	 */
6016 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6017 		async_synchronize_cookie(cookie);
6018 
6019 	/* probe */
6020 	if (ap->ops->error_handler) {
6021 		struct ata_eh_info *ehi = &ap->link.eh_info;
6022 		unsigned long flags;
6023 
6024 		/* kick EH for boot probing */
6025 		spin_lock_irqsave(ap->lock, flags);
6026 
6027 		ehi->probe_mask |= ATA_ALL_DEVICES;
6028 		ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
6029 		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6030 
6031 		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6032 		ap->pflags |= ATA_PFLAG_LOADING;
6033 		ata_port_schedule_eh(ap);
6034 
6035 		spin_unlock_irqrestore(ap->lock, flags);
6036 
6037 		/* wait for EH to finish */
6038 		ata_port_wait_eh(ap);
6039 	} else {
6040 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6041 		rc = ata_bus_probe(ap);
6042 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6043 
6044 		if (rc) {
6045 			/* FIXME: do something useful here?
6046 			 * Current libata behavior will
6047 			 * tear down everything when
6048 			 * the module is removed
6049 			 * or the h/w is unplugged.
6050 			 */
6051 		}
6052 	}
6053 
6054 	/* in order to keep device order, we need to synchronize at this point */
6055 	async_synchronize_cookie(cookie);
6056 
6057 	ata_scsi_scan_host(ap, 1);
6058 }
6059 
6060 /**
6061  *	ata_host_register - register initialized ATA host
6062  *	@host: ATA host to register
6063  *	@sht: template for SCSI host
6064  *
6065  *	Register initialized ATA host.  @host is allocated using
6066  *	ata_host_alloc() and fully initialized by LLD.  This function
6067  *	starts ports, registers @host with ATA and SCSI layers and
6068  *	probes attached devices.
6069  *
6070  *	LOCKING:
6071  *	Inherited from calling layer (may sleep).
6072  *
6073  *	RETURNS:
6074  *	0 on success, -errno otherwise.
6075  */
6076 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6077 {
6078 	int i, rc;
6079 
6080 	/* host must have been started */
6081 	if (!(host->flags & ATA_HOST_STARTED)) {
6082 		dev_printk(KERN_ERR, host->dev,
6083 			   "BUG: trying to register unstarted host\n");
6084 		WARN_ON(1);
6085 		return -EINVAL;
6086 	}
6087 
6088 	/* Blow away unused ports.  This happens when LLD can't
6089 	 * determine the exact number of ports to allocate at
6090 	 * allocation time.
6091 	 */
6092 	for (i = host->n_ports; host->ports[i]; i++)
6093 		kfree(host->ports[i]);
6094 
6095 	/* give ports names and add SCSI hosts */
6096 	for (i = 0; i < host->n_ports; i++)
6097 		host->ports[i]->print_id = ata_print_id++;
6098 
6099 	rc = ata_scsi_add_hosts(host, sht);
6100 	if (rc)
6101 		return rc;
6102 
6103 	/* associate with ACPI nodes */
6104 	ata_acpi_associate(host);
6105 
6106 	/* set cable, sata_spd_limit and report */
6107 	for (i = 0; i < host->n_ports; i++) {
6108 		struct ata_port *ap = host->ports[i];
6109 		unsigned long xfer_mask;
6110 
6111 		/* set SATA cable type if still unset */
6112 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6113 			ap->cbl = ATA_CBL_SATA;
6114 
6115 		/* init sata_spd_limit to the current value */
6116 		sata_link_init_spd(&ap->link);
6117 		if (ap->slave_link)
6118 			sata_link_init_spd(ap->slave_link);
6119 
6120 		/* print per-port info to dmesg */
6121 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6122 					      ap->udma_mask);
6123 
6124 		if (!ata_port_is_dummy(ap)) {
6125 			ata_port_printk(ap, KERN_INFO,
6126 					"%cATA max %s %s\n",
6127 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6128 					ata_mode_string(xfer_mask),
6129 					ap->link.eh_info.desc);
6130 			ata_ehi_clear_desc(&ap->link.eh_info);
6131 		} else
6132 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6133 	}
6134 
6135 	/* perform each probe asynchronously */
6136 	for (i = 0; i < host->n_ports; i++) {
6137 		struct ata_port *ap = host->ports[i];
6138 		async_schedule(async_port_probe, ap);
6139 	}
6140 
6141 	return 0;
6142 }
6143 
6144 /**
6145  *	ata_host_activate - start host, request IRQ and register it
6146  *	@host: target ATA host
6147  *	@irq: IRQ to request
6148  *	@irq_handler: irq_handler used when requesting IRQ
6149  *	@irq_flags: irq_flags used when requesting IRQ
6150  *	@sht: scsi_host_template to use when registering the host
6151  *
6152  *	After allocating an ATA host and initializing it, most libata
6153  *	LLDs perform three steps to activate the host - start host,
6154  *	request IRQ and register it.  This helper takes necessary
6155  *	arguments and performs the three steps in one go.
6156  *
6157  *	An invalid IRQ skips the IRQ registration and expects the host to
6158  *	have set polling mode on the port. In this case, @irq_handler
6159  *	should be NULL.
6160  *
6161  *	LOCKING:
6162  *	Inherited from calling layer (may sleep).
6163  *
6164  *	RETURNS:
6165  *	0 on success, -errno otherwise.
6166  */
6167 int ata_host_activate(struct ata_host *host, int irq,
6168 		      irq_handler_t irq_handler, unsigned long irq_flags,
6169 		      struct scsi_host_template *sht)
6170 {
6171 	int i, rc;
6172 
6173 	rc = ata_host_start(host);
6174 	if (rc)
6175 		return rc;
6176 
6177 	/* Special case for polling mode */
6178 	if (!irq) {
6179 		WARN_ON(irq_handler);
6180 		return ata_host_register(host, sht);
6181 	}
6182 
6183 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6184 			      dev_driver_string(host->dev), host);
6185 	if (rc)
6186 		return rc;
6187 
6188 	for (i = 0; i < host->n_ports; i++)
6189 		ata_port_desc(host->ports[i], "irq %d", irq);
6190 
6191 	rc = ata_host_register(host, sht);
6192 	/* if failed, just free the IRQ and leave ports alone */
6193 	if (rc)
6194 		devm_free_irq(host->dev, irq, host);
6195 
6196 	return rc;
6197 }
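
/*
 * Illustrative sketch (hypothetical PCI LLD probe path, "foo_" names
 * and the BAR handling are placeholders): the usual allocate ->
 * initialize -> activate sequence that ends up in this helper.
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	host->iomap = pcim_iomap_table(pdev);
 *	...
 *	pci_set_master(pdev);
 *	return ata_host_activate(host, pdev->irq, foo_interrupt,
 *				 IRQF_SHARED, &foo_sht);
 */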
6198 
6199 /**
6200  *	ata_port_detach - Detach ATA port in preparation for device removal
6201  *	@ap: ATA port to be detached
6202  *
6203  *	Detach all ATA devices and the associated SCSI devices of @ap;
6204  *	then, remove the associated SCSI host.  @ap is guaranteed to
6205  *	be quiescent on return from this function.
6206  *
6207  *	LOCKING:
6208  *	Kernel thread context (may sleep).
6209  */
6210 static void ata_port_detach(struct ata_port *ap)
6211 {
6212 	unsigned long flags;
6213 
6214 	if (!ap->ops->error_handler)
6215 		goto skip_eh;
6216 
6217 	/* tell EH we're leaving & flush EH */
6218 	spin_lock_irqsave(ap->lock, flags);
6219 	ap->pflags |= ATA_PFLAG_UNLOADING;
6220 	ata_port_schedule_eh(ap);
6221 	spin_unlock_irqrestore(ap->lock, flags);
6222 
6223 	/* wait till EH commits suicide */
6224 	ata_port_wait_eh(ap);
6225 
6226 	/* it better be dead now */
6227 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6228 
6229 	cancel_rearming_delayed_work(&ap->hotplug_task);
6230 
6231  skip_eh:
6232 	/* remove the associated SCSI host */
6233 	scsi_remove_host(ap->scsi_host);
6234 }
6235 
6236 /**
6237  *	ata_host_detach - Detach all ports of an ATA host
6238  *	@host: Host to detach
6239  *
6240  *	Detach all ports of @host.
6241  *
6242  *	LOCKING:
6243  *	Kernel thread context (may sleep).
6244  */
6245 void ata_host_detach(struct ata_host *host)
6246 {
6247 	int i;
6248 
6249 	for (i = 0; i < host->n_ports; i++)
6250 		ata_port_detach(host->ports[i]);
6251 
6252 	/* the host is dead now, dissociate ACPI */
6253 	ata_acpi_dissociate(host);
6254 }
6255 
6256 #ifdef CONFIG_PCI
6257 
6258 /**
6259  *	ata_pci_remove_one - PCI layer callback for device removal
6260  *	@pdev: PCI device that was removed
6261  *
6262  *	PCI layer indicates to libata via this hook that a hot-unplug or
6263  *	module unload event has occurred.  Detach all ports.  Resource
6264  *	release is handled via devres.
6265  *
6266  *	LOCKING:
6267  *	Inherited from PCI layer (may sleep).
6268  */
6269 void ata_pci_remove_one(struct pci_dev *pdev)
6270 {
6271 	struct device *dev = &pdev->dev;
6272 	struct ata_host *host = dev_get_drvdata(dev);
6273 
6274 	ata_host_detach(host);
6275 }
6276 
6277 /* move to PCI subsystem */
6278 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6279 {
6280 	unsigned long tmp = 0;
6281 
6282 	switch (bits->width) {
6283 	case 1: {
6284 		u8 tmp8 = 0;
6285 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6286 		tmp = tmp8;
6287 		break;
6288 	}
6289 	case 2: {
6290 		u16 tmp16 = 0;
6291 		pci_read_config_word(pdev, bits->reg, &tmp16);
6292 		tmp = tmp16;
6293 		break;
6294 	}
6295 	case 4: {
6296 		u32 tmp32 = 0;
6297 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6298 		tmp = tmp32;
6299 		break;
6300 	}
6301 
6302 	default:
6303 		return -EINVAL;
6304 	}
6305 
6306 	tmp &= bits->mask;
6307 
6308 	return (tmp == bits->val) ? 1 : 0;
6309 }
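
/*
 * Illustrative sketch (register offsets and bit values are made-up
 * placeholders, not taken from a real chipset): how an LLD typically
 * uses struct pci_bits { reg, width, mask, val } to check whether a
 * port is enabled in PCI config space before touching it.
 *
 *	static const struct pci_bits foo_port_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// port 0
 *		{ 0x43, 1, 0x80, 0x80 },	// port 1
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_port_enable_bits[ap->port_no]))
 *		return -ENOENT;
 */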
6310 
6311 #ifdef CONFIG_PM
6312 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6313 {
6314 	pci_save_state(pdev);
6315 	pci_disable_device(pdev);
6316 
6317 	if (mesg.event & PM_EVENT_SLEEP)
6318 		pci_set_power_state(pdev, PCI_D3hot);
6319 }
6320 
6321 int ata_pci_device_do_resume(struct pci_dev *pdev)
6322 {
6323 	int rc;
6324 
6325 	pci_set_power_state(pdev, PCI_D0);
6326 	pci_restore_state(pdev);
6327 
6328 	rc = pcim_enable_device(pdev);
6329 	if (rc) {
6330 		dev_printk(KERN_ERR, &pdev->dev,
6331 			   "failed to enable device after resume (%d)\n", rc);
6332 		return rc;
6333 	}
6334 
6335 	pci_set_master(pdev);
6336 	return 0;
6337 }
6338 
6339 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6340 {
6341 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6342 	int rc = 0;
6343 
6344 	rc = ata_host_suspend(host, mesg);
6345 	if (rc)
6346 		return rc;
6347 
6348 	ata_pci_device_do_suspend(pdev, mesg);
6349 
6350 	return 0;
6351 }
6352 
6353 int ata_pci_device_resume(struct pci_dev *pdev)
6354 {
6355 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6356 	int rc;
6357 
6358 	rc = ata_pci_device_do_resume(pdev);
6359 	if (rc == 0)
6360 		ata_host_resume(host);
6361 	return rc;
6362 }
6363 #endif /* CONFIG_PM */
6364 
6365 #endif /* CONFIG_PCI */
6366 
6367 static int __init ata_parse_force_one(char **cur,
6368 				      struct ata_force_ent *force_ent,
6369 				      const char **reason)
6370 {
6371 	/* FIXME: Currently, there's no way to tag init const data and
6372 	 * using __initdata causes build failure on some versions of
6373 	 * gcc.  Once __initdataconst is implemented, add const to the
6374 	 * following structure.
6375 	 */
6376 	static struct ata_force_param force_tbl[] __initdata = {
6377 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6378 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6379 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6380 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6381 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6382 		{ "sata",	.cbl		= ATA_CBL_SATA },
6383 		{ "1.5Gbps",	.spd_limit	= 1 },
6384 		{ "3.0Gbps",	.spd_limit	= 2 },
6385 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6386 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6387 		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6388 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6389 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6390 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6391 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6392 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6393 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6394 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6395 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6396 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6397 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6398 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6399 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6400 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6401 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6402 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6403 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6404 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6405 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6406 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6407 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6408 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6409 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6410 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6411 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6412 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6413 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6414 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6415 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6416 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6417 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6418 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6419 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6420 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6421 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6422 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6423 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6424 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6425 	};
6426 	char *start = *cur, *p = *cur;
6427 	char *id, *val, *endp;
6428 	const struct ata_force_param *match_fp = NULL;
6429 	int nr_matches = 0, i;
6430 
6431 	/* find where this param ends and update *cur */
6432 	while (*p != '\0' && *p != ',')
6433 		p++;
6434 
6435 	if (*p == '\0')
6436 		*cur = p;
6437 	else
6438 		*cur = p + 1;
6439 
6440 	*p = '\0';
6441 
6442 	/* parse */
6443 	p = strchr(start, ':');
6444 	if (!p) {
6445 		val = strstrip(start);
6446 		goto parse_val;
6447 	}
6448 	*p = '\0';
6449 
6450 	id = strstrip(start);
6451 	val = strstrip(p + 1);
6452 
6453 	/* parse id */
6454 	p = strchr(id, '.');
6455 	if (p) {
6456 		*p++ = '\0';
6457 		force_ent->device = simple_strtoul(p, &endp, 10);
6458 		if (p == endp || *endp != '\0') {
6459 			*reason = "invalid device";
6460 			return -EINVAL;
6461 		}
6462 	}
6463 
6464 	force_ent->port = simple_strtoul(id, &endp, 10);
6465 	if (id == endp || *endp != '\0') {
6466 		*reason = "invalid port/link";
6467 		return -EINVAL;
6468 	}
6469 
6470  parse_val:
6471 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6472 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6473 		const struct ata_force_param *fp = &force_tbl[i];
6474 
6475 		if (strncasecmp(val, fp->name, strlen(val)))
6476 			continue;
6477 
6478 		nr_matches++;
6479 		match_fp = fp;
6480 
6481 		if (strcasecmp(val, fp->name) == 0) {
6482 			nr_matches = 1;
6483 			break;
6484 		}
6485 	}
6486 
6487 	if (!nr_matches) {
6488 		*reason = "unknown value";
6489 		return -EINVAL;
6490 	}
6491 	if (nr_matches > 1) {
6492 		*reason = "ambiguous value";
6493 		return -EINVAL;
6494 	}
6495 
6496 	force_ent->param = *match_fp;
6497 
6498 	return 0;
6499 }
6500 
6501 static void __init ata_parse_force_param(void)
6502 {
6503 	int idx = 0, size = 1;
6504 	int last_port = -1, last_device = -1;
6505 	char *p, *cur, *next;
6506 
6507 	/* calculate maximum number of params and allocate force_tbl */
6508 	for (p = ata_force_param_buf; *p; p++)
6509 		if (*p == ',')
6510 			size++;
6511 
6512 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6513 	if (!ata_force_tbl) {
6514 		printk(KERN_WARNING "ata: failed to allocate force table, "
6515 		       "libata.force ignored\n");
6516 		return;
6517 	}
6518 
6519 	/* parse and populate the table */
6520 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6521 		const char *reason = "";
6522 		struct ata_force_ent te = { .port = -1, .device = -1 };
6523 
6524 		next = cur;
6525 		if (ata_parse_force_one(&next, &te, &reason)) {
6526 			printk(KERN_WARNING "ata: failed to parse force "
6527 			       "parameter \"%s\" (%s)\n",
6528 			       cur, reason);
6529 			continue;
6530 		}
6531 
6532 		if (te.port == -1) {
6533 			te.port = last_port;
6534 			te.device = last_device;
6535 		}
6536 
6537 		ata_force_tbl[idx++] = te;
6538 
6539 		last_port = te.port;
6540 		last_device = te.device;
6541 	}
6542 
6543 	ata_force_tbl_size = idx;
6544 }
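
/*
 * Examples of the libata.force syntax handled above: a comma separated
 * list of "[ID:]VAL" entries where ID is "PORT" or "PORT.DEVICE".  An
 * entry without an ID applies to the most recently named port/device,
 * or to everything if none has been named yet.
 *
 *	libata.force=noncq			(all ports and devices)
 *	libata.force=3:1.5Gbps			(port 3 only)
 *	libata.force=2.00:udma/33		(port 2, device 00)
 *	libata.force=4:nohrst,1.5Gbps		(both apply to port 4)
 */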
6545 
6546 static int __init ata_init(void)
6547 {
6548 	int rc = -ENOMEM;
6549 
6550 	ata_parse_force_param();
6551 
6552 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6553 	if (!ata_aux_wq)
6554 		goto fail;
6555 
6556 	rc = ata_sff_init();
6557 	if (rc)
6558 		goto fail;
6559 
6560 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6561 	return 0;
6562 
6563 fail:
6564 	kfree(ata_force_tbl);
6565 	if (ata_aux_wq)
6566 		destroy_workqueue(ata_aux_wq);
6567 	return rc;
6568 }
6569 
6570 static void __exit ata_exit(void)
6571 {
6572 	ata_sff_exit();
6573 	kfree(ata_force_tbl);
6574 	destroy_workqueue(ata_aux_wq);
6575 }
6576 
6577 subsys_initcall(ata_init);
6578 module_exit(ata_exit);
6579 
6580 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6581 
6582 int ata_ratelimit(void)
6583 {
6584 	return __ratelimit(&ratelimit);
6585 }
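
/*
 * Typical use (sketch): gate noisy interrupt-path messages so that at
 * most a handful are emitted per second.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"unexpected IRQ, status 0x%x\n", status);
 */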
6586 
6587 /**
6588  *	ata_wait_register - wait until register value changes
6589  *	@reg: IO-mapped register
6590  *	@mask: Mask to apply to read register value
6591  *	@val: Wait condition
6592  *	@interval: polling interval in milliseconds
6593  *	@timeout: timeout in milliseconds
6594  *
6595  *	Waiting for some bits of a register to change is a common
6596  *	operation for ATA controllers.  This function reads the 32bit LE
6597  *	IO-mapped register @reg and tests for the following condition.
6598  *
6599  *	(*@reg & @mask) != @val
6600  *
6601  *	If the condition is met, it returns; otherwise, the process is
6602  *	repeated every @interval milliseconds until @timeout expires.
6603  *
6604  *	LOCKING:
6605  *	Kernel thread context (may sleep)
6606  *
6607  *	RETURNS:
6608  *	The final register value.
6609  */
6610 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6611 		      unsigned long interval, unsigned long timeout)
6612 {
6613 	unsigned long deadline;
6614 	u32 tmp;
6615 
6616 	tmp = ioread32(reg);
6617 
6618 	/* Calculate timeout _after_ the first read to make sure
6619 	 * preceding writes reach the controller before starting to
6620 	 * eat away the timeout.
6621 	 */
6622 	deadline = ata_deadline(jiffies, timeout);
6623 
6624 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6625 		msleep(interval);
6626 		tmp = ioread32(reg);
6627 	}
6628 
6629 	return tmp;
6630 }
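
/*
 * Illustrative sketch ("FOO_" register names and the mmio pointer are
 * placeholders): poll an MMIO status register until a busy bit clears,
 * checking every 10 ms and giving up after one second.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + FOO_STATUS, FOO_BUSY, FOO_BUSY,
 *				   10, 1000);
 *	if (status & FOO_BUSY)
 *		return -EBUSY;	// still busy, wait timed out
 */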
6631 
6632 /*
6633  * Dummy port_ops
6634  */
6635 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6636 {
6637 	return AC_ERR_SYSTEM;
6638 }
6639 
6640 static void ata_dummy_error_handler(struct ata_port *ap)
6641 {
6642 	/* truly dummy */
6643 }
6644 
6645 struct ata_port_operations ata_dummy_port_ops = {
6646 	.qc_prep		= ata_noop_qc_prep,
6647 	.qc_issue		= ata_dummy_qc_issue,
6648 	.error_handler		= ata_dummy_error_handler,
6649 };
6650 
6651 const struct ata_port_info ata_dummy_port_info = {
6652 	.port_ops		= &ata_dummy_port_ops,
6653 };
6654 
6655 /*
6656  * libata is essentially a library of internal helper functions for
6657  * low-level ATA host controller drivers.  As such, the API/ABI is
6658  * likely to change as new drivers are added and updated.
6659  * Do not depend on ABI/API stability.
6660  */
6661 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6662 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6663 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6664 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6665 EXPORT_SYMBOL_GPL(sata_port_ops);
6666 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6667 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6668 EXPORT_SYMBOL_GPL(ata_link_next);
6669 EXPORT_SYMBOL_GPL(ata_dev_next);
6670 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6671 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6672 EXPORT_SYMBOL_GPL(ata_host_init);
6673 EXPORT_SYMBOL_GPL(ata_host_alloc);
6674 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6675 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6676 EXPORT_SYMBOL_GPL(ata_host_start);
6677 EXPORT_SYMBOL_GPL(ata_host_register);
6678 EXPORT_SYMBOL_GPL(ata_host_activate);
6679 EXPORT_SYMBOL_GPL(ata_host_detach);
6680 EXPORT_SYMBOL_GPL(ata_sg_init);
6681 EXPORT_SYMBOL_GPL(ata_qc_complete);
6682 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6683 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6684 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6685 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6686 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6687 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6688 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6689 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6690 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6691 EXPORT_SYMBOL_GPL(ata_mode_string);
6692 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6693 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6694 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6695 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6696 EXPORT_SYMBOL_GPL(ata_dev_disable);
6697 EXPORT_SYMBOL_GPL(sata_set_spd);
6698 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6699 EXPORT_SYMBOL_GPL(sata_link_debounce);
6700 EXPORT_SYMBOL_GPL(sata_link_resume);
6701 EXPORT_SYMBOL_GPL(ata_std_prereset);
6702 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6703 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6704 EXPORT_SYMBOL_GPL(ata_std_postreset);
6705 EXPORT_SYMBOL_GPL(ata_dev_classify);
6706 EXPORT_SYMBOL_GPL(ata_dev_pair);
6707 EXPORT_SYMBOL_GPL(ata_ratelimit);
6708 EXPORT_SYMBOL_GPL(ata_wait_register);
6709 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6710 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6711 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6712 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6713 EXPORT_SYMBOL_GPL(sata_scr_valid);
6714 EXPORT_SYMBOL_GPL(sata_scr_read);
6715 EXPORT_SYMBOL_GPL(sata_scr_write);
6716 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6717 EXPORT_SYMBOL_GPL(ata_link_online);
6718 EXPORT_SYMBOL_GPL(ata_link_offline);
6719 #ifdef CONFIG_PM
6720 EXPORT_SYMBOL_GPL(ata_host_suspend);
6721 EXPORT_SYMBOL_GPL(ata_host_resume);
6722 #endif /* CONFIG_PM */
6723 EXPORT_SYMBOL_GPL(ata_id_string);
6724 EXPORT_SYMBOL_GPL(ata_id_c_string);
6725 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6726 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6727 
6728 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6729 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6730 EXPORT_SYMBOL_GPL(ata_timing_compute);
6731 EXPORT_SYMBOL_GPL(ata_timing_merge);
6732 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6733 
6734 #ifdef CONFIG_PCI
6735 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6736 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6737 #ifdef CONFIG_PM
6738 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6739 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6740 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6741 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6742 #endif /* CONFIG_PM */
6743 #endif /* CONFIG_PCI */
6744 
6745 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6746 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6747 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6748 EXPORT_SYMBOL_GPL(ata_port_desc);
6749 #ifdef CONFIG_PCI
6750 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6751 #endif /* CONFIG_PCI */
6752 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6753 EXPORT_SYMBOL_GPL(ata_link_abort);
6754 EXPORT_SYMBOL_GPL(ata_port_abort);
6755 EXPORT_SYMBOL_GPL(ata_port_freeze);
6756 EXPORT_SYMBOL_GPL(sata_async_notification);
6757 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6758 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6759 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6760 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6761 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6762 EXPORT_SYMBOL_GPL(ata_do_eh);
6763 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6764 
6765 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6766 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6767 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6768 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6769 EXPORT_SYMBOL_GPL(ata_cable_sata);
6770