xref: /linux/drivers/ata/libata-core.c (revision ac6a0cf6716bb46813d0161024c66c2af66e53d1)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <scsi/scsi.h>
62 #include <scsi/scsi_cmnd.h>
63 #include <scsi/scsi_host.h>
64 #include <linux/libata.h>
65 #include <asm/byteorder.h>
66 #include <linux/cdrom.h>
67 
68 #include "libata.h"
69 
70 
71 /* debounce timing parameters in msecs { interval, duration, timeout } */
72 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
73 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
74 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
75 
76 const struct ata_port_operations ata_base_port_ops = {
77 	.prereset		= ata_std_prereset,
78 	.postreset		= ata_std_postreset,
79 	.error_handler		= ata_std_error_handler,
80 };
81 
82 const struct ata_port_operations sata_port_ops = {
83 	.inherits		= &ata_base_port_ops,
84 
85 	.qc_defer		= ata_std_qc_defer,
86 	.hardreset		= sata_std_hardreset,
87 };
88 
89 static unsigned int ata_dev_init_params(struct ata_device *dev,
90 					u16 heads, u16 sectors);
91 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
92 static unsigned int ata_dev_set_feature(struct ata_device *dev,
93 					u8 enable, u8 feature);
94 static void ata_dev_xfermask(struct ata_device *dev);
95 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
96 
97 unsigned int ata_print_id = 1;
98 static struct workqueue_struct *ata_wq;
99 
100 struct workqueue_struct *ata_aux_wq;
101 
102 struct ata_force_param {
103 	const char	*name;
104 	unsigned int	cbl;
105 	int		spd_limit;
106 	unsigned long	xfer_mask;
107 	unsigned int	horkage_on;
108 	unsigned int	horkage_off;
109 	unsigned int	lflags;
110 };
111 
112 struct ata_force_ent {
113 	int			port;
114 	int			device;
115 	struct ata_force_param	param;
116 };
117 
118 static struct ata_force_ent *ata_force_tbl;
119 static int ata_force_tbl_size;
120 
121 static char ata_force_param_buf[PAGE_SIZE] __initdata;
122 /* param_buf is thrown away after initialization, disallow read */
123 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
124 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
125 
126 static int atapi_enabled = 1;
127 module_param(atapi_enabled, int, 0444);
128 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
129 
130 static int atapi_dmadir = 0;
131 module_param(atapi_dmadir, int, 0444);
132 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
133 
134 int atapi_passthru16 = 1;
135 module_param(atapi_passthru16, int, 0444);
136 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
137 
138 int libata_fua = 0;
139 module_param_named(fua, libata_fua, int, 0444);
140 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
141 
142 static int ata_ignore_hpa;
143 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
144 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
145 
146 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
147 module_param_named(dma, libata_dma_mask, int, 0444);
148 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
149 
150 static int ata_probe_timeout;
151 module_param(ata_probe_timeout, int, 0444);
152 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
153 
154 int libata_noacpi = 0;
155 module_param_named(noacpi, libata_noacpi, int, 0444);
156 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
157 
158 int libata_allow_tpm = 0;
159 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
160 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
161 
162 MODULE_AUTHOR("Jeff Garzik");
163 MODULE_DESCRIPTION("Library module for ATA devices");
164 MODULE_LICENSE("GPL");
165 MODULE_VERSION(DRV_VERSION);
166 
167 
168 static bool ata_sstatus_online(u32 sstatus)
169 {
170 	return (sstatus & 0xf) == 0x3;
171 }
172 
173 /**
174  *	ata_link_next - link iteration helper
175  *	@link: the previous link, NULL to start
176  *	@ap: ATA port containing links to iterate
177  *	@mode: iteration mode, one of ATA_LITER_*
178  *
179  *	LOCKING:
180  *	Host lock or EH context.
181  *
182  *	RETURNS:
183  *	Pointer to the next link.
184  */
185 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
186 			       enum ata_link_iter_mode mode)
187 {
188 	BUG_ON(mode != ATA_LITER_EDGE &&
189 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
190 
191 	/* NULL link indicates start of iteration */
192 	if (!link)
193 		switch (mode) {
194 		case ATA_LITER_EDGE:
195 		case ATA_LITER_PMP_FIRST:
196 			if (sata_pmp_attached(ap))
197 				return ap->pmp_link;
198 			/* fall through */
199 		case ATA_LITER_HOST_FIRST:
200 			return &ap->link;
201 		}
202 
203 	/* we just iterated over the host link, what's next? */
204 	if (link == &ap->link)
205 		switch (mode) {
206 		case ATA_LITER_HOST_FIRST:
207 			if (sata_pmp_attached(ap))
208 				return ap->pmp_link;
209 			/* fall through */
210 		case ATA_LITER_PMP_FIRST:
211 			if (unlikely(ap->slave_link))
212 				return ap->slave_link;
213 			/* fall through */
214 		case ATA_LITER_EDGE:
215 			return NULL;
216 		}
217 
218 	/* slave_link excludes PMP */
219 	if (unlikely(link == ap->slave_link))
220 		return NULL;
221 
222 	/* we were over a PMP link */
223 	if (++link < ap->pmp_link + ap->nr_pmp_links)
224 		return link;
225 
226 	if (mode == ATA_LITER_PMP_FIRST)
227 		return &ap->link;
228 
229 	return NULL;
230 }
231 
232 /**
233  *	ata_dev_next - device iteration helper
234  *	@dev: the previous device, NULL to start
235  *	@link: ATA link containing devices to iterate
236  *	@mode: iteration mode, one of ATA_DITER_*
237  *
238  *	LOCKING:
239  *	Host lock or EH context.
240  *
241  *	RETURNS:
242  *	Pointer to the next device.
243  */
244 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
245 				enum ata_dev_iter_mode mode)
246 {
247 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
248 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
249 
250 	/* NULL dev indicates start of iteration */
251 	if (!dev)
252 		switch (mode) {
253 		case ATA_DITER_ENABLED:
254 		case ATA_DITER_ALL:
255 			dev = link->device;
256 			goto check;
257 		case ATA_DITER_ENABLED_REVERSE:
258 		case ATA_DITER_ALL_REVERSE:
259 			dev = link->device + ata_link_max_devices(link) - 1;
260 			goto check;
261 		}
262 
263  next:
264 	/* move to the next one */
265 	switch (mode) {
266 	case ATA_DITER_ENABLED:
267 	case ATA_DITER_ALL:
268 		if (++dev < link->device + ata_link_max_devices(link))
269 			goto check;
270 		return NULL;
271 	case ATA_DITER_ENABLED_REVERSE:
272 	case ATA_DITER_ALL_REVERSE:
273 		if (--dev >= link->device)
274 			goto check;
275 		return NULL;
276 	}
277 
278  check:
279 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
280 	    !ata_dev_enabled(dev))
281 		goto next;
282 	return dev;
283 }
284 
285 /**
286  *	ata_dev_phys_link - find physical link for a device
287  *	@dev: ATA device to look up physical link for
288  *
289  *	Look up physical link which @dev is attached to.  Note that
290  *	this is different from @dev->link only when @dev is on slave
291  *	link.  For all other cases, it's the same as @dev->link.
292  *
293  *	LOCKING:
294  *	Don't care.
295  *
296  *	RETURNS:
297  *	Pointer to the found physical link.
298  */
299 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
300 {
301 	struct ata_port *ap = dev->link->ap;
302 
303 	if (!ap->slave_link)
304 		return dev->link;
305 	if (!dev->devno)
306 		return &ap->link;
307 	return ap->slave_link;
308 }
309 
310 /**
311  *	ata_force_cbl - force cable type according to libata.force
312  *	@ap: ATA port of interest
313  *
314  *	Force cable type according to libata.force and whine about it.
315  *	The last entry which has matching port number is used, so it
316  *	can be specified as part of device force parameters.  For
317  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
318  *	same effect.
319  *
320  *	LOCKING:
321  *	EH context.
322  */
323 void ata_force_cbl(struct ata_port *ap)
324 {
325 	int i;
326 
327 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
328 		const struct ata_force_ent *fe = &ata_force_tbl[i];
329 
330 		if (fe->port != -1 && fe->port != ap->print_id)
331 			continue;
332 
333 		if (fe->param.cbl == ATA_CBL_NONE)
334 			continue;
335 
336 		ap->cbl = fe->param.cbl;
337 		ata_port_printk(ap, KERN_NOTICE,
338 				"FORCE: cable set to %s\n", fe->param.name);
339 		return;
340 	}
341 }
342 
343 /**
344  *	ata_force_link_limits - force link limits according to libata.force
345  *	@link: ATA link of interest
346  *
347  *	Force link flags and SATA spd limit according to libata.force
348  *	and whine about it.  When only the port part is specified
349  *	(e.g. 1:), the limit applies to all links connected to both
350  *	the host link and all fan-out ports connected via PMP.  If the
351  *	device part is specified as 0 (e.g. 1.00:), it specifies the
352  *	first fan-out link not the host link.  Device number 15 always
353  *	points to the host link whether PMP is attached or not.  If the
354  *	controller has slave link, device number 16 points to it.
355  *
356  *	LOCKING:
357  *	EH context.
358  */
359 static void ata_force_link_limits(struct ata_link *link)
360 {
361 	bool did_spd = false;
362 	int linkno = link->pmp;
363 	int i;
364 
365 	if (ata_is_host_link(link))
366 		linkno += 15;
367 
368 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
369 		const struct ata_force_ent *fe = &ata_force_tbl[i];
370 
371 		if (fe->port != -1 && fe->port != link->ap->print_id)
372 			continue;
373 
374 		if (fe->device != -1 && fe->device != linkno)
375 			continue;
376 
377 		/* only honor the first spd limit */
378 		if (!did_spd && fe->param.spd_limit) {
379 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
380 			ata_link_printk(link, KERN_NOTICE,
381 					"FORCE: PHY spd limit set to %s\n",
382 					fe->param.name);
383 			did_spd = true;
384 		}
385 
386 		/* let lflags stack */
387 		if (fe->param.lflags) {
388 			link->flags |= fe->param.lflags;
389 			ata_link_printk(link, KERN_NOTICE,
390 					"FORCE: link flag 0x%x forced -> 0x%x\n",
391 					fe->param.lflags, link->flags);
392 		}
393 	}
394 }
395 
396 /**
397  *	ata_force_xfermask - force xfermask according to libata.force
398  *	@dev: ATA device of interest
399  *
400  *	Force xfer_mask according to libata.force and whine about it.
401  *	For consistency with link selection, device number 15 selects
402  *	the first device connected to the host link.
403  *
404  *	LOCKING:
405  *	EH context.
406  */
407 static void ata_force_xfermask(struct ata_device *dev)
408 {
409 	int devno = dev->link->pmp + dev->devno;
410 	int alt_devno = devno;
411 	int i;
412 
413 	/* allow n.15/16 for devices attached to host port */
414 	if (ata_is_host_link(dev->link))
415 		alt_devno += 15;
416 
417 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
418 		const struct ata_force_ent *fe = &ata_force_tbl[i];
419 		unsigned long pio_mask, mwdma_mask, udma_mask;
420 
421 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
422 			continue;
423 
424 		if (fe->device != -1 && fe->device != devno &&
425 		    fe->device != alt_devno)
426 			continue;
427 
428 		if (!fe->param.xfer_mask)
429 			continue;
430 
431 		ata_unpack_xfermask(fe->param.xfer_mask,
432 				    &pio_mask, &mwdma_mask, &udma_mask);
433 		if (udma_mask)
434 			dev->udma_mask = udma_mask;
435 		else if (mwdma_mask) {
436 			dev->udma_mask = 0;
437 			dev->mwdma_mask = mwdma_mask;
438 		} else {
439 			dev->udma_mask = 0;
440 			dev->mwdma_mask = 0;
441 			dev->pio_mask = pio_mask;
442 		}
443 
444 		ata_dev_printk(dev, KERN_NOTICE,
445 			"FORCE: xfer_mask set to %s\n", fe->param.name);
446 		return;
447 	}
448 }
449 
450 /**
451  *	ata_force_horkage - force horkage according to libata.force
452  *	@dev: ATA device of interest
453  *
454  *	Force horkage according to libata.force and whine about it.
455  *	For consistency with link selection, device number 15 selects
456  *	the first device connected to the host link.
457  *
458  *	LOCKING:
459  *	EH context.
460  */
461 static void ata_force_horkage(struct ata_device *dev)
462 {
463 	int devno = dev->link->pmp + dev->devno;
464 	int alt_devno = devno;
465 	int i;
466 
467 	/* allow n.15/16 for devices attached to host port */
468 	if (ata_is_host_link(dev->link))
469 		alt_devno += 15;
470 
471 	for (i = 0; i < ata_force_tbl_size; i++) {
472 		const struct ata_force_ent *fe = &ata_force_tbl[i];
473 
474 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
475 			continue;
476 
477 		if (fe->device != -1 && fe->device != devno &&
478 		    fe->device != alt_devno)
479 			continue;
480 
481 		if (!(~dev->horkage & fe->param.horkage_on) &&
482 		    !(dev->horkage & fe->param.horkage_off))
483 			continue;
484 
485 		dev->horkage |= fe->param.horkage_on;
486 		dev->horkage &= ~fe->param.horkage_off;
487 
488 		ata_dev_printk(dev, KERN_NOTICE,
489 			"FORCE: horkage modified (%s)\n", fe->param.name);
490 	}
491 }
492 
493 /**
494  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
495  *	@opcode: SCSI opcode
496  *
497  *	Determine ATAPI command type from @opcode.
498  *
499  *	LOCKING:
500  *	None.
501  *
502  *	RETURNS:
503  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
504  */
505 int atapi_cmd_type(u8 opcode)
506 {
507 	switch (opcode) {
508 	case GPCMD_READ_10:
509 	case GPCMD_READ_12:
510 		return ATAPI_READ;
511 
512 	case GPCMD_WRITE_10:
513 	case GPCMD_WRITE_12:
514 	case GPCMD_WRITE_AND_VERIFY_10:
515 		return ATAPI_WRITE;
516 
517 	case GPCMD_READ_CD:
518 	case GPCMD_READ_CD_MSF:
519 		return ATAPI_READ_CD;
520 
521 	case ATA_16:
522 	case ATA_12:
523 		if (atapi_passthru16)
524 			return ATAPI_PASS_THRU;
525 		/* fall thru */
526 	default:
527 		return ATAPI_MISC;
528 	}
529 }
530 
531 /**
532  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
533  *	@tf: Taskfile to convert
534  *	@pmp: Port multiplier port
535  *	@is_cmd: This FIS is for command
536  *	@fis: Buffer into which data will output
537  *
538  *	Converts a standard ATA taskfile to a Serial ATA
539  *	FIS structure (Register - Host to Device).
540  *
541  *	LOCKING:
542  *	Inherited from caller.
543  */
544 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
545 {
546 	fis[0] = 0x27;			/* Register - Host to Device FIS */
547 	fis[1] = pmp & 0xf;		/* Port multiplier number*/
548 	if (is_cmd)
549 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
550 
551 	fis[2] = tf->command;
552 	fis[3] = tf->feature;
553 
554 	fis[4] = tf->lbal;
555 	fis[5] = tf->lbam;
556 	fis[6] = tf->lbah;
557 	fis[7] = tf->device;
558 
559 	fis[8] = tf->hob_lbal;
560 	fis[9] = tf->hob_lbam;
561 	fis[10] = tf->hob_lbah;
562 	fis[11] = tf->hob_feature;
563 
564 	fis[12] = tf->nsect;
565 	fis[13] = tf->hob_nsect;
566 	fis[14] = 0;
567 	fis[15] = tf->ctl;
568 
569 	fis[16] = 0;
570 	fis[17] = 0;
571 	fis[18] = 0;
572 	fis[19] = 0;
573 }
574 
575 /**
576  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
577  *	@fis: Buffer from which data will be input
578  *	@tf: Taskfile to output
579  *
580  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
581  *
582  *	LOCKING:
583  *	Inherited from caller.
584  */
585 
586 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
587 {
588 	tf->command	= fis[2];	/* status */
589 	tf->feature	= fis[3];	/* error */
590 
591 	tf->lbal	= fis[4];
592 	tf->lbam	= fis[5];
593 	tf->lbah	= fis[6];
594 	tf->device	= fis[7];
595 
596 	tf->hob_lbal	= fis[8];
597 	tf->hob_lbam	= fis[9];
598 	tf->hob_lbah	= fis[10];
599 
600 	tf->nsect	= fis[12];
601 	tf->hob_nsect	= fis[13];
602 }
603 
604 static const u8 ata_rw_cmds[] = {
605 	/* pio multi */
606 	ATA_CMD_READ_MULTI,
607 	ATA_CMD_WRITE_MULTI,
608 	ATA_CMD_READ_MULTI_EXT,
609 	ATA_CMD_WRITE_MULTI_EXT,
610 	0,
611 	0,
612 	0,
613 	ATA_CMD_WRITE_MULTI_FUA_EXT,
614 	/* pio */
615 	ATA_CMD_PIO_READ,
616 	ATA_CMD_PIO_WRITE,
617 	ATA_CMD_PIO_READ_EXT,
618 	ATA_CMD_PIO_WRITE_EXT,
619 	0,
620 	0,
621 	0,
622 	0,
623 	/* dma */
624 	ATA_CMD_READ,
625 	ATA_CMD_WRITE,
626 	ATA_CMD_READ_EXT,
627 	ATA_CMD_WRITE_EXT,
628 	0,
629 	0,
630 	0,
631 	ATA_CMD_WRITE_FUA_EXT
632 };
633 
634 /**
635  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
636  *	@tf: command to examine and configure
637  *	@dev: device tf belongs to
638  *
639  *	Examine the device configuration and tf->flags to calculate
640  *	the proper read/write commands and protocol to use.
641  *
642  *	LOCKING:
643  *	caller.
644  */
645 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
646 {
647 	u8 cmd;
648 
649 	int index, fua, lba48, write;
650 
651 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
652 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
653 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
654 
655 	if (dev->flags & ATA_DFLAG_PIO) {
656 		tf->protocol = ATA_PROT_PIO;
657 		index = dev->multi_count ? 0 : 8;
658 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
659 		/* Unable to use DMA due to host limitation */
660 		tf->protocol = ATA_PROT_PIO;
661 		index = dev->multi_count ? 0 : 8;
662 	} else {
663 		tf->protocol = ATA_PROT_DMA;
664 		index = 16;
665 	}
666 
667 	cmd = ata_rw_cmds[index + fua + lba48 + write];
668 	if (cmd) {
669 		tf->command = cmd;
670 		return 0;
671 	}
672 	return -1;
673 }
674 
675 /**
676  *	ata_tf_read_block - Read block address from ATA taskfile
677  *	@tf: ATA taskfile of interest
678  *	@dev: ATA device @tf belongs to
679  *
680  *	LOCKING:
681  *	None.
682  *
683  *	Read block address from @tf.  This function can handle all
684  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
685  *	flags select the address format to use.
686  *
687  *	RETURNS:
688  *	Block address read from @tf.
689  */
690 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
691 {
692 	u64 block = 0;
693 
694 	if (tf->flags & ATA_TFLAG_LBA) {
695 		if (tf->flags & ATA_TFLAG_LBA48) {
696 			block |= (u64)tf->hob_lbah << 40;
697 			block |= (u64)tf->hob_lbam << 32;
698 			block |= (u64)tf->hob_lbal << 24;
699 		} else
700 			block |= (tf->device & 0xf) << 24;
701 
702 		block |= tf->lbah << 16;
703 		block |= tf->lbam << 8;
704 		block |= tf->lbal;
705 	} else {
706 		u32 cyl, head, sect;
707 
708 		cyl = tf->lbam | (tf->lbah << 8);
709 		head = tf->device & 0xf;
710 		sect = tf->lbal;
711 
712 		block = (cyl * dev->heads + head) * dev->sectors + sect;
713 	}
714 
715 	return block;
716 }
717 
718 /**
719  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
720  *	@tf: Target ATA taskfile
721  *	@dev: ATA device @tf belongs to
722  *	@block: Block address
723  *	@n_block: Number of blocks
724  *	@tf_flags: RW/FUA etc...
725  *	@tag: tag
726  *
727  *	LOCKING:
728  *	None.
729  *
730  *	Build ATA taskfile @tf for read/write request described by
731  *	@block, @n_block, @tf_flags and @tag on @dev.
732  *
733  *	RETURNS:
734  *
735  *	0 on success, -ERANGE if the request is too large for @dev,
736  *	-EINVAL if the request is invalid.
737  */
738 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
739 		    u64 block, u32 n_block, unsigned int tf_flags,
740 		    unsigned int tag)
741 {
742 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
743 	tf->flags |= tf_flags;
744 
745 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
746 		/* yay, NCQ */
747 		if (!lba_48_ok(block, n_block))
748 			return -ERANGE;
749 
750 		tf->protocol = ATA_PROT_NCQ;
751 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
752 
753 		if (tf->flags & ATA_TFLAG_WRITE)
754 			tf->command = ATA_CMD_FPDMA_WRITE;
755 		else
756 			tf->command = ATA_CMD_FPDMA_READ;
757 
758 		tf->nsect = tag << 3;
759 		tf->hob_feature = (n_block >> 8) & 0xff;
760 		tf->feature = n_block & 0xff;
761 
762 		tf->hob_lbah = (block >> 40) & 0xff;
763 		tf->hob_lbam = (block >> 32) & 0xff;
764 		tf->hob_lbal = (block >> 24) & 0xff;
765 		tf->lbah = (block >> 16) & 0xff;
766 		tf->lbam = (block >> 8) & 0xff;
767 		tf->lbal = block & 0xff;
768 
769 		tf->device = 1 << 6;
770 		if (tf->flags & ATA_TFLAG_FUA)
771 			tf->device |= 1 << 7;
772 	} else if (dev->flags & ATA_DFLAG_LBA) {
773 		tf->flags |= ATA_TFLAG_LBA;
774 
775 		if (lba_28_ok(block, n_block)) {
776 			/* use LBA28 */
777 			tf->device |= (block >> 24) & 0xf;
778 		} else if (lba_48_ok(block, n_block)) {
779 			if (!(dev->flags & ATA_DFLAG_LBA48))
780 				return -ERANGE;
781 
782 			/* use LBA48 */
783 			tf->flags |= ATA_TFLAG_LBA48;
784 
785 			tf->hob_nsect = (n_block >> 8) & 0xff;
786 
787 			tf->hob_lbah = (block >> 40) & 0xff;
788 			tf->hob_lbam = (block >> 32) & 0xff;
789 			tf->hob_lbal = (block >> 24) & 0xff;
790 		} else
791 			/* request too large even for LBA48 */
792 			return -ERANGE;
793 
794 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
795 			return -EINVAL;
796 
797 		tf->nsect = n_block & 0xff;
798 
799 		tf->lbah = (block >> 16) & 0xff;
800 		tf->lbam = (block >> 8) & 0xff;
801 		tf->lbal = block & 0xff;
802 
803 		tf->device |= ATA_LBA;
804 	} else {
805 		/* CHS */
806 		u32 sect, head, cyl, track;
807 
808 		/* The request -may- be too large for CHS addressing. */
809 		if (!lba_28_ok(block, n_block))
810 			return -ERANGE;
811 
812 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
813 			return -EINVAL;
814 
815 		/* Convert LBA to CHS */
816 		track = (u32)block / dev->sectors;
817 		cyl   = track / dev->heads;
818 		head  = track % dev->heads;
819 		sect  = (u32)block % dev->sectors + 1;
820 
821 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
822 			(u32)block, track, cyl, head, sect);
823 
824 		/* Check whether the converted CHS can fit.
825 		   Cylinder: 0-65535
826 		   Head: 0-15
827 		   Sector: 1-255*/
828 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
829 			return -ERANGE;
830 
831 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
832 		tf->lbal = sect;
833 		tf->lbam = cyl;
834 		tf->lbah = cyl >> 8;
835 		tf->device |= head;
836 	}
837 
838 	return 0;
839 }
840 
841 /**
842  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
843  *	@pio_mask: pio_mask
844  *	@mwdma_mask: mwdma_mask
845  *	@udma_mask: udma_mask
846  *
847  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
848  *	unsigned int xfer_mask.
849  *
850  *	LOCKING:
851  *	None.
852  *
853  *	RETURNS:
854  *	Packed xfer_mask.
855  */
856 unsigned long ata_pack_xfermask(unsigned long pio_mask,
857 				unsigned long mwdma_mask,
858 				unsigned long udma_mask)
859 {
860 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
861 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
862 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
863 }
864 
865 /**
866  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
867  *	@xfer_mask: xfer_mask to unpack
868  *	@pio_mask: resulting pio_mask
869  *	@mwdma_mask: resulting mwdma_mask
870  *	@udma_mask: resulting udma_mask
871  *
872  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
873  *	Any NULL distination masks will be ignored.
874  */
875 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
876 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
877 {
878 	if (pio_mask)
879 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
880 	if (mwdma_mask)
881 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
882 	if (udma_mask)
883 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
884 }
885 
886 static const struct ata_xfer_ent {
887 	int shift, bits;
888 	u8 base;
889 } ata_xfer_tbl[] = {
890 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
891 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
892 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
893 	{ -1, },
894 };
895 
896 /**
897  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
898  *	@xfer_mask: xfer_mask of interest
899  *
900  *	Return matching XFER_* value for @xfer_mask.  Only the highest
901  *	bit of @xfer_mask is considered.
902  *
903  *	LOCKING:
904  *	None.
905  *
906  *	RETURNS:
907  *	Matching XFER_* value, 0xff if no match found.
908  */
909 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
910 {
911 	int highbit = fls(xfer_mask) - 1;
912 	const struct ata_xfer_ent *ent;
913 
914 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
915 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
916 			return ent->base + highbit - ent->shift;
917 	return 0xff;
918 }
919 
920 /**
921  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
922  *	@xfer_mode: XFER_* of interest
923  *
924  *	Return matching xfer_mask for @xfer_mode.
925  *
926  *	LOCKING:
927  *	None.
928  *
929  *	RETURNS:
930  *	Matching xfer_mask, 0 if no match found.
931  */
932 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
933 {
934 	const struct ata_xfer_ent *ent;
935 
936 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
937 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
938 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
939 				& ~((1 << ent->shift) - 1);
940 	return 0;
941 }
942 
943 /**
944  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
945  *	@xfer_mode: XFER_* of interest
946  *
947  *	Return matching xfer_shift for @xfer_mode.
948  *
949  *	LOCKING:
950  *	None.
951  *
952  *	RETURNS:
953  *	Matching xfer_shift, -1 if no match found.
954  */
955 int ata_xfer_mode2shift(unsigned long xfer_mode)
956 {
957 	const struct ata_xfer_ent *ent;
958 
959 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
960 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
961 			return ent->shift;
962 	return -1;
963 }
964 
965 /**
966  *	ata_mode_string - convert xfer_mask to string
967  *	@xfer_mask: mask of bits supported; only highest bit counts.
968  *
969  *	Determine string which represents the highest speed
970  *	(highest bit in @modemask).
971  *
972  *	LOCKING:
973  *	None.
974  *
975  *	RETURNS:
976  *	Constant C string representing highest speed listed in
977  *	@mode_mask, or the constant C string "<n/a>".
978  */
979 const char *ata_mode_string(unsigned long xfer_mask)
980 {
981 	static const char * const xfer_mode_str[] = {
982 		"PIO0",
983 		"PIO1",
984 		"PIO2",
985 		"PIO3",
986 		"PIO4",
987 		"PIO5",
988 		"PIO6",
989 		"MWDMA0",
990 		"MWDMA1",
991 		"MWDMA2",
992 		"MWDMA3",
993 		"MWDMA4",
994 		"UDMA/16",
995 		"UDMA/25",
996 		"UDMA/33",
997 		"UDMA/44",
998 		"UDMA/66",
999 		"UDMA/100",
1000 		"UDMA/133",
1001 		"UDMA7",
1002 	};
1003 	int highbit;
1004 
1005 	highbit = fls(xfer_mask) - 1;
1006 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1007 		return xfer_mode_str[highbit];
1008 	return "<n/a>";
1009 }
1010 
1011 static const char *sata_spd_string(unsigned int spd)
1012 {
1013 	static const char * const spd_str[] = {
1014 		"1.5 Gbps",
1015 		"3.0 Gbps",
1016 		"6.0 Gbps",
1017 	};
1018 
1019 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1020 		return "<unknown>";
1021 	return spd_str[spd - 1];
1022 }
1023 
1024 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
1025 {
1026 	struct ata_link *link = dev->link;
1027 	struct ata_port *ap = link->ap;
1028 	u32 scontrol;
1029 	unsigned int err_mask;
1030 	int rc;
1031 
1032 	/*
1033 	 * disallow DIPM for drivers which haven't set
1034 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
1035 	 * phy ready will be set in the interrupt status on
1036 	 * state changes, which will cause some drivers to
1037 	 * think there are errors - additionally drivers will
1038 	 * need to disable hot plug.
1039 	 */
1040 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
1041 		ap->pm_policy = NOT_AVAILABLE;
1042 		return -EINVAL;
1043 	}
1044 
1045 	/*
1046 	 * For DIPM, we will only enable it for the
1047 	 * min_power setting.
1048 	 *
1049 	 * Why?  Because Disks are too stupid to know that
1050 	 * If the host rejects a request to go to SLUMBER
1051 	 * they should retry at PARTIAL, and instead it
1052 	 * just would give up.  So, for medium_power to
1053 	 * work at all, we need to only allow HIPM.
1054 	 */
1055 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
1056 	if (rc)
1057 		return rc;
1058 
1059 	switch (policy) {
1060 	case MIN_POWER:
1061 		/* no restrictions on IPM transitions */
1062 		scontrol &= ~(0x3 << 8);
1063 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1064 		if (rc)
1065 			return rc;
1066 
1067 		/* enable DIPM */
1068 		if (dev->flags & ATA_DFLAG_DIPM)
1069 			err_mask = ata_dev_set_feature(dev,
1070 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
1071 		break;
1072 	case MEDIUM_POWER:
1073 		/* allow IPM to PARTIAL */
1074 		scontrol &= ~(0x1 << 8);
1075 		scontrol |= (0x2 << 8);
1076 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1077 		if (rc)
1078 			return rc;
1079 
1080 		/*
1081 		 * we don't have to disable DIPM since IPM flags
1082 		 * disallow transitions to SLUMBER, which effectively
1083 		 * disable DIPM if it does not support PARTIAL
1084 		 */
1085 		break;
1086 	case NOT_AVAILABLE:
1087 	case MAX_PERFORMANCE:
1088 		/* disable all IPM transitions */
1089 		scontrol |= (0x3 << 8);
1090 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1091 		if (rc)
1092 			return rc;
1093 
1094 		/*
1095 		 * we don't have to disable DIPM since IPM flags
1096 		 * disallow all transitions which effectively
1097 		 * disable DIPM anyway.
1098 		 */
1099 		break;
1100 	}
1101 
1102 	/* FIXME: handle SET FEATURES failure */
1103 	(void) err_mask;
1104 
1105 	return 0;
1106 }
1107 
1108 /**
1109  *	ata_dev_enable_pm - enable SATA interface power management
1110  *	@dev:  device to enable power management
1111  *	@policy: the link power management policy
1112  *
1113  *	Enable SATA Interface power management.  This will enable
1114  *	Device Interface Power Management (DIPM) for min_power
1115  * 	policy, and then call driver specific callbacks for
1116  *	enabling Host Initiated Power management.
1117  *
1118  *	Locking: Caller.
1119  *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
1120  */
1121 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1122 {
1123 	int rc = 0;
1124 	struct ata_port *ap = dev->link->ap;
1125 
1126 	/* set HIPM first, then DIPM */
1127 	if (ap->ops->enable_pm)
1128 		rc = ap->ops->enable_pm(ap, policy);
1129 	if (rc)
1130 		goto enable_pm_out;
1131 	rc = ata_dev_set_dipm(dev, policy);
1132 
1133 enable_pm_out:
1134 	if (rc)
1135 		ap->pm_policy = MAX_PERFORMANCE;
1136 	else
1137 		ap->pm_policy = policy;
1138 	return /* rc */;	/* hopefully we can use 'rc' eventually */
1139 }
1140 
1141 #ifdef CONFIG_PM
1142 /**
1143  *	ata_dev_disable_pm - disable SATA interface power management
1144  *	@dev: device to disable power management
1145  *
1146  *	Disable SATA Interface power management.  This will disable
1147  *	Device Interface Power Management (DIPM) without changing
1148  * 	policy,  call driver specific callbacks for disabling Host
1149  * 	Initiated Power management.
1150  *
1151  *	Locking: Caller.
1152  *	Returns: void
1153  */
1154 static void ata_dev_disable_pm(struct ata_device *dev)
1155 {
1156 	struct ata_port *ap = dev->link->ap;
1157 
1158 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1159 	if (ap->ops->disable_pm)
1160 		ap->ops->disable_pm(ap);
1161 }
1162 #endif	/* CONFIG_PM */
1163 
1164 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1165 {
1166 	ap->pm_policy = policy;
1167 	ap->link.eh_info.action |= ATA_EH_LPM;
1168 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1169 	ata_port_schedule_eh(ap);
1170 }
1171 
1172 #ifdef CONFIG_PM
1173 static void ata_lpm_enable(struct ata_host *host)
1174 {
1175 	struct ata_link *link;
1176 	struct ata_port *ap;
1177 	struct ata_device *dev;
1178 	int i;
1179 
1180 	for (i = 0; i < host->n_ports; i++) {
1181 		ap = host->ports[i];
1182 		ata_for_each_link(link, ap, EDGE) {
1183 			ata_for_each_dev(dev, link, ALL)
1184 				ata_dev_disable_pm(dev);
1185 		}
1186 	}
1187 }
1188 
1189 static void ata_lpm_disable(struct ata_host *host)
1190 {
1191 	int i;
1192 
1193 	for (i = 0; i < host->n_ports; i++) {
1194 		struct ata_port *ap = host->ports[i];
1195 		ata_lpm_schedule(ap, ap->pm_policy);
1196 	}
1197 }
1198 #endif	/* CONFIG_PM */
1199 
1200 /**
1201  *	ata_dev_classify - determine device type based on ATA-spec signature
1202  *	@tf: ATA taskfile register set for device to be identified
1203  *
1204  *	Determine from taskfile register contents whether a device is
1205  *	ATA or ATAPI, as per "Signature and persistence" section
1206  *	of ATA/PI spec (volume 1, sect 5.14).
1207  *
1208  *	LOCKING:
1209  *	None.
1210  *
1211  *	RETURNS:
1212  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1213  *	%ATA_DEV_UNKNOWN the event of failure.
1214  */
1215 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1216 {
1217 	/* Apple's open source Darwin code hints that some devices only
1218 	 * put a proper signature into the LBA mid/high registers,
1219 	 * So, we only check those.  It's sufficient for uniqueness.
1220 	 *
1221 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1222 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1223 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1224 	 * spec has never mentioned about using different signatures
1225 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1226 	 * Multiplier specification began to use 0x69/0x96 to identify
1227 	 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1228 	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1229 	 * 0x69/0x96 shortly and described them as reserved for
1230 	 * SerialATA.
1231 	 *
1232 	 * We follow the current spec and consider that 0x69/0x96
1233 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1234 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1235 	 * SEMB signature.  This is worked around in
1236 	 * ata_dev_read_id().
1237 	 */
1238 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1239 		DPRINTK("found ATA device by sig\n");
1240 		return ATA_DEV_ATA;
1241 	}
1242 
1243 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1244 		DPRINTK("found ATAPI device by sig\n");
1245 		return ATA_DEV_ATAPI;
1246 	}
1247 
1248 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1249 		DPRINTK("found PMP device by sig\n");
1250 		return ATA_DEV_PMP;
1251 	}
1252 
1253 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1254 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1255 		return ATA_DEV_SEMB;
1256 	}
1257 
1258 	DPRINTK("unknown device\n");
1259 	return ATA_DEV_UNKNOWN;
1260 }
1261 
1262 /**
1263  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1264  *	@id: IDENTIFY DEVICE results we will examine
1265  *	@s: string into which data is output
1266  *	@ofs: offset into identify device page
1267  *	@len: length of string to return. must be an even number.
1268  *
1269  *	The strings in the IDENTIFY DEVICE page are broken up into
1270  *	16-bit chunks.  Run through the string, and output each
1271  *	8-bit chunk linearly, regardless of platform.
1272  *
1273  *	LOCKING:
1274  *	caller.
1275  */
1276 
1277 void ata_id_string(const u16 *id, unsigned char *s,
1278 		   unsigned int ofs, unsigned int len)
1279 {
1280 	unsigned int c;
1281 
1282 	BUG_ON(len & 1);
1283 
1284 	while (len > 0) {
1285 		c = id[ofs] >> 8;
1286 		*s = c;
1287 		s++;
1288 
1289 		c = id[ofs] & 0xff;
1290 		*s = c;
1291 		s++;
1292 
1293 		ofs++;
1294 		len -= 2;
1295 	}
1296 }
1297 
1298 /**
1299  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1300  *	@id: IDENTIFY DEVICE results we will examine
1301  *	@s: string into which data is output
1302  *	@ofs: offset into identify device page
1303  *	@len: length of string to return. must be an odd number.
1304  *
1305  *	This function is identical to ata_id_string except that it
1306  *	trims trailing spaces and terminates the resulting string with
1307  *	null.  @len must be actual maximum length (even number) + 1.
1308  *
1309  *	LOCKING:
1310  *	caller.
1311  */
1312 void ata_id_c_string(const u16 *id, unsigned char *s,
1313 		     unsigned int ofs, unsigned int len)
1314 {
1315 	unsigned char *p;
1316 
1317 	ata_id_string(id, s, ofs, len - 1);
1318 
1319 	p = s + strnlen(s, len - 1);
1320 	while (p > s && p[-1] == ' ')
1321 		p--;
1322 	*p = '\0';
1323 }
1324 
1325 static u64 ata_id_n_sectors(const u16 *id)
1326 {
1327 	if (ata_id_has_lba(id)) {
1328 		if (ata_id_has_lba48(id))
1329 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1330 		else
1331 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1332 	} else {
1333 		if (ata_id_current_chs_valid(id))
1334 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1335 			       id[ATA_ID_CUR_SECTORS];
1336 		else
1337 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1338 			       id[ATA_ID_SECTORS];
1339 	}
1340 }
1341 
1342 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1343 {
1344 	u64 sectors = 0;
1345 
1346 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1347 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1348 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1349 	sectors |= (tf->lbah & 0xff) << 16;
1350 	sectors |= (tf->lbam & 0xff) << 8;
1351 	sectors |= (tf->lbal & 0xff);
1352 
1353 	return sectors;
1354 }
1355 
1356 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1357 {
1358 	u64 sectors = 0;
1359 
1360 	sectors |= (tf->device & 0x0f) << 24;
1361 	sectors |= (tf->lbah & 0xff) << 16;
1362 	sectors |= (tf->lbam & 0xff) << 8;
1363 	sectors |= (tf->lbal & 0xff);
1364 
1365 	return sectors;
1366 }
1367 
1368 /**
1369  *	ata_read_native_max_address - Read native max address
1370  *	@dev: target device
1371  *	@max_sectors: out parameter for the result native max address
1372  *
1373  *	Perform an LBA48 or LBA28 native size query upon the device in
1374  *	question.
1375  *
1376  *	RETURNS:
1377  *	0 on success, -EACCES if command is aborted by the drive.
1378  *	-EIO on other errors.
1379  */
1380 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1381 {
1382 	unsigned int err_mask;
1383 	struct ata_taskfile tf;
1384 	int lba48 = ata_id_has_lba48(dev->id);
1385 
1386 	ata_tf_init(dev, &tf);
1387 
1388 	/* always clear all address registers */
1389 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1390 
1391 	if (lba48) {
1392 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1393 		tf.flags |= ATA_TFLAG_LBA48;
1394 	} else
1395 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1396 
1397 	tf.protocol |= ATA_PROT_NODATA;
1398 	tf.device |= ATA_LBA;
1399 
1400 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1401 	if (err_mask) {
1402 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1403 			       "max address (err_mask=0x%x)\n", err_mask);
1404 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1405 			return -EACCES;
1406 		return -EIO;
1407 	}
1408 
1409 	if (lba48)
1410 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1411 	else
1412 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1413 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1414 		(*max_sectors)--;
1415 	return 0;
1416 }
1417 
1418 /**
1419  *	ata_set_max_sectors - Set max sectors
1420  *	@dev: target device
1421  *	@new_sectors: new max sectors value to set for the device
1422  *
1423  *	Set max sectors of @dev to @new_sectors.
1424  *
1425  *	RETURNS:
1426  *	0 on success, -EACCES if command is aborted or denied (due to
1427  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1428  *	errors.
1429  */
1430 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1431 {
1432 	unsigned int err_mask;
1433 	struct ata_taskfile tf;
1434 	int lba48 = ata_id_has_lba48(dev->id);
1435 
1436 	new_sectors--;
1437 
1438 	ata_tf_init(dev, &tf);
1439 
1440 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1441 
1442 	if (lba48) {
1443 		tf.command = ATA_CMD_SET_MAX_EXT;
1444 		tf.flags |= ATA_TFLAG_LBA48;
1445 
1446 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1447 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1448 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1449 	} else {
1450 		tf.command = ATA_CMD_SET_MAX;
1451 
1452 		tf.device |= (new_sectors >> 24) & 0xf;
1453 	}
1454 
1455 	tf.protocol |= ATA_PROT_NODATA;
1456 	tf.device |= ATA_LBA;
1457 
1458 	tf.lbal = (new_sectors >> 0) & 0xff;
1459 	tf.lbam = (new_sectors >> 8) & 0xff;
1460 	tf.lbah = (new_sectors >> 16) & 0xff;
1461 
1462 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1463 	if (err_mask) {
1464 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1465 			       "max address (err_mask=0x%x)\n", err_mask);
1466 		if (err_mask == AC_ERR_DEV &&
1467 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1468 			return -EACCES;
1469 		return -EIO;
1470 	}
1471 
1472 	return 0;
1473 }
1474 
1475 /**
1476  *	ata_hpa_resize		-	Resize a device with an HPA set
1477  *	@dev: Device to resize
1478  *
1479  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1480  *	it if required to the full size of the media. The caller must check
1481  *	the drive has the HPA feature set enabled.
1482  *
1483  *	RETURNS:
1484  *	0 on success, -errno on failure.
1485  */
1486 static int ata_hpa_resize(struct ata_device *dev)
1487 {
1488 	struct ata_eh_context *ehc = &dev->link->eh_context;
1489 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1490 	u64 sectors = ata_id_n_sectors(dev->id);
1491 	u64 native_sectors;
1492 	int rc;
1493 
1494 	/* do we need to do it? */
1495 	if (dev->class != ATA_DEV_ATA ||
1496 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1497 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1498 		return 0;
1499 
1500 	/* read native max address */
1501 	rc = ata_read_native_max_address(dev, &native_sectors);
1502 	if (rc) {
1503 		/* If device aborted the command or HPA isn't going to
1504 		 * be unlocked, skip HPA resizing.
1505 		 */
1506 		if (rc == -EACCES || !ata_ignore_hpa) {
1507 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1508 				       "broken, skipping HPA handling\n");
1509 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1510 
1511 			/* we can continue if device aborted the command */
1512 			if (rc == -EACCES)
1513 				rc = 0;
1514 		}
1515 
1516 		return rc;
1517 	}
1518 	dev->n_native_sectors = native_sectors;
1519 
1520 	/* nothing to do? */
1521 	if (native_sectors <= sectors || !ata_ignore_hpa) {
1522 		if (!print_info || native_sectors == sectors)
1523 			return 0;
1524 
1525 		if (native_sectors > sectors)
1526 			ata_dev_printk(dev, KERN_INFO,
1527 				"HPA detected: current %llu, native %llu\n",
1528 				(unsigned long long)sectors,
1529 				(unsigned long long)native_sectors);
1530 		else if (native_sectors < sectors)
1531 			ata_dev_printk(dev, KERN_WARNING,
1532 				"native sectors (%llu) is smaller than "
1533 				"sectors (%llu)\n",
1534 				(unsigned long long)native_sectors,
1535 				(unsigned long long)sectors);
1536 		return 0;
1537 	}
1538 
1539 	/* let's unlock HPA */
1540 	rc = ata_set_max_sectors(dev, native_sectors);
1541 	if (rc == -EACCES) {
1542 		/* if device aborted the command, skip HPA resizing */
1543 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1544 			       "(%llu -> %llu), skipping HPA handling\n",
1545 			       (unsigned long long)sectors,
1546 			       (unsigned long long)native_sectors);
1547 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1548 		return 0;
1549 	} else if (rc)
1550 		return rc;
1551 
1552 	/* re-read IDENTIFY data */
1553 	rc = ata_dev_reread_id(dev, 0);
1554 	if (rc) {
1555 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1556 			       "data after HPA resizing\n");
1557 		return rc;
1558 	}
1559 
1560 	if (print_info) {
1561 		u64 new_sectors = ata_id_n_sectors(dev->id);
1562 		ata_dev_printk(dev, KERN_INFO,
1563 			"HPA unlocked: %llu -> %llu, native %llu\n",
1564 			(unsigned long long)sectors,
1565 			(unsigned long long)new_sectors,
1566 			(unsigned long long)native_sectors);
1567 	}
1568 
1569 	return 0;
1570 }
1571 
1572 /**
1573  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1574  *	@id: IDENTIFY DEVICE page to dump
1575  *
1576  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1577  *	page.
1578  *
1579  *	LOCKING:
1580  *	caller.
1581  */
1582 
1583 static inline void ata_dump_id(const u16 *id)
1584 {
1585 	DPRINTK("49==0x%04x  "
1586 		"53==0x%04x  "
1587 		"63==0x%04x  "
1588 		"64==0x%04x  "
1589 		"75==0x%04x  \n",
1590 		id[49],
1591 		id[53],
1592 		id[63],
1593 		id[64],
1594 		id[75]);
1595 	DPRINTK("80==0x%04x  "
1596 		"81==0x%04x  "
1597 		"82==0x%04x  "
1598 		"83==0x%04x  "
1599 		"84==0x%04x  \n",
1600 		id[80],
1601 		id[81],
1602 		id[82],
1603 		id[83],
1604 		id[84]);
1605 	DPRINTK("88==0x%04x  "
1606 		"93==0x%04x\n",
1607 		id[88],
1608 		id[93]);
1609 }
1610 
1611 /**
1612  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1613  *	@id: IDENTIFY data to compute xfer mask from
1614  *
1615  *	Compute the xfermask for this device. This is not as trivial
1616  *	as it seems if we must consider early devices correctly.
1617  *
1618  *	FIXME: pre IDE drive timing (do we care ?).
1619  *
1620  *	LOCKING:
1621  *	None.
1622  *
1623  *	RETURNS:
1624  *	Computed xfermask
1625  */
1626 unsigned long ata_id_xfermask(const u16 *id)
1627 {
1628 	unsigned long pio_mask, mwdma_mask, udma_mask;
1629 
1630 	/* Usual case. Word 53 indicates word 64 is valid */
1631 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1632 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1633 		pio_mask <<= 3;
1634 		pio_mask |= 0x7;
1635 	} else {
1636 		/* If word 64 isn't valid then Word 51 high byte holds
1637 		 * the PIO timing number for the maximum. Turn it into
1638 		 * a mask.
1639 		 */
1640 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1641 		if (mode < 5)	/* Valid PIO range */
1642 			pio_mask = (2 << mode) - 1;
1643 		else
1644 			pio_mask = 1;
1645 
1646 		/* But wait.. there's more. Design your standards by
1647 		 * committee and you too can get a free iordy field to
1648 		 * process. However its the speeds not the modes that
1649 		 * are supported... Note drivers using the timing API
1650 		 * will get this right anyway
1651 		 */
1652 	}
1653 
1654 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1655 
1656 	if (ata_id_is_cfa(id)) {
1657 		/*
1658 		 *	Process compact flash extended modes
1659 		 */
1660 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1661 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1662 
1663 		if (pio)
1664 			pio_mask |= (1 << 5);
1665 		if (pio > 1)
1666 			pio_mask |= (1 << 6);
1667 		if (dma)
1668 			mwdma_mask |= (1 << 3);
1669 		if (dma > 1)
1670 			mwdma_mask |= (1 << 4);
1671 	}
1672 
1673 	udma_mask = 0;
1674 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1675 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1676 
1677 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1678 }
1679 
1680 /**
1681  *	ata_pio_queue_task - Queue port_task
1682  *	@ap: The ata_port to queue port_task for
1683  *	@data: data for @fn to use
1684  *	@delay: delay time in msecs for workqueue function
1685  *
1686  *	Schedule @fn(@data) for execution after @delay jiffies using
1687  *	port_task.  There is one port_task per port and it's the
1688  *	user(low level driver)'s responsibility to make sure that only
1689  *	one task is active at any given time.
1690  *
1691  *	libata core layer takes care of synchronization between
1692  *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
1693  *	synchronization.
1694  *
1695  *	LOCKING:
1696  *	Inherited from caller.
1697  */
1698 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1699 {
1700 	ap->port_task_data = data;
1701 
1702 	/* may fail if ata_port_flush_task() in progress */
1703 	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1704 }
1705 
1706 /**
1707  *	ata_port_flush_task - Flush port_task
1708  *	@ap: The ata_port to flush port_task for
1709  *
1710  *	After this function completes, port_task is guranteed not to
1711  *	be running or scheduled.
1712  *
1713  *	LOCKING:
1714  *	Kernel thread context (may sleep)
1715  */
1716 void ata_port_flush_task(struct ata_port *ap)
1717 {
1718 	DPRINTK("ENTER\n");
1719 
1720 	cancel_rearming_delayed_work(&ap->port_task);
1721 
1722 	if (ata_msg_ctl(ap))
1723 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1724 }
1725 
1726 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1727 {
1728 	struct completion *waiting = qc->private_data;
1729 
1730 	complete(waiting);
1731 }
1732 
1733 /**
1734  *	ata_exec_internal_sg - execute libata internal command
1735  *	@dev: Device to which the command is sent
1736  *	@tf: Taskfile registers for the command and the result
1737  *	@cdb: CDB for packet command
1738  *	@dma_dir: Data tranfer direction of the command
1739  *	@sgl: sg list for the data buffer of the command
1740  *	@n_elem: Number of sg entries
1741  *	@timeout: Timeout in msecs (0 for default)
1742  *
1743  *	Executes libata internal command with timeout.  @tf contains
1744  *	command on entry and result on return.  Timeout and error
1745  *	conditions are reported via return value.  No recovery action
1746  *	is taken after a command times out.  It's caller's duty to
1747  *	clean up after timeout.
1748  *
1749  *	LOCKING:
1750  *	None.  Should be called with kernel context, might sleep.
1751  *
1752  *	RETURNS:
1753  *	Zero on success, AC_ERR_* mask on failure
1754  */
1755 unsigned ata_exec_internal_sg(struct ata_device *dev,
1756 			      struct ata_taskfile *tf, const u8 *cdb,
1757 			      int dma_dir, struct scatterlist *sgl,
1758 			      unsigned int n_elem, unsigned long timeout)
1759 {
1760 	struct ata_link *link = dev->link;
1761 	struct ata_port *ap = link->ap;
1762 	u8 command = tf->command;
1763 	int auto_timeout = 0;
1764 	struct ata_queued_cmd *qc;
1765 	unsigned int tag, preempted_tag;
1766 	u32 preempted_sactive, preempted_qc_active;
1767 	int preempted_nr_active_links;
1768 	DECLARE_COMPLETION_ONSTACK(wait);
1769 	unsigned long flags;
1770 	unsigned int err_mask;
1771 	int rc;
1772 
1773 	spin_lock_irqsave(ap->lock, flags);
1774 
1775 	/* no internal command while frozen */
1776 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1777 		spin_unlock_irqrestore(ap->lock, flags);
1778 		return AC_ERR_SYSTEM;
1779 	}
1780 
1781 	/* initialize internal qc */
1782 
1783 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1784 	 * drivers choke if any other tag is given.  This breaks
1785 	 * ata_tag_internal() test for those drivers.  Don't use new
1786 	 * EH stuff without converting to it.
1787 	 */
1788 	if (ap->ops->error_handler)
1789 		tag = ATA_TAG_INTERNAL;
1790 	else
1791 		tag = 0;
1792 
1793 	if (test_and_set_bit(tag, &ap->qc_allocated))
1794 		BUG();
1795 	qc = __ata_qc_from_tag(ap, tag);
1796 
1797 	qc->tag = tag;
1798 	qc->scsicmd = NULL;
1799 	qc->ap = ap;
1800 	qc->dev = dev;
1801 	ata_qc_reinit(qc);
1802 
1803 	preempted_tag = link->active_tag;
1804 	preempted_sactive = link->sactive;
1805 	preempted_qc_active = ap->qc_active;
1806 	preempted_nr_active_links = ap->nr_active_links;
1807 	link->active_tag = ATA_TAG_POISON;
1808 	link->sactive = 0;
1809 	ap->qc_active = 0;
1810 	ap->nr_active_links = 0;
1811 
1812 	/* prepare & issue qc */
1813 	qc->tf = *tf;
1814 	if (cdb)
1815 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1816 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1817 	qc->dma_dir = dma_dir;
1818 	if (dma_dir != DMA_NONE) {
1819 		unsigned int i, buflen = 0;
1820 		struct scatterlist *sg;
1821 
1822 		for_each_sg(sgl, sg, n_elem, i)
1823 			buflen += sg->length;
1824 
1825 		ata_sg_init(qc, sgl, n_elem);
1826 		qc->nbytes = buflen;
1827 	}
1828 
1829 	qc->private_data = &wait;
1830 	qc->complete_fn = ata_qc_complete_internal;
1831 
1832 	ata_qc_issue(qc);
1833 
1834 	spin_unlock_irqrestore(ap->lock, flags);
1835 
1836 	if (!timeout) {
1837 		if (ata_probe_timeout)
1838 			timeout = ata_probe_timeout * 1000;
1839 		else {
1840 			timeout = ata_internal_cmd_timeout(dev, command);
1841 			auto_timeout = 1;
1842 		}
1843 	}
1844 
1845 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1846 
1847 	ata_port_flush_task(ap);
1848 
1849 	if (!rc) {
1850 		spin_lock_irqsave(ap->lock, flags);
1851 
1852 		/* We're racing with irq here.  If we lose, the
1853 		 * following test prevents us from completing the qc
1854 		 * twice.  If we win, the port is frozen and will be
1855 		 * cleaned up by ->post_internal_cmd().
1856 		 */
1857 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1858 			qc->err_mask |= AC_ERR_TIMEOUT;
1859 
1860 			if (ap->ops->error_handler)
1861 				ata_port_freeze(ap);
1862 			else
1863 				ata_qc_complete(qc);
1864 
1865 			if (ata_msg_warn(ap))
1866 				ata_dev_printk(dev, KERN_WARNING,
1867 					"qc timeout (cmd 0x%x)\n", command);
1868 		}
1869 
1870 		spin_unlock_irqrestore(ap->lock, flags);
1871 	}
1872 
1873 	/* do post_internal_cmd */
1874 	if (ap->ops->post_internal_cmd)
1875 		ap->ops->post_internal_cmd(qc);
1876 
1877 	/* perform minimal error analysis */
1878 	if (qc->flags & ATA_QCFLAG_FAILED) {
1879 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1880 			qc->err_mask |= AC_ERR_DEV;
1881 
1882 		if (!qc->err_mask)
1883 			qc->err_mask |= AC_ERR_OTHER;
1884 
1885 		if (qc->err_mask & ~AC_ERR_OTHER)
1886 			qc->err_mask &= ~AC_ERR_OTHER;
1887 	}
1888 
1889 	/* finish up */
1890 	spin_lock_irqsave(ap->lock, flags);
1891 
1892 	*tf = qc->result_tf;
1893 	err_mask = qc->err_mask;
1894 
1895 	ata_qc_free(qc);
1896 	link->active_tag = preempted_tag;
1897 	link->sactive = preempted_sactive;
1898 	ap->qc_active = preempted_qc_active;
1899 	ap->nr_active_links = preempted_nr_active_links;
1900 
1901 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1902 	 * Until those drivers are fixed, we detect the condition
1903 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1904 	 * port.
1905 	 *
1906 	 * Note that this doesn't change any behavior as internal
1907 	 * command failure results in disabling the device in the
1908 	 * higher layer for LLDDs without new reset/EH callbacks.
1909 	 *
1910 	 * Kill the following code as soon as those drivers are fixed.
1911 	 */
1912 	if (ap->flags & ATA_FLAG_DISABLED) {
1913 		err_mask |= AC_ERR_SYSTEM;
1914 		ata_port_probe(ap);
1915 	}
1916 
1917 	spin_unlock_irqrestore(ap->lock, flags);
1918 
1919 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1920 		ata_internal_cmd_timed_out(dev, command);
1921 
1922 	return err_mask;
1923 }
1924 
1925 /**
1926  *	ata_exec_internal - execute libata internal command
1927  *	@dev: Device to which the command is sent
1928  *	@tf: Taskfile registers for the command and the result
1929  *	@cdb: CDB for packet command
1930  *	@dma_dir: Data tranfer direction of the command
1931  *	@buf: Data buffer of the command
1932  *	@buflen: Length of data buffer
1933  *	@timeout: Timeout in msecs (0 for default)
1934  *
1935  *	Wrapper around ata_exec_internal_sg() which takes simple
1936  *	buffer instead of sg list.
1937  *
1938  *	LOCKING:
1939  *	None.  Should be called with kernel context, might sleep.
1940  *
1941  *	RETURNS:
1942  *	Zero on success, AC_ERR_* mask on failure
1943  */
1944 unsigned ata_exec_internal(struct ata_device *dev,
1945 			   struct ata_taskfile *tf, const u8 *cdb,
1946 			   int dma_dir, void *buf, unsigned int buflen,
1947 			   unsigned long timeout)
1948 {
1949 	struct scatterlist *psg = NULL, sg;
1950 	unsigned int n_elem = 0;
1951 
1952 	if (dma_dir != DMA_NONE) {
1953 		WARN_ON(!buf);
1954 		sg_init_one(&sg, buf, buflen);
1955 		psg = &sg;
1956 		n_elem++;
1957 	}
1958 
1959 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1960 				    timeout);
1961 }
1962 
1963 /**
1964  *	ata_do_simple_cmd - execute simple internal command
1965  *	@dev: Device to which the command is sent
1966  *	@cmd: Opcode to execute
1967  *
1968  *	Execute a 'simple' command, that only consists of the opcode
1969  *	'cmd' itself, without filling any other registers
1970  *
1971  *	LOCKING:
1972  *	Kernel thread context (may sleep).
1973  *
1974  *	RETURNS:
1975  *	Zero on success, AC_ERR_* mask on failure
1976  */
1977 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1978 {
1979 	struct ata_taskfile tf;
1980 
1981 	ata_tf_init(dev, &tf);
1982 
1983 	tf.command = cmd;
1984 	tf.flags |= ATA_TFLAG_DEVICE;
1985 	tf.protocol = ATA_PROT_NODATA;
1986 
1987 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1988 }
1989 
1990 /**
1991  *	ata_pio_need_iordy	-	check if iordy needed
1992  *	@adev: ATA device
1993  *
1994  *	Check if the current speed of the device requires IORDY. Used
1995  *	by various controllers for chip configuration.
1996  */
1997 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1998 {
1999 	/* Don't set IORDY if we're preparing for reset.  IORDY may
2000 	 * lead to controller lock up on certain controllers if the
2001 	 * port is not occupied.  See bko#11703 for details.
2002 	 */
2003 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
2004 		return 0;
2005 	/* Controller doesn't support IORDY.  Probably a pointless
2006 	 * check as the caller should know this.
2007 	 */
2008 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
2009 		return 0;
2010 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
2011 	if (ata_id_is_cfa(adev->id)
2012 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
2013 		return 0;
2014 	/* PIO3 and higher it is mandatory */
2015 	if (adev->pio_mode > XFER_PIO_2)
2016 		return 1;
2017 	/* We turn it on when possible */
2018 	if (ata_id_has_iordy(adev->id))
2019 		return 1;
2020 	return 0;
2021 }
2022 
2023 /**
2024  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
2025  *	@adev: ATA device
2026  *
2027  *	Compute the highest mode possible if we are not using iordy. Return
2028  *	-1 if no iordy mode is available.
2029  */
2030 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2031 {
2032 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
2033 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
2034 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
2035 		/* Is the speed faster than the drive allows non IORDY ? */
2036 		if (pio) {
2037 			/* This is cycle times not frequency - watch the logic! */
2038 			if (pio > 240)	/* PIO2 is 240nS per cycle */
2039 				return 3 << ATA_SHIFT_PIO;
2040 			return 7 << ATA_SHIFT_PIO;
2041 		}
2042 	}
2043 	return 3 << ATA_SHIFT_PIO;
2044 }
2045 
2046 /**
2047  *	ata_do_dev_read_id		-	default ID read method
2048  *	@dev: device
2049  *	@tf: proposed taskfile
2050  *	@id: data buffer
2051  *
2052  *	Issue the identify taskfile and hand back the buffer containing
2053  *	identify data. For some RAID controllers and for pre ATA devices
2054  *	this function is wrapped or replaced by the driver
2055  */
2056 unsigned int ata_do_dev_read_id(struct ata_device *dev,
2057 					struct ata_taskfile *tf, u16 *id)
2058 {
2059 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
2060 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
2061 }
2062 
2063 /**
2064  *	ata_dev_read_id - Read ID data from the specified device
2065  *	@dev: target device
2066  *	@p_class: pointer to class of the target device (may be changed)
2067  *	@flags: ATA_READID_* flags
2068  *	@id: buffer to read IDENTIFY data into
2069  *
2070  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
2071  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
2072  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
2073  *	for pre-ATA4 drives.
2074  *
2075  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2076  *	now we abort if we hit that case.
2077  *
2078  *	LOCKING:
2079  *	Kernel thread context (may sleep)
2080  *
2081  *	RETURNS:
2082  *	0 on success, -errno otherwise.
2083  */
2084 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2085 		    unsigned int flags, u16 *id)
2086 {
2087 	struct ata_port *ap = dev->link->ap;
2088 	unsigned int class = *p_class;
2089 	struct ata_taskfile tf;
2090 	unsigned int err_mask = 0;
2091 	const char *reason;
2092 	bool is_semb = class == ATA_DEV_SEMB;
2093 	int may_fallback = 1, tried_spinup = 0;
2094 	int rc;
2095 
2096 	if (ata_msg_ctl(ap))
2097 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2098 
2099 retry:
2100 	ata_tf_init(dev, &tf);
2101 
2102 	switch (class) {
2103 	case ATA_DEV_SEMB:
2104 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
2105 	case ATA_DEV_ATA:
2106 		tf.command = ATA_CMD_ID_ATA;
2107 		break;
2108 	case ATA_DEV_ATAPI:
2109 		tf.command = ATA_CMD_ID_ATAPI;
2110 		break;
2111 	default:
2112 		rc = -ENODEV;
2113 		reason = "unsupported class";
2114 		goto err_out;
2115 	}
2116 
2117 	tf.protocol = ATA_PROT_PIO;
2118 
2119 	/* Some devices choke if TF registers contain garbage.  Make
2120 	 * sure those are properly initialized.
2121 	 */
2122 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2123 
2124 	/* Device presence detection is unreliable on some
2125 	 * controllers.  Always poll IDENTIFY if available.
2126 	 */
2127 	tf.flags |= ATA_TFLAG_POLLING;
2128 
2129 	if (ap->ops->read_id)
2130 		err_mask = ap->ops->read_id(dev, &tf, id);
2131 	else
2132 		err_mask = ata_do_dev_read_id(dev, &tf, id);
2133 
2134 	if (err_mask) {
2135 		if (err_mask & AC_ERR_NODEV_HINT) {
2136 			ata_dev_printk(dev, KERN_DEBUG,
2137 				       "NODEV after polling detection\n");
2138 			return -ENOENT;
2139 		}
2140 
2141 		if (is_semb) {
2142 			ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
2143 				       "device w/ SEMB sig, disabled\n");
2144 			/* SEMB is not supported yet */
2145 			*p_class = ATA_DEV_SEMB_UNSUP;
2146 			return 0;
2147 		}
2148 
2149 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2150 			/* Device or controller might have reported
2151 			 * the wrong device class.  Give a shot at the
2152 			 * other IDENTIFY if the current one is
2153 			 * aborted by the device.
2154 			 */
2155 			if (may_fallback) {
2156 				may_fallback = 0;
2157 
2158 				if (class == ATA_DEV_ATA)
2159 					class = ATA_DEV_ATAPI;
2160 				else
2161 					class = ATA_DEV_ATA;
2162 				goto retry;
2163 			}
2164 
2165 			/* Control reaches here iff the device aborted
2166 			 * both flavors of IDENTIFYs which happens
2167 			 * sometimes with phantom devices.
2168 			 */
2169 			ata_dev_printk(dev, KERN_DEBUG,
2170 				       "both IDENTIFYs aborted, assuming NODEV\n");
2171 			return -ENOENT;
2172 		}
2173 
2174 		rc = -EIO;
2175 		reason = "I/O error";
2176 		goto err_out;
2177 	}
2178 
2179 	/* Falling back doesn't make sense if ID data was read
2180 	 * successfully at least once.
2181 	 */
2182 	may_fallback = 0;
2183 
2184 	swap_buf_le16(id, ATA_ID_WORDS);
2185 
2186 	/* sanity check */
2187 	rc = -EINVAL;
2188 	reason = "device reports invalid type";
2189 
2190 	if (class == ATA_DEV_ATA) {
2191 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2192 			goto err_out;
2193 	} else {
2194 		if (ata_id_is_ata(id))
2195 			goto err_out;
2196 	}
2197 
2198 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2199 		tried_spinup = 1;
2200 		/*
2201 		 * Drive powered-up in standby mode, and requires a specific
2202 		 * SET_FEATURES spin-up subcommand before it will accept
2203 		 * anything other than the original IDENTIFY command.
2204 		 */
2205 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2206 		if (err_mask && id[2] != 0x738c) {
2207 			rc = -EIO;
2208 			reason = "SPINUP failed";
2209 			goto err_out;
2210 		}
2211 		/*
2212 		 * If the drive initially returned incomplete IDENTIFY info,
2213 		 * we now must reissue the IDENTIFY command.
2214 		 */
2215 		if (id[2] == 0x37c8)
2216 			goto retry;
2217 	}
2218 
2219 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2220 		/*
2221 		 * The exact sequence expected by certain pre-ATA4 drives is:
2222 		 * SRST RESET
2223 		 * IDENTIFY (optional in early ATA)
2224 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2225 		 * anything else..
2226 		 * Some drives were very specific about that exact sequence.
2227 		 *
2228 		 * Note that ATA4 says lba is mandatory so the second check
2229 		 * shoud never trigger.
2230 		 */
2231 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2232 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2233 			if (err_mask) {
2234 				rc = -EIO;
2235 				reason = "INIT_DEV_PARAMS failed";
2236 				goto err_out;
2237 			}
2238 
2239 			/* current CHS translation info (id[53-58]) might be
2240 			 * changed. reread the identify device info.
2241 			 */
2242 			flags &= ~ATA_READID_POSTRESET;
2243 			goto retry;
2244 		}
2245 	}
2246 
2247 	*p_class = class;
2248 
2249 	return 0;
2250 
2251  err_out:
2252 	if (ata_msg_warn(ap))
2253 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2254 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2255 	return rc;
2256 }
2257 
2258 static int ata_do_link_spd_horkage(struct ata_device *dev)
2259 {
2260 	struct ata_link *plink = ata_dev_phys_link(dev);
2261 	u32 target, target_limit;
2262 
2263 	if (!sata_scr_valid(plink))
2264 		return 0;
2265 
2266 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2267 		target = 1;
2268 	else
2269 		return 0;
2270 
2271 	target_limit = (1 << target) - 1;
2272 
2273 	/* if already on stricter limit, no need to push further */
2274 	if (plink->sata_spd_limit <= target_limit)
2275 		return 0;
2276 
2277 	plink->sata_spd_limit = target_limit;
2278 
2279 	/* Request another EH round by returning -EAGAIN if link is
2280 	 * going faster than the target speed.  Forward progress is
2281 	 * guaranteed by setting sata_spd_limit to target_limit above.
2282 	 */
2283 	if (plink->sata_spd > target) {
2284 		ata_dev_printk(dev, KERN_INFO,
2285 			       "applying link speed limit horkage to %s\n",
2286 			       sata_spd_string(target));
2287 		return -EAGAIN;
2288 	}
2289 	return 0;
2290 }
2291 
2292 static inline u8 ata_dev_knobble(struct ata_device *dev)
2293 {
2294 	struct ata_port *ap = dev->link->ap;
2295 
2296 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2297 		return 0;
2298 
2299 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2300 }
2301 
2302 static void ata_dev_config_ncq(struct ata_device *dev,
2303 			       char *desc, size_t desc_sz)
2304 {
2305 	struct ata_port *ap = dev->link->ap;
2306 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2307 
2308 	if (!ata_id_has_ncq(dev->id)) {
2309 		desc[0] = '\0';
2310 		return;
2311 	}
2312 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2313 		snprintf(desc, desc_sz, "NCQ (not used)");
2314 		return;
2315 	}
2316 	if (ap->flags & ATA_FLAG_NCQ) {
2317 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2318 		dev->flags |= ATA_DFLAG_NCQ;
2319 	}
2320 
2321 	if (hdepth >= ddepth)
2322 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2323 	else
2324 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2325 }
2326 
2327 /**
2328  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2329  *	@dev: Target device to configure
2330  *
2331  *	Configure @dev according to @dev->id.  Generic and low-level
2332  *	driver specific fixups are also applied.
2333  *
2334  *	LOCKING:
2335  *	Kernel thread context (may sleep)
2336  *
2337  *	RETURNS:
2338  *	0 on success, -errno otherwise
2339  */
2340 int ata_dev_configure(struct ata_device *dev)
2341 {
2342 	struct ata_port *ap = dev->link->ap;
2343 	struct ata_eh_context *ehc = &dev->link->eh_context;
2344 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2345 	const u16 *id = dev->id;
2346 	unsigned long xfer_mask;
2347 	char revbuf[7];		/* XYZ-99\0 */
2348 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2349 	char modelbuf[ATA_ID_PROD_LEN+1];
2350 	int rc;
2351 
2352 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2353 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2354 			       __func__);
2355 		return 0;
2356 	}
2357 
2358 	if (ata_msg_probe(ap))
2359 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2360 
2361 	/* set horkage */
2362 	dev->horkage |= ata_dev_blacklisted(dev);
2363 	ata_force_horkage(dev);
2364 
2365 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2366 		ata_dev_printk(dev, KERN_INFO,
2367 			       "unsupported device, disabling\n");
2368 		ata_dev_disable(dev);
2369 		return 0;
2370 	}
2371 
2372 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2373 	    dev->class == ATA_DEV_ATAPI) {
2374 		ata_dev_printk(dev, KERN_WARNING,
2375 			"WARNING: ATAPI is %s, device ignored.\n",
2376 			atapi_enabled ? "not supported with this driver"
2377 				      : "disabled");
2378 		ata_dev_disable(dev);
2379 		return 0;
2380 	}
2381 
2382 	rc = ata_do_link_spd_horkage(dev);
2383 	if (rc)
2384 		return rc;
2385 
2386 	/* let ACPI work its magic */
2387 	rc = ata_acpi_on_devcfg(dev);
2388 	if (rc)
2389 		return rc;
2390 
2391 	/* massage HPA, do it early as it might change IDENTIFY data */
2392 	rc = ata_hpa_resize(dev);
2393 	if (rc)
2394 		return rc;
2395 
2396 	/* print device capabilities */
2397 	if (ata_msg_probe(ap))
2398 		ata_dev_printk(dev, KERN_DEBUG,
2399 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2400 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2401 			       __func__,
2402 			       id[49], id[82], id[83], id[84],
2403 			       id[85], id[86], id[87], id[88]);
2404 
2405 	/* initialize to-be-configured parameters */
2406 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2407 	dev->max_sectors = 0;
2408 	dev->cdb_len = 0;
2409 	dev->n_sectors = 0;
2410 	dev->cylinders = 0;
2411 	dev->heads = 0;
2412 	dev->sectors = 0;
2413 	dev->multi_count = 0;
2414 
2415 	/*
2416 	 * common ATA, ATAPI feature tests
2417 	 */
2418 
2419 	/* find max transfer mode; for printk only */
2420 	xfer_mask = ata_id_xfermask(id);
2421 
2422 	if (ata_msg_probe(ap))
2423 		ata_dump_id(id);
2424 
2425 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2426 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2427 			sizeof(fwrevbuf));
2428 
2429 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2430 			sizeof(modelbuf));
2431 
2432 	/* ATA-specific feature tests */
2433 	if (dev->class == ATA_DEV_ATA) {
2434 		if (ata_id_is_cfa(id)) {
2435 			/* CPRM may make this media unusable */
2436 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2437 				ata_dev_printk(dev, KERN_WARNING,
2438 					       "supports DRM functions and may "
2439 					       "not be fully accessable.\n");
2440 			snprintf(revbuf, 7, "CFA");
2441 		} else {
2442 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2443 			/* Warn the user if the device has TPM extensions */
2444 			if (ata_id_has_tpm(id))
2445 				ata_dev_printk(dev, KERN_WARNING,
2446 					       "supports DRM functions and may "
2447 					       "not be fully accessable.\n");
2448 		}
2449 
2450 		dev->n_sectors = ata_id_n_sectors(id);
2451 
2452 		/* get current R/W Multiple count setting */
2453 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2454 			unsigned int max = dev->id[47] & 0xff;
2455 			unsigned int cnt = dev->id[59] & 0xff;
2456 			/* only recognize/allow powers of two here */
2457 			if (is_power_of_2(max) && is_power_of_2(cnt))
2458 				if (cnt <= max)
2459 					dev->multi_count = cnt;
2460 		}
2461 
2462 		if (ata_id_has_lba(id)) {
2463 			const char *lba_desc;
2464 			char ncq_desc[20];
2465 
2466 			lba_desc = "LBA";
2467 			dev->flags |= ATA_DFLAG_LBA;
2468 			if (ata_id_has_lba48(id)) {
2469 				dev->flags |= ATA_DFLAG_LBA48;
2470 				lba_desc = "LBA48";
2471 
2472 				if (dev->n_sectors >= (1UL << 28) &&
2473 				    ata_id_has_flush_ext(id))
2474 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2475 			}
2476 
2477 			/* config NCQ */
2478 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2479 
2480 			/* print device info to dmesg */
2481 			if (ata_msg_drv(ap) && print_info) {
2482 				ata_dev_printk(dev, KERN_INFO,
2483 					"%s: %s, %s, max %s\n",
2484 					revbuf, modelbuf, fwrevbuf,
2485 					ata_mode_string(xfer_mask));
2486 				ata_dev_printk(dev, KERN_INFO,
2487 					"%Lu sectors, multi %u: %s %s\n",
2488 					(unsigned long long)dev->n_sectors,
2489 					dev->multi_count, lba_desc, ncq_desc);
2490 			}
2491 		} else {
2492 			/* CHS */
2493 
2494 			/* Default translation */
2495 			dev->cylinders	= id[1];
2496 			dev->heads	= id[3];
2497 			dev->sectors	= id[6];
2498 
2499 			if (ata_id_current_chs_valid(id)) {
2500 				/* Current CHS translation is valid. */
2501 				dev->cylinders = id[54];
2502 				dev->heads     = id[55];
2503 				dev->sectors   = id[56];
2504 			}
2505 
2506 			/* print device info to dmesg */
2507 			if (ata_msg_drv(ap) && print_info) {
2508 				ata_dev_printk(dev, KERN_INFO,
2509 					"%s: %s, %s, max %s\n",
2510 					revbuf,	modelbuf, fwrevbuf,
2511 					ata_mode_string(xfer_mask));
2512 				ata_dev_printk(dev, KERN_INFO,
2513 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2514 					(unsigned long long)dev->n_sectors,
2515 					dev->multi_count, dev->cylinders,
2516 					dev->heads, dev->sectors);
2517 			}
2518 		}
2519 
2520 		dev->cdb_len = 16;
2521 	}
2522 
2523 	/* ATAPI-specific feature tests */
2524 	else if (dev->class == ATA_DEV_ATAPI) {
2525 		const char *cdb_intr_string = "";
2526 		const char *atapi_an_string = "";
2527 		const char *dma_dir_string = "";
2528 		u32 sntf;
2529 
2530 		rc = atapi_cdb_len(id);
2531 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2532 			if (ata_msg_warn(ap))
2533 				ata_dev_printk(dev, KERN_WARNING,
2534 					       "unsupported CDB len\n");
2535 			rc = -EINVAL;
2536 			goto err_out_nosup;
2537 		}
2538 		dev->cdb_len = (unsigned int) rc;
2539 
2540 		/* Enable ATAPI AN if both the host and device have
2541 		 * the support.  If PMP is attached, SNTF is required
2542 		 * to enable ATAPI AN to discern between PHY status
2543 		 * changed notifications and ATAPI ANs.
2544 		 */
2545 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2546 		    (!sata_pmp_attached(ap) ||
2547 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2548 			unsigned int err_mask;
2549 
2550 			/* issue SET feature command to turn this on */
2551 			err_mask = ata_dev_set_feature(dev,
2552 					SETFEATURES_SATA_ENABLE, SATA_AN);
2553 			if (err_mask)
2554 				ata_dev_printk(dev, KERN_ERR,
2555 					"failed to enable ATAPI AN "
2556 					"(err_mask=0x%x)\n", err_mask);
2557 			else {
2558 				dev->flags |= ATA_DFLAG_AN;
2559 				atapi_an_string = ", ATAPI AN";
2560 			}
2561 		}
2562 
2563 		if (ata_id_cdb_intr(dev->id)) {
2564 			dev->flags |= ATA_DFLAG_CDB_INTR;
2565 			cdb_intr_string = ", CDB intr";
2566 		}
2567 
2568 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2569 			dev->flags |= ATA_DFLAG_DMADIR;
2570 			dma_dir_string = ", DMADIR";
2571 		}
2572 
2573 		/* print device info to dmesg */
2574 		if (ata_msg_drv(ap) && print_info)
2575 			ata_dev_printk(dev, KERN_INFO,
2576 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2577 				       modelbuf, fwrevbuf,
2578 				       ata_mode_string(xfer_mask),
2579 				       cdb_intr_string, atapi_an_string,
2580 				       dma_dir_string);
2581 	}
2582 
2583 	/* determine max_sectors */
2584 	dev->max_sectors = ATA_MAX_SECTORS;
2585 	if (dev->flags & ATA_DFLAG_LBA48)
2586 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2587 
2588 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2589 		if (ata_id_has_hipm(dev->id))
2590 			dev->flags |= ATA_DFLAG_HIPM;
2591 		if (ata_id_has_dipm(dev->id))
2592 			dev->flags |= ATA_DFLAG_DIPM;
2593 	}
2594 
2595 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2596 	   200 sectors */
2597 	if (ata_dev_knobble(dev)) {
2598 		if (ata_msg_drv(ap) && print_info)
2599 			ata_dev_printk(dev, KERN_INFO,
2600 				       "applying bridge limits\n");
2601 		dev->udma_mask &= ATA_UDMA5;
2602 		dev->max_sectors = ATA_MAX_SECTORS;
2603 	}
2604 
2605 	if ((dev->class == ATA_DEV_ATAPI) &&
2606 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2607 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2608 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2609 	}
2610 
2611 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2612 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2613 					 dev->max_sectors);
2614 
2615 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2616 		dev->horkage |= ATA_HORKAGE_IPM;
2617 
2618 		/* reset link pm_policy for this port to no pm */
2619 		ap->pm_policy = MAX_PERFORMANCE;
2620 	}
2621 
2622 	if (ap->ops->dev_config)
2623 		ap->ops->dev_config(dev);
2624 
2625 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2626 		/* Let the user know. We don't want to disallow opens for
2627 		   rescue purposes, or in case the vendor is just a blithering
2628 		   idiot. Do this after the dev_config call as some controllers
2629 		   with buggy firmware may want to avoid reporting false device
2630 		   bugs */
2631 
2632 		if (print_info) {
2633 			ata_dev_printk(dev, KERN_WARNING,
2634 "Drive reports diagnostics failure. This may indicate a drive\n");
2635 			ata_dev_printk(dev, KERN_WARNING,
2636 "fault or invalid emulation. Contact drive vendor for information.\n");
2637 		}
2638 	}
2639 
2640 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2641 		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2642 			       "firmware update to be fully functional.\n");
2643 		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
2644 			       "or visit http://ata.wiki.kernel.org.\n");
2645 	}
2646 
2647 	return 0;
2648 
2649 err_out_nosup:
2650 	if (ata_msg_probe(ap))
2651 		ata_dev_printk(dev, KERN_DEBUG,
2652 			       "%s: EXIT, err\n", __func__);
2653 	return rc;
2654 }
2655 
2656 /**
2657  *	ata_cable_40wire	-	return 40 wire cable type
2658  *	@ap: port
2659  *
2660  *	Helper method for drivers which want to hardwire 40 wire cable
2661  *	detection.
2662  */
2663 
2664 int ata_cable_40wire(struct ata_port *ap)
2665 {
2666 	return ATA_CBL_PATA40;
2667 }
2668 
2669 /**
2670  *	ata_cable_80wire	-	return 80 wire cable type
2671  *	@ap: port
2672  *
2673  *	Helper method for drivers which want to hardwire 80 wire cable
2674  *	detection.
2675  */
2676 
2677 int ata_cable_80wire(struct ata_port *ap)
2678 {
2679 	return ATA_CBL_PATA80;
2680 }
2681 
2682 /**
2683  *	ata_cable_unknown	-	return unknown PATA cable.
2684  *	@ap: port
2685  *
2686  *	Helper method for drivers which have no PATA cable detection.
2687  */
2688 
2689 int ata_cable_unknown(struct ata_port *ap)
2690 {
2691 	return ATA_CBL_PATA_UNK;
2692 }
2693 
2694 /**
2695  *	ata_cable_ignore	-	return ignored PATA cable.
2696  *	@ap: port
2697  *
2698  *	Helper method for drivers which don't use cable type to limit
2699  *	transfer mode.
2700  */
2701 int ata_cable_ignore(struct ata_port *ap)
2702 {
2703 	return ATA_CBL_PATA_IGN;
2704 }
2705 
2706 /**
2707  *	ata_cable_sata	-	return SATA cable type
2708  *	@ap: port
2709  *
2710  *	Helper method for drivers which have SATA cables
2711  */
2712 
2713 int ata_cable_sata(struct ata_port *ap)
2714 {
2715 	return ATA_CBL_SATA;
2716 }
2717 
2718 /**
2719  *	ata_bus_probe - Reset and probe ATA bus
2720  *	@ap: Bus to probe
2721  *
2722  *	Master ATA bus probing function.  Initiates a hardware-dependent
2723  *	bus reset, then attempts to identify any devices found on
2724  *	the bus.
2725  *
2726  *	LOCKING:
2727  *	PCI/etc. bus probe sem.
2728  *
2729  *	RETURNS:
2730  *	Zero on success, negative errno otherwise.
2731  */
2732 
2733 int ata_bus_probe(struct ata_port *ap)
2734 {
2735 	unsigned int classes[ATA_MAX_DEVICES];
2736 	int tries[ATA_MAX_DEVICES];
2737 	int rc;
2738 	struct ata_device *dev;
2739 
2740 	ata_port_probe(ap);
2741 
2742 	ata_for_each_dev(dev, &ap->link, ALL)
2743 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2744 
2745  retry:
2746 	ata_for_each_dev(dev, &ap->link, ALL) {
2747 		/* If we issue an SRST then an ATA drive (not ATAPI)
2748 		 * may change configuration and be in PIO0 timing. If
2749 		 * we do a hard reset (or are coming from power on)
2750 		 * this is true for ATA or ATAPI. Until we've set a
2751 		 * suitable controller mode we should not touch the
2752 		 * bus as we may be talking too fast.
2753 		 */
2754 		dev->pio_mode = XFER_PIO_0;
2755 
2756 		/* If the controller has a pio mode setup function
2757 		 * then use it to set the chipset to rights. Don't
2758 		 * touch the DMA setup as that will be dealt with when
2759 		 * configuring devices.
2760 		 */
2761 		if (ap->ops->set_piomode)
2762 			ap->ops->set_piomode(ap, dev);
2763 	}
2764 
2765 	/* reset and determine device classes */
2766 	ap->ops->phy_reset(ap);
2767 
2768 	ata_for_each_dev(dev, &ap->link, ALL) {
2769 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2770 		    dev->class != ATA_DEV_UNKNOWN)
2771 			classes[dev->devno] = dev->class;
2772 		else
2773 			classes[dev->devno] = ATA_DEV_NONE;
2774 
2775 		dev->class = ATA_DEV_UNKNOWN;
2776 	}
2777 
2778 	ata_port_probe(ap);
2779 
2780 	/* read IDENTIFY page and configure devices. We have to do the identify
2781 	   specific sequence bass-ackwards so that PDIAG- is released by
2782 	   the slave device */
2783 
2784 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2785 		if (tries[dev->devno])
2786 			dev->class = classes[dev->devno];
2787 
2788 		if (!ata_dev_enabled(dev))
2789 			continue;
2790 
2791 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2792 				     dev->id);
2793 		if (rc)
2794 			goto fail;
2795 	}
2796 
2797 	/* Now ask for the cable type as PDIAG- should have been released */
2798 	if (ap->ops->cable_detect)
2799 		ap->cbl = ap->ops->cable_detect(ap);
2800 
2801 	/* We may have SATA bridge glue hiding here irrespective of
2802 	 * the reported cable types and sensed types.  When SATA
2803 	 * drives indicate we have a bridge, we don't know which end
2804 	 * of the link the bridge is which is a problem.
2805 	 */
2806 	ata_for_each_dev(dev, &ap->link, ENABLED)
2807 		if (ata_id_is_sata(dev->id))
2808 			ap->cbl = ATA_CBL_SATA;
2809 
2810 	/* After the identify sequence we can now set up the devices. We do
2811 	   this in the normal order so that the user doesn't get confused */
2812 
2813 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2814 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2815 		rc = ata_dev_configure(dev);
2816 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2817 		if (rc)
2818 			goto fail;
2819 	}
2820 
2821 	/* configure transfer mode */
2822 	rc = ata_set_mode(&ap->link, &dev);
2823 	if (rc)
2824 		goto fail;
2825 
2826 	ata_for_each_dev(dev, &ap->link, ENABLED)
2827 		return 0;
2828 
2829 	/* no device present, disable port */
2830 	ata_port_disable(ap);
2831 	return -ENODEV;
2832 
2833  fail:
2834 	tries[dev->devno]--;
2835 
2836 	switch (rc) {
2837 	case -EINVAL:
2838 		/* eeek, something went very wrong, give up */
2839 		tries[dev->devno] = 0;
2840 		break;
2841 
2842 	case -ENODEV:
2843 		/* give it just one more chance */
2844 		tries[dev->devno] = min(tries[dev->devno], 1);
2845 	case -EIO:
2846 		if (tries[dev->devno] == 1) {
2847 			/* This is the last chance, better to slow
2848 			 * down than lose it.
2849 			 */
2850 			sata_down_spd_limit(&ap->link, 0);
2851 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2852 		}
2853 	}
2854 
2855 	if (!tries[dev->devno])
2856 		ata_dev_disable(dev);
2857 
2858 	goto retry;
2859 }
2860 
2861 /**
2862  *	ata_port_probe - Mark port as enabled
2863  *	@ap: Port for which we indicate enablement
2864  *
2865  *	Modify @ap data structure such that the system
2866  *	thinks that the entire port is enabled.
2867  *
2868  *	LOCKING: host lock, or some other form of
2869  *	serialization.
2870  */
2871 
2872 void ata_port_probe(struct ata_port *ap)
2873 {
2874 	ap->flags &= ~ATA_FLAG_DISABLED;
2875 }
2876 
2877 /**
2878  *	sata_print_link_status - Print SATA link status
2879  *	@link: SATA link to printk link status about
2880  *
2881  *	This function prints link speed and status of a SATA link.
2882  *
2883  *	LOCKING:
2884  *	None.
2885  */
2886 static void sata_print_link_status(struct ata_link *link)
2887 {
2888 	u32 sstatus, scontrol, tmp;
2889 
2890 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2891 		return;
2892 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2893 
2894 	if (ata_phys_link_online(link)) {
2895 		tmp = (sstatus >> 4) & 0xf;
2896 		ata_link_printk(link, KERN_INFO,
2897 				"SATA link up %s (SStatus %X SControl %X)\n",
2898 				sata_spd_string(tmp), sstatus, scontrol);
2899 	} else {
2900 		ata_link_printk(link, KERN_INFO,
2901 				"SATA link down (SStatus %X SControl %X)\n",
2902 				sstatus, scontrol);
2903 	}
2904 }
2905 
2906 /**
2907  *	ata_dev_pair		-	return other device on cable
2908  *	@adev: device
2909  *
2910  *	Obtain the other device on the same cable, or if none is
2911  *	present NULL is returned
2912  */
2913 
2914 struct ata_device *ata_dev_pair(struct ata_device *adev)
2915 {
2916 	struct ata_link *link = adev->link;
2917 	struct ata_device *pair = &link->device[1 - adev->devno];
2918 	if (!ata_dev_enabled(pair))
2919 		return NULL;
2920 	return pair;
2921 }
2922 
2923 /**
2924  *	ata_port_disable - Disable port.
2925  *	@ap: Port to be disabled.
2926  *
2927  *	Modify @ap data structure such that the system
2928  *	thinks that the entire port is disabled, and should
2929  *	never attempt to probe or communicate with devices
2930  *	on this port.
2931  *
2932  *	LOCKING: host lock, or some other form of
2933  *	serialization.
2934  */
2935 
2936 void ata_port_disable(struct ata_port *ap)
2937 {
2938 	ap->link.device[0].class = ATA_DEV_NONE;
2939 	ap->link.device[1].class = ATA_DEV_NONE;
2940 	ap->flags |= ATA_FLAG_DISABLED;
2941 }
2942 
2943 /**
2944  *	sata_down_spd_limit - adjust SATA spd limit downward
2945  *	@link: Link to adjust SATA spd limit for
2946  *	@spd_limit: Additional limit
2947  *
2948  *	Adjust SATA spd limit of @link downward.  Note that this
2949  *	function only adjusts the limit.  The change must be applied
2950  *	using sata_set_spd().
2951  *
2952  *	If @spd_limit is non-zero, the speed is limited to equal to or
2953  *	lower than @spd_limit if such speed is supported.  If
2954  *	@spd_limit is slower than any supported speed, only the lowest
2955  *	supported speed is allowed.
2956  *
2957  *	LOCKING:
2958  *	Inherited from caller.
2959  *
2960  *	RETURNS:
2961  *	0 on success, negative errno on failure
2962  */
2963 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2964 {
2965 	u32 sstatus, spd, mask;
2966 	int rc, bit;
2967 
2968 	if (!sata_scr_valid(link))
2969 		return -EOPNOTSUPP;
2970 
2971 	/* If SCR can be read, use it to determine the current SPD.
2972 	 * If not, use cached value in link->sata_spd.
2973 	 */
2974 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2975 	if (rc == 0 && ata_sstatus_online(sstatus))
2976 		spd = (sstatus >> 4) & 0xf;
2977 	else
2978 		spd = link->sata_spd;
2979 
2980 	mask = link->sata_spd_limit;
2981 	if (mask <= 1)
2982 		return -EINVAL;
2983 
2984 	/* unconditionally mask off the highest bit */
2985 	bit = fls(mask) - 1;
2986 	mask &= ~(1 << bit);
2987 
2988 	/* Mask off all speeds higher than or equal to the current
2989 	 * one.  Force 1.5Gbps if current SPD is not available.
2990 	 */
2991 	if (spd > 1)
2992 		mask &= (1 << (spd - 1)) - 1;
2993 	else
2994 		mask &= 1;
2995 
2996 	/* were we already at the bottom? */
2997 	if (!mask)
2998 		return -EINVAL;
2999 
3000 	if (spd_limit) {
3001 		if (mask & ((1 << spd_limit) - 1))
3002 			mask &= (1 << spd_limit) - 1;
3003 		else {
3004 			bit = ffs(mask) - 1;
3005 			mask = 1 << bit;
3006 		}
3007 	}
3008 
3009 	link->sata_spd_limit = mask;
3010 
3011 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
3012 			sata_spd_string(fls(mask)));
3013 
3014 	return 0;
3015 }
3016 
3017 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3018 {
3019 	struct ata_link *host_link = &link->ap->link;
3020 	u32 limit, target, spd;
3021 
3022 	limit = link->sata_spd_limit;
3023 
3024 	/* Don't configure downstream link faster than upstream link.
3025 	 * It doesn't speed up anything and some PMPs choke on such
3026 	 * configuration.
3027 	 */
3028 	if (!ata_is_host_link(link) && host_link->sata_spd)
3029 		limit &= (1 << host_link->sata_spd) - 1;
3030 
3031 	if (limit == UINT_MAX)
3032 		target = 0;
3033 	else
3034 		target = fls(limit);
3035 
3036 	spd = (*scontrol >> 4) & 0xf;
3037 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3038 
3039 	return spd != target;
3040 }
3041 
3042 /**
3043  *	sata_set_spd_needed - is SATA spd configuration needed
3044  *	@link: Link in question
3045  *
3046  *	Test whether the spd limit in SControl matches
3047  *	@link->sata_spd_limit.  This function is used to determine
3048  *	whether hardreset is necessary to apply SATA spd
3049  *	configuration.
3050  *
3051  *	LOCKING:
3052  *	Inherited from caller.
3053  *
3054  *	RETURNS:
3055  *	1 if SATA spd configuration is needed, 0 otherwise.
3056  */
3057 static int sata_set_spd_needed(struct ata_link *link)
3058 {
3059 	u32 scontrol;
3060 
3061 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3062 		return 1;
3063 
3064 	return __sata_set_spd_needed(link, &scontrol);
3065 }
3066 
3067 /**
3068  *	sata_set_spd - set SATA spd according to spd limit
3069  *	@link: Link to set SATA spd for
3070  *
3071  *	Set SATA spd of @link according to sata_spd_limit.
3072  *
3073  *	LOCKING:
3074  *	Inherited from caller.
3075  *
3076  *	RETURNS:
3077  *	0 if spd doesn't need to be changed, 1 if spd has been
3078  *	changed.  Negative errno if SCR registers are inaccessible.
3079  */
3080 int sata_set_spd(struct ata_link *link)
3081 {
3082 	u32 scontrol;
3083 	int rc;
3084 
3085 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3086 		return rc;
3087 
3088 	if (!__sata_set_spd_needed(link, &scontrol))
3089 		return 0;
3090 
3091 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3092 		return rc;
3093 
3094 	return 1;
3095 }
3096 
3097 /*
3098  * This mode timing computation functionality is ported over from
3099  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3100  */
3101 /*
3102  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3103  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3104  * for UDMA6, which is currently supported only by Maxtor drives.
3105  *
3106  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3107  */
3108 
3109 static const struct ata_timing ata_timing[] = {
3110 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
3111 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
3112 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
3113 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
3114 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
3115 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
3116 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
3117 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
3118 
3119 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
3120 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
3121 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
3122 
3123 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
3124 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
3125 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
3126 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
3127 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
3128 
3129 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
3130 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
3131 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
3132 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
3133 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
3134 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
3135 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
3136 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
3137 
3138 	{ 0xFF }
3139 };
3140 
3141 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3142 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
3143 
3144 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3145 {
3146 	q->setup	= EZ(t->setup      * 1000,  T);
3147 	q->act8b	= EZ(t->act8b      * 1000,  T);
3148 	q->rec8b	= EZ(t->rec8b      * 1000,  T);
3149 	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
3150 	q->active	= EZ(t->active     * 1000,  T);
3151 	q->recover	= EZ(t->recover    * 1000,  T);
3152 	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
3153 	q->cycle	= EZ(t->cycle      * 1000,  T);
3154 	q->udma		= EZ(t->udma       * 1000, UT);
3155 }
3156 
3157 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3158 		      struct ata_timing *m, unsigned int what)
3159 {
3160 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3161 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3162 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3163 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3164 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3165 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3166 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3167 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3168 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3169 }
3170 
3171 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3172 {
3173 	const struct ata_timing *t = ata_timing;
3174 
3175 	while (xfer_mode > t->mode)
3176 		t++;
3177 
3178 	if (xfer_mode == t->mode)
3179 		return t;
3180 	return NULL;
3181 }
3182 
3183 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3184 		       struct ata_timing *t, int T, int UT)
3185 {
3186 	const struct ata_timing *s;
3187 	struct ata_timing p;
3188 
3189 	/*
3190 	 * Find the mode.
3191 	 */
3192 
3193 	if (!(s = ata_timing_find_mode(speed)))
3194 		return -EINVAL;
3195 
3196 	memcpy(t, s, sizeof(*s));
3197 
3198 	/*
3199 	 * If the drive is an EIDE drive, it can tell us it needs extended
3200 	 * PIO/MW_DMA cycle timing.
3201 	 */
3202 
3203 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3204 		memset(&p, 0, sizeof(p));
3205 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3206 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3207 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
3208 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
3209 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3210 		}
3211 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3212 	}
3213 
3214 	/*
3215 	 * Convert the timing to bus clock counts.
3216 	 */
3217 
3218 	ata_timing_quantize(t, t, T, UT);
3219 
3220 	/*
3221 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3222 	 * S.M.A.R.T * and some other commands. We have to ensure that the
3223 	 * DMA cycle timing is slower/equal than the fastest PIO timing.
3224 	 */
3225 
3226 	if (speed > XFER_PIO_6) {
3227 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3228 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3229 	}
3230 
3231 	/*
3232 	 * Lengthen active & recovery time so that cycle time is correct.
3233 	 */
3234 
3235 	if (t->act8b + t->rec8b < t->cyc8b) {
3236 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3237 		t->rec8b = t->cyc8b - t->act8b;
3238 	}
3239 
3240 	if (t->active + t->recover < t->cycle) {
3241 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3242 		t->recover = t->cycle - t->active;
3243 	}
3244 
3245 	/* In a few cases quantisation may produce enough errors to
3246 	   leave t->cycle too low for the sum of active and recovery
3247 	   if so we must correct this */
3248 	if (t->active + t->recover > t->cycle)
3249 		t->cycle = t->active + t->recover;
3250 
3251 	return 0;
3252 }
3253 
3254 /**
3255  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3256  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3257  *	@cycle: cycle duration in ns
3258  *
3259  *	Return matching xfer mode for @cycle.  The returned mode is of
3260  *	the transfer type specified by @xfer_shift.  If @cycle is too
3261  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3262  *	than the fastest known mode, the fasted mode is returned.
3263  *
3264  *	LOCKING:
3265  *	None.
3266  *
3267  *	RETURNS:
3268  *	Matching xfer_mode, 0xff if no match found.
3269  */
3270 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3271 {
3272 	u8 base_mode = 0xff, last_mode = 0xff;
3273 	const struct ata_xfer_ent *ent;
3274 	const struct ata_timing *t;
3275 
3276 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3277 		if (ent->shift == xfer_shift)
3278 			base_mode = ent->base;
3279 
3280 	for (t = ata_timing_find_mode(base_mode);
3281 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3282 		unsigned short this_cycle;
3283 
3284 		switch (xfer_shift) {
3285 		case ATA_SHIFT_PIO:
3286 		case ATA_SHIFT_MWDMA:
3287 			this_cycle = t->cycle;
3288 			break;
3289 		case ATA_SHIFT_UDMA:
3290 			this_cycle = t->udma;
3291 			break;
3292 		default:
3293 			return 0xff;
3294 		}
3295 
3296 		if (cycle > this_cycle)
3297 			break;
3298 
3299 		last_mode = t->mode;
3300 	}
3301 
3302 	return last_mode;
3303 }
3304 
3305 /**
3306  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3307  *	@dev: Device to adjust xfer masks
3308  *	@sel: ATA_DNXFER_* selector
3309  *
3310  *	Adjust xfer masks of @dev downward.  Note that this function
3311  *	does not apply the change.  Invoking ata_set_mode() afterwards
3312  *	will apply the limit.
3313  *
3314  *	LOCKING:
3315  *	Inherited from caller.
3316  *
3317  *	RETURNS:
3318  *	0 on success, negative errno on failure
3319  */
3320 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3321 {
3322 	char buf[32];
3323 	unsigned long orig_mask, xfer_mask;
3324 	unsigned long pio_mask, mwdma_mask, udma_mask;
3325 	int quiet, highbit;
3326 
3327 	quiet = !!(sel & ATA_DNXFER_QUIET);
3328 	sel &= ~ATA_DNXFER_QUIET;
3329 
3330 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3331 						  dev->mwdma_mask,
3332 						  dev->udma_mask);
3333 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3334 
3335 	switch (sel) {
3336 	case ATA_DNXFER_PIO:
3337 		highbit = fls(pio_mask) - 1;
3338 		pio_mask &= ~(1 << highbit);
3339 		break;
3340 
3341 	case ATA_DNXFER_DMA:
3342 		if (udma_mask) {
3343 			highbit = fls(udma_mask) - 1;
3344 			udma_mask &= ~(1 << highbit);
3345 			if (!udma_mask)
3346 				return -ENOENT;
3347 		} else if (mwdma_mask) {
3348 			highbit = fls(mwdma_mask) - 1;
3349 			mwdma_mask &= ~(1 << highbit);
3350 			if (!mwdma_mask)
3351 				return -ENOENT;
3352 		}
3353 		break;
3354 
3355 	case ATA_DNXFER_40C:
3356 		udma_mask &= ATA_UDMA_MASK_40C;
3357 		break;
3358 
3359 	case ATA_DNXFER_FORCE_PIO0:
3360 		pio_mask &= 1;
3361 	case ATA_DNXFER_FORCE_PIO:
3362 		mwdma_mask = 0;
3363 		udma_mask = 0;
3364 		break;
3365 
3366 	default:
3367 		BUG();
3368 	}
3369 
3370 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3371 
3372 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3373 		return -ENOENT;
3374 
3375 	if (!quiet) {
3376 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3377 			snprintf(buf, sizeof(buf), "%s:%s",
3378 				 ata_mode_string(xfer_mask),
3379 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3380 		else
3381 			snprintf(buf, sizeof(buf), "%s",
3382 				 ata_mode_string(xfer_mask));
3383 
3384 		ata_dev_printk(dev, KERN_WARNING,
3385 			       "limiting speed to %s\n", buf);
3386 	}
3387 
3388 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3389 			    &dev->udma_mask);
3390 
3391 	return 0;
3392 }
3393 
3394 static int ata_dev_set_mode(struct ata_device *dev)
3395 {
3396 	struct ata_port *ap = dev->link->ap;
3397 	struct ata_eh_context *ehc = &dev->link->eh_context;
3398 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3399 	const char *dev_err_whine = "";
3400 	int ign_dev_err = 0;
3401 	unsigned int err_mask = 0;
3402 	int rc;
3403 
3404 	dev->flags &= ~ATA_DFLAG_PIO;
3405 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3406 		dev->flags |= ATA_DFLAG_PIO;
3407 
3408 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3409 		dev_err_whine = " (SET_XFERMODE skipped)";
3410 	else {
3411 		if (nosetxfer)
3412 			ata_dev_printk(dev, KERN_WARNING,
3413 				       "NOSETXFER but PATA detected - can't "
3414 				       "skip SETXFER, might malfunction\n");
3415 		err_mask = ata_dev_set_xfermode(dev);
3416 	}
3417 
3418 	if (err_mask & ~AC_ERR_DEV)
3419 		goto fail;
3420 
3421 	/* revalidate */
3422 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3423 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3424 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3425 	if (rc)
3426 		return rc;
3427 
3428 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3429 		/* Old CFA may refuse this command, which is just fine */
3430 		if (ata_id_is_cfa(dev->id))
3431 			ign_dev_err = 1;
3432 		/* Catch several broken garbage emulations plus some pre
3433 		   ATA devices */
3434 		if (ata_id_major_version(dev->id) == 0 &&
3435 					dev->pio_mode <= XFER_PIO_2)
3436 			ign_dev_err = 1;
3437 		/* Some very old devices and some bad newer ones fail
3438 		   any kind of SET_XFERMODE request but support PIO0-2
3439 		   timings and no IORDY */
3440 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3441 			ign_dev_err = 1;
3442 	}
3443 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3444 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3445 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3446 	    dev->dma_mode == XFER_MW_DMA_0 &&
3447 	    (dev->id[63] >> 8) & 1)
3448 		ign_dev_err = 1;
3449 
3450 	/* if the device is actually configured correctly, ignore dev err */
3451 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3452 		ign_dev_err = 1;
3453 
3454 	if (err_mask & AC_ERR_DEV) {
3455 		if (!ign_dev_err)
3456 			goto fail;
3457 		else
3458 			dev_err_whine = " (device error ignored)";
3459 	}
3460 
3461 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3462 		dev->xfer_shift, (int)dev->xfer_mode);
3463 
3464 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3465 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3466 		       dev_err_whine);
3467 
3468 	return 0;
3469 
3470  fail:
3471 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3472 		       "(err_mask=0x%x)\n", err_mask);
3473 	return -EIO;
3474 }
3475 
3476 /**
3477  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3478  *	@link: link on which timings will be programmed
3479  *	@r_failed_dev: out parameter for failed device
3480  *
3481  *	Standard implementation of the function used to tune and set
3482  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3483  *	ata_dev_set_mode() fails, pointer to the failing device is
3484  *	returned in @r_failed_dev.
3485  *
3486  *	LOCKING:
3487  *	PCI/etc. bus probe sem.
3488  *
3489  *	RETURNS:
3490  *	0 on success, negative errno otherwise
3491  */
3492 
3493 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3494 {
3495 	struct ata_port *ap = link->ap;
3496 	struct ata_device *dev;
3497 	int rc = 0, used_dma = 0, found = 0;
3498 
3499 	/* step 1: calculate xfer_mask */
3500 	ata_for_each_dev(dev, link, ENABLED) {
3501 		unsigned long pio_mask, dma_mask;
3502 		unsigned int mode_mask;
3503 
3504 		mode_mask = ATA_DMA_MASK_ATA;
3505 		if (dev->class == ATA_DEV_ATAPI)
3506 			mode_mask = ATA_DMA_MASK_ATAPI;
3507 		else if (ata_id_is_cfa(dev->id))
3508 			mode_mask = ATA_DMA_MASK_CFA;
3509 
3510 		ata_dev_xfermask(dev);
3511 		ata_force_xfermask(dev);
3512 
3513 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3514 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3515 
3516 		if (libata_dma_mask & mode_mask)
3517 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3518 		else
3519 			dma_mask = 0;
3520 
3521 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3522 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3523 
3524 		found = 1;
3525 		if (ata_dma_enabled(dev))
3526 			used_dma = 1;
3527 	}
3528 	if (!found)
3529 		goto out;
3530 
3531 	/* step 2: always set host PIO timings */
3532 	ata_for_each_dev(dev, link, ENABLED) {
3533 		if (dev->pio_mode == 0xff) {
3534 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3535 			rc = -EINVAL;
3536 			goto out;
3537 		}
3538 
3539 		dev->xfer_mode = dev->pio_mode;
3540 		dev->xfer_shift = ATA_SHIFT_PIO;
3541 		if (ap->ops->set_piomode)
3542 			ap->ops->set_piomode(ap, dev);
3543 	}
3544 
3545 	/* step 3: set host DMA timings */
3546 	ata_for_each_dev(dev, link, ENABLED) {
3547 		if (!ata_dma_enabled(dev))
3548 			continue;
3549 
3550 		dev->xfer_mode = dev->dma_mode;
3551 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3552 		if (ap->ops->set_dmamode)
3553 			ap->ops->set_dmamode(ap, dev);
3554 	}
3555 
3556 	/* step 4: update devices' xfer mode */
3557 	ata_for_each_dev(dev, link, ENABLED) {
3558 		rc = ata_dev_set_mode(dev);
3559 		if (rc)
3560 			goto out;
3561 	}
3562 
3563 	/* Record simplex status. If we selected DMA then the other
3564 	 * host channels are not permitted to do so.
3565 	 */
3566 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3567 		ap->host->simplex_claimed = ap;
3568 
3569  out:
3570 	if (rc)
3571 		*r_failed_dev = dev;
3572 	return rc;
3573 }
3574 
3575 /**
3576  *	ata_wait_ready - wait for link to become ready
3577  *	@link: link to be waited on
3578  *	@deadline: deadline jiffies for the operation
3579  *	@check_ready: callback to check link readiness
3580  *
3581  *	Wait for @link to become ready.  @check_ready should return
3582  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3583  *	link doesn't seem to be occupied, other errno for other error
3584  *	conditions.
3585  *
3586  *	Transient -ENODEV conditions are allowed for
3587  *	ATA_TMOUT_FF_WAIT.
3588  *
3589  *	LOCKING:
3590  *	EH context.
3591  *
3592  *	RETURNS:
3593  *	0 if @linke is ready before @deadline; otherwise, -errno.
3594  */
3595 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3596 		   int (*check_ready)(struct ata_link *link))
3597 {
3598 	unsigned long start = jiffies;
3599 	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3600 	int warned = 0;
3601 
3602 	/* Slave readiness can't be tested separately from master.  On
3603 	 * M/S emulation configuration, this function should be called
3604 	 * only on the master and it will handle both master and slave.
3605 	 */
3606 	WARN_ON(link == link->ap->slave_link);
3607 
3608 	if (time_after(nodev_deadline, deadline))
3609 		nodev_deadline = deadline;
3610 
3611 	while (1) {
3612 		unsigned long now = jiffies;
3613 		int ready, tmp;
3614 
3615 		ready = tmp = check_ready(link);
3616 		if (ready > 0)
3617 			return 0;
3618 
3619 		/* -ENODEV could be transient.  Ignore -ENODEV if link
3620 		 * is online.  Also, some SATA devices take a long
3621 		 * time to clear 0xff after reset.  For example,
3622 		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3623 		 * GoVault needs even more than that.  Wait for
3624 		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3625 		 *
3626 		 * Note that some PATA controllers (pata_ali) explode
3627 		 * if status register is read more than once when
3628 		 * there's no device attached.
3629 		 */
3630 		if (ready == -ENODEV) {
3631 			if (ata_link_online(link))
3632 				ready = 0;
3633 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3634 				 !ata_link_offline(link) &&
3635 				 time_before(now, nodev_deadline))
3636 				ready = 0;
3637 		}
3638 
3639 		if (ready)
3640 			return ready;
3641 		if (time_after(now, deadline))
3642 			return -EBUSY;
3643 
3644 		if (!warned && time_after(now, start + 5 * HZ) &&
3645 		    (deadline - now > 3 * HZ)) {
3646 			ata_link_printk(link, KERN_WARNING,
3647 				"link is slow to respond, please be patient "
3648 				"(ready=%d)\n", tmp);
3649 			warned = 1;
3650 		}
3651 
3652 		msleep(50);
3653 	}
3654 }
3655 
3656 /**
3657  *	ata_wait_after_reset - wait for link to become ready after reset
3658  *	@link: link to be waited on
3659  *	@deadline: deadline jiffies for the operation
3660  *	@check_ready: callback to check link readiness
3661  *
3662  *	Wait for @link to become ready after reset.
3663  *
3664  *	LOCKING:
3665  *	EH context.
3666  *
3667  *	RETURNS:
3668  *	0 if @linke is ready before @deadline; otherwise, -errno.
3669  */
3670 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3671 				int (*check_ready)(struct ata_link *link))
3672 {
3673 	msleep(ATA_WAIT_AFTER_RESET);
3674 
3675 	return ata_wait_ready(link, deadline, check_ready);
3676 }
3677 
3678 /**
3679  *	sata_link_debounce - debounce SATA phy status
3680  *	@link: ATA link to debounce SATA phy status for
3681  *	@params: timing parameters { interval, duratinon, timeout } in msec
3682  *	@deadline: deadline jiffies for the operation
3683  *
3684 *	Make sure SStatus of @link reaches stable state, determined by
3685  *	holding the same value where DET is not 1 for @duration polled
3686  *	every @interval, before @timeout.  Timeout constraints the
3687  *	beginning of the stable state.  Because DET gets stuck at 1 on
3688  *	some controllers after hot unplugging, this functions waits
3689  *	until timeout then returns 0 if DET is stable at 1.
3690  *
3691  *	@timeout is further limited by @deadline.  The sooner of the
3692  *	two is used.
3693  *
3694  *	LOCKING:
3695  *	Kernel thread context (may sleep)
3696  *
3697  *	RETURNS:
3698  *	0 on success, -errno on failure.
3699  */
3700 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3701 		       unsigned long deadline)
3702 {
3703 	unsigned long interval = params[0];
3704 	unsigned long duration = params[1];
3705 	unsigned long last_jiffies, t;
3706 	u32 last, cur;
3707 	int rc;
3708 
3709 	t = ata_deadline(jiffies, params[2]);
3710 	if (time_before(t, deadline))
3711 		deadline = t;
3712 
3713 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3714 		return rc;
3715 	cur &= 0xf;
3716 
3717 	last = cur;
3718 	last_jiffies = jiffies;
3719 
3720 	while (1) {
3721 		msleep(interval);
3722 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3723 			return rc;
3724 		cur &= 0xf;
3725 
3726 		/* DET stable? */
3727 		if (cur == last) {
3728 			if (cur == 1 && time_before(jiffies, deadline))
3729 				continue;
3730 			if (time_after(jiffies,
3731 				       ata_deadline(last_jiffies, duration)))
3732 				return 0;
3733 			continue;
3734 		}
3735 
3736 		/* unstable, start over */
3737 		last = cur;
3738 		last_jiffies = jiffies;
3739 
3740 		/* Check deadline.  If debouncing failed, return
3741 		 * -EPIPE to tell upper layer to lower link speed.
3742 		 */
3743 		if (time_after(jiffies, deadline))
3744 			return -EPIPE;
3745 	}
3746 }
3747 
3748 /**
3749  *	sata_link_resume - resume SATA link
3750  *	@link: ATA link to resume SATA
3751  *	@params: timing parameters { interval, duratinon, timeout } in msec
3752  *	@deadline: deadline jiffies for the operation
3753  *
3754  *	Resume SATA phy @link and debounce it.
3755  *
3756  *	LOCKING:
3757  *	Kernel thread context (may sleep)
3758  *
3759  *	RETURNS:
3760  *	0 on success, -errno on failure.
3761  */
3762 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3763 		     unsigned long deadline)
3764 {
3765 	u32 scontrol, serror;
3766 	int rc;
3767 
3768 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3769 		return rc;
3770 
3771 	scontrol = (scontrol & 0x0f0) | 0x300;
3772 
3773 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3774 		return rc;
3775 
3776 	/* Some PHYs react badly if SStatus is pounded immediately
3777 	 * after resuming.  Delay 200ms before debouncing.
3778 	 */
3779 	msleep(200);
3780 
3781 	if ((rc = sata_link_debounce(link, params, deadline)))
3782 		return rc;
3783 
3784 	/* clear SError, some PHYs require this even for SRST to work */
3785 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3786 		rc = sata_scr_write(link, SCR_ERROR, serror);
3787 
3788 	return rc != -EINVAL ? rc : 0;
3789 }
3790 
3791 /**
3792  *	ata_std_prereset - prepare for reset
3793  *	@link: ATA link to be reset
3794  *	@deadline: deadline jiffies for the operation
3795  *
3796  *	@link is about to be reset.  Initialize it.  Failure from
3797  *	prereset makes libata abort whole reset sequence and give up
3798  *	that port, so prereset should be best-effort.  It does its
3799  *	best to prepare for reset sequence but if things go wrong, it
3800  *	should just whine, not fail.
3801  *
3802  *	LOCKING:
3803  *	Kernel thread context (may sleep)
3804  *
3805  *	RETURNS:
3806  *	0 on success, -errno otherwise.
3807  */
3808 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3809 {
3810 	struct ata_port *ap = link->ap;
3811 	struct ata_eh_context *ehc = &link->eh_context;
3812 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3813 	int rc;
3814 
3815 	/* if we're about to do hardreset, nothing more to do */
3816 	if (ehc->i.action & ATA_EH_HARDRESET)
3817 		return 0;
3818 
3819 	/* if SATA, resume link */
3820 	if (ap->flags & ATA_FLAG_SATA) {
3821 		rc = sata_link_resume(link, timing, deadline);
3822 		/* whine about phy resume failure but proceed */
3823 		if (rc && rc != -EOPNOTSUPP)
3824 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3825 					"link for reset (errno=%d)\n", rc);
3826 	}
3827 
3828 	/* no point in trying softreset on offline link */
3829 	if (ata_phys_link_offline(link))
3830 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3831 
3832 	return 0;
3833 }
3834 
3835 /**
3836  *	sata_link_hardreset - reset link via SATA phy reset
3837  *	@link: link to reset
3838  *	@timing: timing parameters { interval, duratinon, timeout } in msec
3839  *	@deadline: deadline jiffies for the operation
3840  *	@online: optional out parameter indicating link onlineness
3841  *	@check_ready: optional callback to check link readiness
3842  *
3843  *	SATA phy-reset @link using DET bits of SControl register.
3844  *	After hardreset, link readiness is waited upon using
3845  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3846  *	allowed to not specify @check_ready and wait itself after this
3847  *	function returns.  Device classification is LLD's
3848  *	responsibility.
3849  *
3850  *	*@online is set to one iff reset succeeded and @link is online
3851  *	after reset.
3852  *
3853  *	LOCKING:
3854  *	Kernel thread context (may sleep)
3855  *
3856  *	RETURNS:
3857  *	0 on success, -errno otherwise.
3858  */
3859 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3860 			unsigned long deadline,
3861 			bool *online, int (*check_ready)(struct ata_link *))
3862 {
3863 	u32 scontrol;
3864 	int rc;
3865 
3866 	DPRINTK("ENTER\n");
3867 
3868 	if (online)
3869 		*online = false;
3870 
3871 	if (sata_set_spd_needed(link)) {
3872 		/* SATA spec says nothing about how to reconfigure
3873 		 * spd.  To be on the safe side, turn off phy during
3874 		 * reconfiguration.  This works for at least ICH7 AHCI
3875 		 * and Sil3124.
3876 		 */
3877 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3878 			goto out;
3879 
3880 		scontrol = (scontrol & 0x0f0) | 0x304;
3881 
3882 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3883 			goto out;
3884 
3885 		sata_set_spd(link);
3886 	}
3887 
3888 	/* issue phy wake/reset */
3889 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3890 		goto out;
3891 
3892 	scontrol = (scontrol & 0x0f0) | 0x301;
3893 
3894 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3895 		goto out;
3896 
3897 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3898 	 * 10.4.2 says at least 1 ms.
3899 	 */
3900 	msleep(1);
3901 
3902 	/* bring link back */
3903 	rc = sata_link_resume(link, timing, deadline);
3904 	if (rc)
3905 		goto out;
3906 	/* if link is offline nothing more to do */
3907 	if (ata_phys_link_offline(link))
3908 		goto out;
3909 
3910 	/* Link is online.  From this point, -ENODEV too is an error. */
3911 	if (online)
3912 		*online = true;
3913 
3914 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3915 		/* If PMP is supported, we have to do follow-up SRST.
3916 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3917 		 * the first port is empty.  Wait only for
3918 		 * ATA_TMOUT_PMP_SRST_WAIT.
3919 		 */
3920 		if (check_ready) {
3921 			unsigned long pmp_deadline;
3922 
3923 			pmp_deadline = ata_deadline(jiffies,
3924 						    ATA_TMOUT_PMP_SRST_WAIT);
3925 			if (time_after(pmp_deadline, deadline))
3926 				pmp_deadline = deadline;
3927 			ata_wait_ready(link, pmp_deadline, check_ready);
3928 		}
3929 		rc = -EAGAIN;
3930 		goto out;
3931 	}
3932 
3933 	rc = 0;
3934 	if (check_ready)
3935 		rc = ata_wait_ready(link, deadline, check_ready);
3936  out:
3937 	if (rc && rc != -EAGAIN) {
3938 		/* online is set iff link is online && reset succeeded */
3939 		if (online)
3940 			*online = false;
3941 		ata_link_printk(link, KERN_ERR,
3942 				"COMRESET failed (errno=%d)\n", rc);
3943 	}
3944 	DPRINTK("EXIT, rc=%d\n", rc);
3945 	return rc;
3946 }
3947 
3948 /**
3949  *	sata_std_hardreset - COMRESET w/o waiting or classification
3950  *	@link: link to reset
3951  *	@class: resulting class of attached device
3952  *	@deadline: deadline jiffies for the operation
3953  *
3954  *	Standard SATA COMRESET w/o waiting or classification.
3955  *
3956  *	LOCKING:
3957  *	Kernel thread context (may sleep)
3958  *
3959  *	RETURNS:
3960  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3961  */
3962 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3963 		       unsigned long deadline)
3964 {
3965 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3966 	bool online;
3967 	int rc;
3968 
3969 	/* do hardreset */
3970 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3971 	return online ? -EAGAIN : rc;
3972 }
3973 
3974 /**
3975  *	ata_std_postreset - standard postreset callback
3976  *	@link: the target ata_link
3977  *	@classes: classes of attached devices
3978  *
3979  *	This function is invoked after a successful reset.  Note that
3980  *	the device might have been reset more than once using
3981  *	different reset methods before postreset is invoked.
3982  *
3983  *	LOCKING:
3984  *	Kernel thread context (may sleep)
3985  */
3986 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3987 {
3988 	u32 serror;
3989 
3990 	DPRINTK("ENTER\n");
3991 
3992 	/* reset complete, clear SError */
3993 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3994 		sata_scr_write(link, SCR_ERROR, serror);
3995 
3996 	/* print link status */
3997 	sata_print_link_status(link);
3998 
3999 	DPRINTK("EXIT\n");
4000 }
4001 
4002 /**
4003  *	ata_dev_same_device - Determine whether new ID matches configured device
4004  *	@dev: device to compare against
4005  *	@new_class: class of the new device
4006  *	@new_id: IDENTIFY page of the new device
4007  *
4008  *	Compare @new_class and @new_id against @dev and determine
4009  *	whether @dev is the device indicated by @new_class and
4010  *	@new_id.
4011  *
4012  *	LOCKING:
4013  *	None.
4014  *
4015  *	RETURNS:
4016  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
4017  */
4018 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4019 			       const u16 *new_id)
4020 {
4021 	const u16 *old_id = dev->id;
4022 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
4023 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4024 
4025 	if (dev->class != new_class) {
4026 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4027 			       dev->class, new_class);
4028 		return 0;
4029 	}
4030 
4031 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4032 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4033 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4034 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4035 
4036 	if (strcmp(model[0], model[1])) {
4037 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4038 			       "'%s' != '%s'\n", model[0], model[1]);
4039 		return 0;
4040 	}
4041 
4042 	if (strcmp(serial[0], serial[1])) {
4043 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4044 			       "'%s' != '%s'\n", serial[0], serial[1]);
4045 		return 0;
4046 	}
4047 
4048 	return 1;
4049 }
4050 
4051 /**
4052  *	ata_dev_reread_id - Re-read IDENTIFY data
4053  *	@dev: target ATA device
4054  *	@readid_flags: read ID flags
4055  *
4056  *	Re-read IDENTIFY page and make sure @dev is still attached to
4057  *	the port.
4058  *
4059  *	LOCKING:
4060  *	Kernel thread context (may sleep)
4061  *
4062  *	RETURNS:
4063  *	0 on success, negative errno otherwise
4064  */
4065 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4066 {
4067 	unsigned int class = dev->class;
4068 	u16 *id = (void *)dev->link->ap->sector_buf;
4069 	int rc;
4070 
4071 	/* read ID data */
4072 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4073 	if (rc)
4074 		return rc;
4075 
4076 	/* is the device still there? */
4077 	if (!ata_dev_same_device(dev, class, id))
4078 		return -ENODEV;
4079 
4080 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4081 	return 0;
4082 }
4083 
4084 /**
4085  *	ata_dev_revalidate - Revalidate ATA device
4086  *	@dev: device to revalidate
4087  *	@new_class: new class code
4088  *	@readid_flags: read ID flags
4089  *
4090  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4091  *	port and reconfigure it according to the new IDENTIFY page.
4092  *
4093  *	LOCKING:
4094  *	Kernel thread context (may sleep)
4095  *
4096  *	RETURNS:
4097  *	0 on success, negative errno otherwise
4098  */
4099 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4100 		       unsigned int readid_flags)
4101 {
4102 	u64 n_sectors = dev->n_sectors;
4103 	u64 n_native_sectors = dev->n_native_sectors;
4104 	int rc;
4105 
4106 	if (!ata_dev_enabled(dev))
4107 		return -ENODEV;
4108 
4109 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4110 	if (ata_class_enabled(new_class) &&
4111 	    new_class != ATA_DEV_ATA &&
4112 	    new_class != ATA_DEV_ATAPI &&
4113 	    new_class != ATA_DEV_SEMB) {
4114 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4115 			       dev->class, new_class);
4116 		rc = -ENODEV;
4117 		goto fail;
4118 	}
4119 
4120 	/* re-read ID */
4121 	rc = ata_dev_reread_id(dev, readid_flags);
4122 	if (rc)
4123 		goto fail;
4124 
4125 	/* configure device according to the new ID */
4126 	rc = ata_dev_configure(dev);
4127 	if (rc)
4128 		goto fail;
4129 
4130 	/* verify n_sectors hasn't changed */
4131 	if (dev->class == ATA_DEV_ATA && n_sectors &&
4132 	    dev->n_sectors != n_sectors) {
4133 		ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch "
4134 			       "%llu != %llu\n",
4135 			       (unsigned long long)n_sectors,
4136 			       (unsigned long long)dev->n_sectors);
4137 		/*
4138 		 * Something could have caused HPA to be unlocked
4139 		 * involuntarily.  If n_native_sectors hasn't changed
4140 		 * and the new size matches it, keep the device.
4141 		 */
4142 		if (dev->n_native_sectors == n_native_sectors &&
4143 		    dev->n_sectors > n_sectors &&
4144 		    dev->n_sectors == n_native_sectors) {
4145 			ata_dev_printk(dev, KERN_WARNING,
4146 				       "new n_sectors matches native, probably "
4147 				       "late HPA unlock, continuing\n");
4148 			/* keep using the old n_sectors */
4149 			dev->n_sectors = n_sectors;
4150 		} else {
4151 			/* restore original n_[native]_sectors and fail */
4152 			dev->n_native_sectors = n_native_sectors;
4153 			dev->n_sectors = n_sectors;
4154 			rc = -ENODEV;
4155 			goto fail;
4156 		}
4157 	}
4158 
4159 	return 0;
4160 
4161  fail:
4162 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4163 	return rc;
4164 }
4165 
4166 struct ata_blacklist_entry {
4167 	const char *model_num;
4168 	const char *model_rev;
4169 	unsigned long horkage;
4170 };
4171 
4172 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4173 	/* Devices with DMA related problems under Linux */
4174 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4175 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4176 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4177 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4178 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4179 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4180 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4181 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4182 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4183 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
4184 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
4185 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4186 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4187 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4188 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4189 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4190 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
4191 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
4192 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4193 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4194 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4195 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4196 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4197 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4198 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4199 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4200 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4201 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4202 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4203 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4204 	/* Odd clown on sil3726/4726 PMPs */
4205 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4206 
4207 	/* Weird ATAPI devices */
4208 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4209 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4210 
4211 	/* Devices we expect to fail diagnostics */
4212 
4213 	/* Devices where NCQ should be avoided */
4214 	/* NCQ is slow */
4215 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4216 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4217 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4218 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4219 	/* NCQ is broken */
4220 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4221 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4222 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4223 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4224 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4225 
4226 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4227 	{ "ST31500341AS",	"SD15",		ATA_HORKAGE_NONCQ |
4228 						ATA_HORKAGE_FIRMWARE_WARN },
4229 	{ "ST31500341AS",	"SD16",		ATA_HORKAGE_NONCQ |
4230 						ATA_HORKAGE_FIRMWARE_WARN },
4231 	{ "ST31500341AS",	"SD17",		ATA_HORKAGE_NONCQ |
4232 						ATA_HORKAGE_FIRMWARE_WARN },
4233 	{ "ST31500341AS",	"SD18",		ATA_HORKAGE_NONCQ |
4234 						ATA_HORKAGE_FIRMWARE_WARN },
4235 	{ "ST31500341AS",	"SD19",		ATA_HORKAGE_NONCQ |
4236 						ATA_HORKAGE_FIRMWARE_WARN },
4237 
4238 	{ "ST31000333AS",	"SD15",		ATA_HORKAGE_NONCQ |
4239 						ATA_HORKAGE_FIRMWARE_WARN },
4240 	{ "ST31000333AS",	"SD16",		ATA_HORKAGE_NONCQ |
4241 						ATA_HORKAGE_FIRMWARE_WARN },
4242 	{ "ST31000333AS",	"SD17",		ATA_HORKAGE_NONCQ |
4243 						ATA_HORKAGE_FIRMWARE_WARN },
4244 	{ "ST31000333AS",	"SD18",		ATA_HORKAGE_NONCQ |
4245 						ATA_HORKAGE_FIRMWARE_WARN },
4246 	{ "ST31000333AS",	"SD19",		ATA_HORKAGE_NONCQ |
4247 						ATA_HORKAGE_FIRMWARE_WARN },
4248 
4249 	{ "ST3640623AS",	"SD15",		ATA_HORKAGE_NONCQ |
4250 						ATA_HORKAGE_FIRMWARE_WARN },
4251 	{ "ST3640623AS",	"SD16",		ATA_HORKAGE_NONCQ |
4252 						ATA_HORKAGE_FIRMWARE_WARN },
4253 	{ "ST3640623AS",	"SD17",		ATA_HORKAGE_NONCQ |
4254 						ATA_HORKAGE_FIRMWARE_WARN },
4255 	{ "ST3640623AS",	"SD18",		ATA_HORKAGE_NONCQ |
4256 						ATA_HORKAGE_FIRMWARE_WARN },
4257 	{ "ST3640623AS",	"SD19",		ATA_HORKAGE_NONCQ |
4258 						ATA_HORKAGE_FIRMWARE_WARN },
4259 
4260 	{ "ST3640323AS",	"SD15",		ATA_HORKAGE_NONCQ |
4261 						ATA_HORKAGE_FIRMWARE_WARN },
4262 	{ "ST3640323AS",	"SD16",		ATA_HORKAGE_NONCQ |
4263 						ATA_HORKAGE_FIRMWARE_WARN },
4264 	{ "ST3640323AS",	"SD17",		ATA_HORKAGE_NONCQ |
4265 						ATA_HORKAGE_FIRMWARE_WARN },
4266 	{ "ST3640323AS",	"SD18",		ATA_HORKAGE_NONCQ |
4267 						ATA_HORKAGE_FIRMWARE_WARN },
4268 	{ "ST3640323AS",	"SD19",		ATA_HORKAGE_NONCQ |
4269 						ATA_HORKAGE_FIRMWARE_WARN },
4270 
4271 	{ "ST3320813AS",	"SD15",		ATA_HORKAGE_NONCQ |
4272 						ATA_HORKAGE_FIRMWARE_WARN },
4273 	{ "ST3320813AS",	"SD16",		ATA_HORKAGE_NONCQ |
4274 						ATA_HORKAGE_FIRMWARE_WARN },
4275 	{ "ST3320813AS",	"SD17",		ATA_HORKAGE_NONCQ |
4276 						ATA_HORKAGE_FIRMWARE_WARN },
4277 	{ "ST3320813AS",	"SD18",		ATA_HORKAGE_NONCQ |
4278 						ATA_HORKAGE_FIRMWARE_WARN },
4279 	{ "ST3320813AS",	"SD19",		ATA_HORKAGE_NONCQ |
4280 						ATA_HORKAGE_FIRMWARE_WARN },
4281 
4282 	{ "ST3320613AS",	"SD15",		ATA_HORKAGE_NONCQ |
4283 						ATA_HORKAGE_FIRMWARE_WARN },
4284 	{ "ST3320613AS",	"SD16",		ATA_HORKAGE_NONCQ |
4285 						ATA_HORKAGE_FIRMWARE_WARN },
4286 	{ "ST3320613AS",	"SD17",		ATA_HORKAGE_NONCQ |
4287 						ATA_HORKAGE_FIRMWARE_WARN },
4288 	{ "ST3320613AS",	"SD18",		ATA_HORKAGE_NONCQ |
4289 						ATA_HORKAGE_FIRMWARE_WARN },
4290 	{ "ST3320613AS",	"SD19",		ATA_HORKAGE_NONCQ |
4291 						ATA_HORKAGE_FIRMWARE_WARN },
4292 
4293 	/* Blacklist entries taken from Silicon Image 3124/3132
4294 	   Windows driver .inf file - also several Linux problem reports */
4295 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4296 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4297 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4298 
4299 	/* devices which puke on READ_NATIVE_MAX */
4300 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4301 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4302 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4303 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4304 
4305 	/* this one allows HPA unlocking but fails IOs on the area */
4306 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4307 
4308 	/* Devices which report 1 sector over size HPA */
4309 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4310 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4311 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4312 
4313 	/* Devices which get the IVB wrong */
4314 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4315 	/* Maybe we should just blacklist TSSTcorp... */
4316 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
4317 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
4318 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4319 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4320 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4321 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
4322 
4323 	/* Devices that do not need bridging limits applied */
4324 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4325 
4326 	/* Devices which aren't very happy with higher link speeds */
4327 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4328 
4329 	/*
4330 	 * Devices which choke on SETXFER.  Applies only if both the
4331 	 * device and controller are SATA.
4332 	 */
4333 	{ "PIONEER DVD-RW  DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER },
4334 
4335 	/* End Marker */
4336 	{ }
4337 };
4338 
4339 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4340 {
4341 	const char *p;
4342 	int len;
4343 
4344 	/*
4345 	 * check for trailing wildcard: *\0
4346 	 */
4347 	p = strchr(patt, wildchar);
4348 	if (p && ((*(p + 1)) == 0))
4349 		len = p - patt;
4350 	else {
4351 		len = strlen(name);
4352 		if (!len) {
4353 			if (!*patt)
4354 				return 0;
4355 			return -1;
4356 		}
4357 	}
4358 
4359 	return strncmp(patt, name, len);
4360 }
4361 
4362 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4363 {
4364 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4365 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4366 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4367 
4368 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4369 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4370 
4371 	while (ad->model_num) {
4372 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4373 			if (ad->model_rev == NULL)
4374 				return ad->horkage;
4375 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4376 				return ad->horkage;
4377 		}
4378 		ad++;
4379 	}
4380 	return 0;
4381 }
4382 
4383 static int ata_dma_blacklisted(const struct ata_device *dev)
4384 {
4385 	/* We don't support polling DMA.
4386 	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4387 	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4388 	 */
4389 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4390 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4391 		return 1;
4392 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4393 }
4394 
4395 /**
4396  *	ata_is_40wire		-	check drive side detection
4397  *	@dev: device
4398  *
4399  *	Perform drive side detection decoding, allowing for device vendors
4400  *	who can't follow the documentation.
4401  */
4402 
4403 static int ata_is_40wire(struct ata_device *dev)
4404 {
4405 	if (dev->horkage & ATA_HORKAGE_IVB)
4406 		return ata_drive_40wire_relaxed(dev->id);
4407 	return ata_drive_40wire(dev->id);
4408 }
4409 
4410 /**
4411  *	cable_is_40wire		-	40/80/SATA decider
4412  *	@ap: port to consider
4413  *
4414  *	This function encapsulates the policy for speed management
4415  *	in one place. At the moment we don't cache the result but
4416  *	there is a good case for setting ap->cbl to the result when
4417  *	we are called with unknown cables (and figuring out if it
4418  *	impacts hotplug at all).
4419  *
4420  *	Return 1 if the cable appears to be 40 wire.
4421  */
4422 
4423 static int cable_is_40wire(struct ata_port *ap)
4424 {
4425 	struct ata_link *link;
4426 	struct ata_device *dev;
4427 
4428 	/* If the controller thinks we are 40 wire, we are. */
4429 	if (ap->cbl == ATA_CBL_PATA40)
4430 		return 1;
4431 
4432 	/* If the controller thinks we are 80 wire, we are. */
4433 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4434 		return 0;
4435 
4436 	/* If the system is known to be 40 wire short cable (eg
4437 	 * laptop), then we allow 80 wire modes even if the drive
4438 	 * isn't sure.
4439 	 */
4440 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4441 		return 0;
4442 
4443 	/* If the controller doesn't know, we scan.
4444 	 *
4445 	 * Note: We look for all 40 wire detects at this point.  Any
4446 	 *       80 wire detect is taken to be 80 wire cable because
4447 	 * - in many setups only the one drive (slave if present) will
4448 	 *   give a valid detect
4449 	 * - if you have a non detect capable drive you don't want it
4450 	 *   to colour the choice
4451 	 */
4452 	ata_for_each_link(link, ap, EDGE) {
4453 		ata_for_each_dev(dev, link, ENABLED) {
4454 			if (!ata_is_40wire(dev))
4455 				return 0;
4456 		}
4457 	}
4458 	return 1;
4459 }
4460 
4461 /**
4462  *	ata_dev_xfermask - Compute supported xfermask of the given device
4463  *	@dev: Device to compute xfermask for
4464  *
4465  *	Compute supported xfermask of @dev and store it in
4466  *	dev->*_mask.  This function is responsible for applying all
4467  *	known limits including host controller limits, device
4468  *	blacklist, etc...
4469  *
4470  *	LOCKING:
4471  *	None.
4472  */
4473 static void ata_dev_xfermask(struct ata_device *dev)
4474 {
4475 	struct ata_link *link = dev->link;
4476 	struct ata_port *ap = link->ap;
4477 	struct ata_host *host = ap->host;
4478 	unsigned long xfer_mask;
4479 
4480 	/* controller modes available */
4481 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4482 				      ap->mwdma_mask, ap->udma_mask);
4483 
4484 	/* drive modes available */
4485 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4486 				       dev->mwdma_mask, dev->udma_mask);
4487 	xfer_mask &= ata_id_xfermask(dev->id);
4488 
4489 	/*
4490 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4491 	 *	cable
4492 	 */
4493 	if (ata_dev_pair(dev)) {
4494 		/* No PIO5 or PIO6 */
4495 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4496 		/* No MWDMA3 or MWDMA 4 */
4497 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4498 	}
4499 
4500 	if (ata_dma_blacklisted(dev)) {
4501 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4502 		ata_dev_printk(dev, KERN_WARNING,
4503 			       "device is on DMA blacklist, disabling DMA\n");
4504 	}
4505 
4506 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4507 	    host->simplex_claimed && host->simplex_claimed != ap) {
4508 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4509 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4510 			       "other device, disabling DMA\n");
4511 	}
4512 
4513 	if (ap->flags & ATA_FLAG_NO_IORDY)
4514 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4515 
4516 	if (ap->ops->mode_filter)
4517 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4518 
4519 	/* Apply cable rule here.  Don't apply it early because when
4520 	 * we handle hot plug the cable type can itself change.
4521 	 * Check this last so that we know if the transfer rate was
4522 	 * solely limited by the cable.
4523 	 * Unknown or 80 wire cables reported host side are checked
4524 	 * drive side as well. Cases where we know a 40wire cable
4525 	 * is used safely for 80 are not checked here.
4526 	 */
4527 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4528 		/* UDMA/44 or higher would be available */
4529 		if (cable_is_40wire(ap)) {
4530 			ata_dev_printk(dev, KERN_WARNING,
4531 				 "limited to UDMA/33 due to 40-wire cable\n");
4532 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4533 		}
4534 
4535 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4536 			    &dev->mwdma_mask, &dev->udma_mask);
4537 }
4538 
4539 /**
4540  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4541  *	@dev: Device to which command will be sent
4542  *
4543  *	Issue SET FEATURES - XFER MODE command to device @dev
4544  *	on port @ap.
4545  *
4546  *	LOCKING:
4547  *	PCI/etc. bus probe sem.
4548  *
4549  *	RETURNS:
4550  *	0 on success, AC_ERR_* mask otherwise.
4551  */
4552 
4553 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4554 {
4555 	struct ata_taskfile tf;
4556 	unsigned int err_mask;
4557 
4558 	/* set up set-features taskfile */
4559 	DPRINTK("set features - xfer mode\n");
4560 
4561 	/* Some controllers and ATAPI devices show flaky interrupt
4562 	 * behavior after setting xfer mode.  Use polling instead.
4563 	 */
4564 	ata_tf_init(dev, &tf);
4565 	tf.command = ATA_CMD_SET_FEATURES;
4566 	tf.feature = SETFEATURES_XFER;
4567 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4568 	tf.protocol = ATA_PROT_NODATA;
4569 	/* If we are using IORDY we must send the mode setting command */
4570 	if (ata_pio_need_iordy(dev))
4571 		tf.nsect = dev->xfer_mode;
4572 	/* If the device has IORDY and the controller does not - turn it off */
4573  	else if (ata_id_has_iordy(dev->id))
4574 		tf.nsect = 0x01;
4575 	else /* In the ancient relic department - skip all of this */
4576 		return 0;
4577 
4578 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4579 
4580 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4581 	return err_mask;
4582 }
4583 /**
4584  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4585  *	@dev: Device to which command will be sent
4586  *	@enable: Whether to enable or disable the feature
4587  *	@feature: The sector count represents the feature to set
4588  *
4589  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4590  *	on port @ap with sector count
4591  *
4592  *	LOCKING:
4593  *	PCI/etc. bus probe sem.
4594  *
4595  *	RETURNS:
4596  *	0 on success, AC_ERR_* mask otherwise.
4597  */
4598 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4599 					u8 feature)
4600 {
4601 	struct ata_taskfile tf;
4602 	unsigned int err_mask;
4603 
4604 	/* set up set-features taskfile */
4605 	DPRINTK("set features - SATA features\n");
4606 
4607 	ata_tf_init(dev, &tf);
4608 	tf.command = ATA_CMD_SET_FEATURES;
4609 	tf.feature = enable;
4610 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4611 	tf.protocol = ATA_PROT_NODATA;
4612 	tf.nsect = feature;
4613 
4614 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4615 
4616 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4617 	return err_mask;
4618 }
4619 
4620 /**
4621  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4622  *	@dev: Device to which command will be sent
4623  *	@heads: Number of heads (taskfile parameter)
4624  *	@sectors: Number of sectors (taskfile parameter)
4625  *
4626  *	LOCKING:
4627  *	Kernel thread context (may sleep)
4628  *
4629  *	RETURNS:
4630  *	0 on success, AC_ERR_* mask otherwise.
4631  */
4632 static unsigned int ata_dev_init_params(struct ata_device *dev,
4633 					u16 heads, u16 sectors)
4634 {
4635 	struct ata_taskfile tf;
4636 	unsigned int err_mask;
4637 
4638 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4639 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4640 		return AC_ERR_INVALID;
4641 
4642 	/* set up init dev params taskfile */
4643 	DPRINTK("init dev params \n");
4644 
4645 	ata_tf_init(dev, &tf);
4646 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4647 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4648 	tf.protocol = ATA_PROT_NODATA;
4649 	tf.nsect = sectors;
4650 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4651 
4652 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4653 	/* A clean abort indicates an original or just out of spec drive
4654 	   and we should continue as we issue the setup based on the
4655 	   drive reported working geometry */
4656 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4657 		err_mask = 0;
4658 
4659 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4660 	return err_mask;
4661 }
4662 
4663 /**
4664  *	ata_sg_clean - Unmap DMA memory associated with command
4665  *	@qc: Command containing DMA memory to be released
4666  *
4667  *	Unmap all mapped DMA memory associated with this command.
4668  *
4669  *	LOCKING:
4670  *	spin_lock_irqsave(host lock)
4671  */
4672 void ata_sg_clean(struct ata_queued_cmd *qc)
4673 {
4674 	struct ata_port *ap = qc->ap;
4675 	struct scatterlist *sg = qc->sg;
4676 	int dir = qc->dma_dir;
4677 
4678 	WARN_ON_ONCE(sg == NULL);
4679 
4680 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4681 
4682 	if (qc->n_elem)
4683 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4684 
4685 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4686 	qc->sg = NULL;
4687 }
4688 
4689 /**
4690  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4691  *	@qc: Metadata associated with taskfile to check
4692  *
4693  *	Allow low-level driver to filter ATA PACKET commands, returning
4694  *	a status indicating whether or not it is OK to use DMA for the
4695  *	supplied PACKET command.
4696  *
4697  *	LOCKING:
4698  *	spin_lock_irqsave(host lock)
4699  *
4700  *	RETURNS: 0 when ATAPI DMA can be used
4701  *               nonzero otherwise
4702  */
4703 int atapi_check_dma(struct ata_queued_cmd *qc)
4704 {
4705 	struct ata_port *ap = qc->ap;
4706 
4707 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4708 	 * few ATAPI devices choke on such DMA requests.
4709 	 */
4710 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4711 	    unlikely(qc->nbytes & 15))
4712 		return 1;
4713 
4714 	if (ap->ops->check_atapi_dma)
4715 		return ap->ops->check_atapi_dma(qc);
4716 
4717 	return 0;
4718 }
4719 
4720 /**
4721  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4722  *	@qc: ATA command in question
4723  *
4724  *	Non-NCQ commands cannot run with any other command, NCQ or
4725  *	not.  As upper layer only knows the queue depth, we are
4726  *	responsible for maintaining exclusion.  This function checks
4727  *	whether a new command @qc can be issued.
4728  *
4729  *	LOCKING:
4730  *	spin_lock_irqsave(host lock)
4731  *
4732  *	RETURNS:
4733  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4734  */
4735 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4736 {
4737 	struct ata_link *link = qc->dev->link;
4738 
4739 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4740 		if (!ata_tag_valid(link->active_tag))
4741 			return 0;
4742 	} else {
4743 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4744 			return 0;
4745 	}
4746 
4747 	return ATA_DEFER_LINK;
4748 }
4749 
4750 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4751 
4752 /**
4753  *	ata_sg_init - Associate command with scatter-gather table.
4754  *	@qc: Command to be associated
4755  *	@sg: Scatter-gather table.
4756  *	@n_elem: Number of elements in s/g table.
4757  *
4758  *	Initialize the data-related elements of queued_cmd @qc
4759  *	to point to a scatter-gather table @sg, containing @n_elem
4760  *	elements.
4761  *
4762  *	LOCKING:
4763  *	spin_lock_irqsave(host lock)
4764  */
4765 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4766 		 unsigned int n_elem)
4767 {
4768 	qc->sg = sg;
4769 	qc->n_elem = n_elem;
4770 	qc->cursg = qc->sg;
4771 }
4772 
4773 /**
4774  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4775  *	@qc: Command with scatter-gather table to be mapped.
4776  *
4777  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4778  *
4779  *	LOCKING:
4780  *	spin_lock_irqsave(host lock)
4781  *
4782  *	RETURNS:
4783  *	Zero on success, negative on error.
4784  *
4785  */
4786 static int ata_sg_setup(struct ata_queued_cmd *qc)
4787 {
4788 	struct ata_port *ap = qc->ap;
4789 	unsigned int n_elem;
4790 
4791 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4792 
4793 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4794 	if (n_elem < 1)
4795 		return -1;
4796 
4797 	DPRINTK("%d sg elements mapped\n", n_elem);
4798 	qc->orig_n_elem = qc->n_elem;
4799 	qc->n_elem = n_elem;
4800 	qc->flags |= ATA_QCFLAG_DMAMAP;
4801 
4802 	return 0;
4803 }
4804 
4805 /**
4806  *	swap_buf_le16 - swap halves of 16-bit words in place
4807  *	@buf:  Buffer to swap
4808  *	@buf_words:  Number of 16-bit words in buffer.
4809  *
4810  *	Swap halves of 16-bit words if needed to convert from
4811  *	little-endian byte order to native cpu byte order, or
4812  *	vice-versa.
4813  *
4814  *	LOCKING:
4815  *	Inherited from caller.
4816  */
4817 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4818 {
4819 #ifdef __BIG_ENDIAN
4820 	unsigned int i;
4821 
4822 	for (i = 0; i < buf_words; i++)
4823 		buf[i] = le16_to_cpu(buf[i]);
4824 #endif /* __BIG_ENDIAN */
4825 }
4826 
4827 /**
4828  *	ata_qc_new - Request an available ATA command, for queueing
4829  *	@ap: target port
4830  *
4831  *	LOCKING:
4832  *	None.
4833  */
4834 
4835 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4836 {
4837 	struct ata_queued_cmd *qc = NULL;
4838 	unsigned int i;
4839 
4840 	/* no command while frozen */
4841 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4842 		return NULL;
4843 
4844 	/* the last tag is reserved for internal command. */
4845 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4846 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4847 			qc = __ata_qc_from_tag(ap, i);
4848 			break;
4849 		}
4850 
4851 	if (qc)
4852 		qc->tag = i;
4853 
4854 	return qc;
4855 }
4856 
4857 /**
4858  *	ata_qc_new_init - Request an available ATA command, and initialize it
4859  *	@dev: Device from whom we request an available command structure
4860  *
4861  *	LOCKING:
4862  *	None.
4863  */
4864 
4865 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4866 {
4867 	struct ata_port *ap = dev->link->ap;
4868 	struct ata_queued_cmd *qc;
4869 
4870 	qc = ata_qc_new(ap);
4871 	if (qc) {
4872 		qc->scsicmd = NULL;
4873 		qc->ap = ap;
4874 		qc->dev = dev;
4875 
4876 		ata_qc_reinit(qc);
4877 	}
4878 
4879 	return qc;
4880 }
4881 
4882 /**
4883  *	ata_qc_free - free unused ata_queued_cmd
4884  *	@qc: Command to complete
4885  *
4886  *	Designed to free unused ata_queued_cmd object
4887  *	in case something prevents using it.
4888  *
4889  *	LOCKING:
4890  *	spin_lock_irqsave(host lock)
4891  */
4892 void ata_qc_free(struct ata_queued_cmd *qc)
4893 {
4894 	struct ata_port *ap = qc->ap;
4895 	unsigned int tag;
4896 
4897 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4898 
4899 	qc->flags = 0;
4900 	tag = qc->tag;
4901 	if (likely(ata_tag_valid(tag))) {
4902 		qc->tag = ATA_TAG_POISON;
4903 		clear_bit(tag, &ap->qc_allocated);
4904 	}
4905 }
4906 
4907 void __ata_qc_complete(struct ata_queued_cmd *qc)
4908 {
4909 	struct ata_port *ap = qc->ap;
4910 	struct ata_link *link = qc->dev->link;
4911 
4912 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4913 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4914 
4915 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4916 		ata_sg_clean(qc);
4917 
4918 	/* command should be marked inactive atomically with qc completion */
4919 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4920 		link->sactive &= ~(1 << qc->tag);
4921 		if (!link->sactive)
4922 			ap->nr_active_links--;
4923 	} else {
4924 		link->active_tag = ATA_TAG_POISON;
4925 		ap->nr_active_links--;
4926 	}
4927 
4928 	/* clear exclusive status */
4929 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4930 		     ap->excl_link == link))
4931 		ap->excl_link = NULL;
4932 
4933 	/* atapi: mark qc as inactive to prevent the interrupt handler
4934 	 * from completing the command twice later, before the error handler
4935 	 * is called. (when rc != 0 and atapi request sense is needed)
4936 	 */
4937 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4938 	ap->qc_active &= ~(1 << qc->tag);
4939 
4940 	/* call completion callback */
4941 	qc->complete_fn(qc);
4942 }
4943 
4944 static void fill_result_tf(struct ata_queued_cmd *qc)
4945 {
4946 	struct ata_port *ap = qc->ap;
4947 
4948 	qc->result_tf.flags = qc->tf.flags;
4949 	ap->ops->qc_fill_rtf(qc);
4950 }
4951 
4952 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4953 {
4954 	struct ata_device *dev = qc->dev;
4955 
4956 	if (ata_tag_internal(qc->tag))
4957 		return;
4958 
4959 	if (ata_is_nodata(qc->tf.protocol))
4960 		return;
4961 
4962 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4963 		return;
4964 
4965 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4966 }
4967 
4968 /**
4969  *	ata_qc_complete - Complete an active ATA command
4970  *	@qc: Command to complete
4971  *
4972  *	Indicate to the mid and upper layers that an ATA
4973  *	command has completed, with either an ok or not-ok status.
4974  *
4975  *	LOCKING:
4976  *	spin_lock_irqsave(host lock)
4977  */
4978 void ata_qc_complete(struct ata_queued_cmd *qc)
4979 {
4980 	struct ata_port *ap = qc->ap;
4981 
4982 	/* XXX: New EH and old EH use different mechanisms to
4983 	 * synchronize EH with regular execution path.
4984 	 *
4985 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4986 	 * Normal execution path is responsible for not accessing a
4987 	 * failed qc.  libata core enforces the rule by returning NULL
4988 	 * from ata_qc_from_tag() for failed qcs.
4989 	 *
4990 	 * Old EH depends on ata_qc_complete() nullifying completion
4991 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4992 	 * not synchronize with interrupt handler.  Only PIO task is
4993 	 * taken care of.
4994 	 */
4995 	if (ap->ops->error_handler) {
4996 		struct ata_device *dev = qc->dev;
4997 		struct ata_eh_info *ehi = &dev->link->eh_info;
4998 
4999 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5000 
5001 		if (unlikely(qc->err_mask))
5002 			qc->flags |= ATA_QCFLAG_FAILED;
5003 
5004 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5005 			if (!ata_tag_internal(qc->tag)) {
5006 				/* always fill result TF for failed qc */
5007 				fill_result_tf(qc);
5008 				ata_qc_schedule_eh(qc);
5009 				return;
5010 			}
5011 		}
5012 
5013 		/* read result TF if requested */
5014 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5015 			fill_result_tf(qc);
5016 
5017 		/* Some commands need post-processing after successful
5018 		 * completion.
5019 		 */
5020 		switch (qc->tf.command) {
5021 		case ATA_CMD_SET_FEATURES:
5022 			if (qc->tf.feature != SETFEATURES_WC_ON &&
5023 			    qc->tf.feature != SETFEATURES_WC_OFF)
5024 				break;
5025 			/* fall through */
5026 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5027 		case ATA_CMD_SET_MULTI: /* multi_count changed */
5028 			/* revalidate device */
5029 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5030 			ata_port_schedule_eh(ap);
5031 			break;
5032 
5033 		case ATA_CMD_SLEEP:
5034 			dev->flags |= ATA_DFLAG_SLEEPING;
5035 			break;
5036 		}
5037 
5038 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5039 			ata_verify_xfer(qc);
5040 
5041 		__ata_qc_complete(qc);
5042 	} else {
5043 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5044 			return;
5045 
5046 		/* read result TF if failed or requested */
5047 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5048 			fill_result_tf(qc);
5049 
5050 		__ata_qc_complete(qc);
5051 	}
5052 }
5053 
5054 /**
5055  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5056  *	@ap: port in question
5057  *	@qc_active: new qc_active mask
5058  *
5059  *	Complete in-flight commands.  This functions is meant to be
5060  *	called from low-level driver's interrupt routine to complete
5061  *	requests normally.  ap->qc_active and @qc_active is compared
5062  *	and commands are completed accordingly.
5063  *
5064  *	LOCKING:
5065  *	spin_lock_irqsave(host lock)
5066  *
5067  *	RETURNS:
5068  *	Number of completed commands on success, -errno otherwise.
5069  */
5070 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5071 {
5072 	int nr_done = 0;
5073 	u32 done_mask;
5074 
5075 	done_mask = ap->qc_active ^ qc_active;
5076 
5077 	if (unlikely(done_mask & qc_active)) {
5078 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5079 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5080 		return -EINVAL;
5081 	}
5082 
5083 	while (done_mask) {
5084 		struct ata_queued_cmd *qc;
5085 		unsigned int tag = __ffs(done_mask);
5086 
5087 		qc = ata_qc_from_tag(ap, tag);
5088 		if (qc) {
5089 			ata_qc_complete(qc);
5090 			nr_done++;
5091 		}
5092 		done_mask &= ~(1 << tag);
5093 	}
5094 
5095 	return nr_done;
5096 }
5097 
5098 /**
5099  *	ata_qc_issue - issue taskfile to device
5100  *	@qc: command to issue to device
5101  *
5102  *	Prepare an ATA command to submission to device.
5103  *	This includes mapping the data into a DMA-able
5104  *	area, filling in the S/G table, and finally
5105  *	writing the taskfile to hardware, starting the command.
5106  *
5107  *	LOCKING:
5108  *	spin_lock_irqsave(host lock)
5109  */
5110 void ata_qc_issue(struct ata_queued_cmd *qc)
5111 {
5112 	struct ata_port *ap = qc->ap;
5113 	struct ata_link *link = qc->dev->link;
5114 	u8 prot = qc->tf.protocol;
5115 
5116 	/* Make sure only one non-NCQ command is outstanding.  The
5117 	 * check is skipped for old EH because it reuses active qc to
5118 	 * request ATAPI sense.
5119 	 */
5120 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5121 
5122 	if (ata_is_ncq(prot)) {
5123 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5124 
5125 		if (!link->sactive)
5126 			ap->nr_active_links++;
5127 		link->sactive |= 1 << qc->tag;
5128 	} else {
5129 		WARN_ON_ONCE(link->sactive);
5130 
5131 		ap->nr_active_links++;
5132 		link->active_tag = qc->tag;
5133 	}
5134 
5135 	qc->flags |= ATA_QCFLAG_ACTIVE;
5136 	ap->qc_active |= 1 << qc->tag;
5137 
5138 	/* We guarantee to LLDs that they will have at least one
5139 	 * non-zero sg if the command is a data command.
5140 	 */
5141 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
5142 
5143 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5144 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5145 		if (ata_sg_setup(qc))
5146 			goto sg_err;
5147 
5148 	/* if device is sleeping, schedule reset and abort the link */
5149 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5150 		link->eh_info.action |= ATA_EH_RESET;
5151 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5152 		ata_link_abort(link);
5153 		return;
5154 	}
5155 
5156 	ap->ops->qc_prep(qc);
5157 
5158 	qc->err_mask |= ap->ops->qc_issue(qc);
5159 	if (unlikely(qc->err_mask))
5160 		goto err;
5161 	return;
5162 
5163 sg_err:
5164 	qc->err_mask |= AC_ERR_SYSTEM;
5165 err:
5166 	ata_qc_complete(qc);
5167 }
5168 
5169 /**
5170  *	sata_scr_valid - test whether SCRs are accessible
5171  *	@link: ATA link to test SCR accessibility for
5172  *
5173  *	Test whether SCRs are accessible for @link.
5174  *
5175  *	LOCKING:
5176  *	None.
5177  *
5178  *	RETURNS:
5179  *	1 if SCRs are accessible, 0 otherwise.
5180  */
5181 int sata_scr_valid(struct ata_link *link)
5182 {
5183 	struct ata_port *ap = link->ap;
5184 
5185 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5186 }
5187 
5188 /**
5189  *	sata_scr_read - read SCR register of the specified port
5190  *	@link: ATA link to read SCR for
5191  *	@reg: SCR to read
5192  *	@val: Place to store read value
5193  *
5194  *	Read SCR register @reg of @link into *@val.  This function is
5195  *	guaranteed to succeed if @link is ap->link, the cable type of
5196  *	the port is SATA and the port implements ->scr_read.
5197  *
5198  *	LOCKING:
5199  *	None if @link is ap->link.  Kernel thread context otherwise.
5200  *
5201  *	RETURNS:
5202  *	0 on success, negative errno on failure.
5203  */
5204 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5205 {
5206 	if (ata_is_host_link(link)) {
5207 		if (sata_scr_valid(link))
5208 			return link->ap->ops->scr_read(link, reg, val);
5209 		return -EOPNOTSUPP;
5210 	}
5211 
5212 	return sata_pmp_scr_read(link, reg, val);
5213 }
5214 
5215 /**
5216  *	sata_scr_write - write SCR register of the specified port
5217  *	@link: ATA link to write SCR for
5218  *	@reg: SCR to write
5219  *	@val: value to write
5220  *
5221  *	Write @val to SCR register @reg of @link.  This function is
5222  *	guaranteed to succeed if @link is ap->link, the cable type of
5223  *	the port is SATA and the port implements ->scr_read.
5224  *
5225  *	LOCKING:
5226  *	None if @link is ap->link.  Kernel thread context otherwise.
5227  *
5228  *	RETURNS:
5229  *	0 on success, negative errno on failure.
5230  */
5231 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5232 {
5233 	if (ata_is_host_link(link)) {
5234 		if (sata_scr_valid(link))
5235 			return link->ap->ops->scr_write(link, reg, val);
5236 		return -EOPNOTSUPP;
5237 	}
5238 
5239 	return sata_pmp_scr_write(link, reg, val);
5240 }
5241 
5242 /**
5243  *	sata_scr_write_flush - write SCR register of the specified port and flush
5244  *	@link: ATA link to write SCR for
5245  *	@reg: SCR to write
5246  *	@val: value to write
5247  *
5248  *	This function is identical to sata_scr_write() except that this
5249  *	function performs flush after writing to the register.
5250  *
5251  *	LOCKING:
5252  *	None if @link is ap->link.  Kernel thread context otherwise.
5253  *
5254  *	RETURNS:
5255  *	0 on success, negative errno on failure.
5256  */
5257 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5258 {
5259 	if (ata_is_host_link(link)) {
5260 		int rc;
5261 
5262 		if (sata_scr_valid(link)) {
5263 			rc = link->ap->ops->scr_write(link, reg, val);
5264 			if (rc == 0)
5265 				rc = link->ap->ops->scr_read(link, reg, &val);
5266 			return rc;
5267 		}
5268 		return -EOPNOTSUPP;
5269 	}
5270 
5271 	return sata_pmp_scr_write(link, reg, val);
5272 }
5273 
5274 /**
5275  *	ata_phys_link_online - test whether the given link is online
5276  *	@link: ATA link to test
5277  *
5278  *	Test whether @link is online.  Note that this function returns
5279  *	0 if online status of @link cannot be obtained, so
5280  *	ata_link_online(link) != !ata_link_offline(link).
5281  *
5282  *	LOCKING:
5283  *	None.
5284  *
5285  *	RETURNS:
5286  *	True if the port online status is available and online.
5287  */
5288 bool ata_phys_link_online(struct ata_link *link)
5289 {
5290 	u32 sstatus;
5291 
5292 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5293 	    ata_sstatus_online(sstatus))
5294 		return true;
5295 	return false;
5296 }
5297 
5298 /**
5299  *	ata_phys_link_offline - test whether the given link is offline
5300  *	@link: ATA link to test
5301  *
5302  *	Test whether @link is offline.  Note that this function
5303  *	returns 0 if offline status of @link cannot be obtained, so
5304  *	ata_link_online(link) != !ata_link_offline(link).
5305  *
5306  *	LOCKING:
5307  *	None.
5308  *
5309  *	RETURNS:
5310  *	True if the port offline status is available and offline.
5311  */
5312 bool ata_phys_link_offline(struct ata_link *link)
5313 {
5314 	u32 sstatus;
5315 
5316 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5317 	    !ata_sstatus_online(sstatus))
5318 		return true;
5319 	return false;
5320 }
5321 
5322 /**
5323  *	ata_link_online - test whether the given link is online
5324  *	@link: ATA link to test
5325  *
5326  *	Test whether @link is online.  This is identical to
5327  *	ata_phys_link_online() when there's no slave link.  When
5328  *	there's a slave link, this function should only be called on
5329  *	the master link and will return true if any of M/S links is
5330  *	online.
5331  *
5332  *	LOCKING:
5333  *	None.
5334  *
5335  *	RETURNS:
5336  *	True if the port online status is available and online.
5337  */
5338 bool ata_link_online(struct ata_link *link)
5339 {
5340 	struct ata_link *slave = link->ap->slave_link;
5341 
5342 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5343 
5344 	return ata_phys_link_online(link) ||
5345 		(slave && ata_phys_link_online(slave));
5346 }
5347 
5348 /**
5349  *	ata_link_offline - test whether the given link is offline
5350  *	@link: ATA link to test
5351  *
5352  *	Test whether @link is offline.  This is identical to
5353  *	ata_phys_link_offline() when there's no slave link.  When
5354  *	there's a slave link, this function should only be called on
5355  *	the master link and will return true if both M/S links are
5356  *	offline.
5357  *
5358  *	LOCKING:
5359  *	None.
5360  *
5361  *	RETURNS:
5362  *	True if the port offline status is available and offline.
5363  */
5364 bool ata_link_offline(struct ata_link *link)
5365 {
5366 	struct ata_link *slave = link->ap->slave_link;
5367 
5368 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5369 
5370 	return ata_phys_link_offline(link) &&
5371 		(!slave || ata_phys_link_offline(slave));
5372 }
5373 
5374 #ifdef CONFIG_PM
5375 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5376 			       unsigned int action, unsigned int ehi_flags,
5377 			       int wait)
5378 {
5379 	unsigned long flags;
5380 	int i, rc;
5381 
5382 	for (i = 0; i < host->n_ports; i++) {
5383 		struct ata_port *ap = host->ports[i];
5384 		struct ata_link *link;
5385 
5386 		/* Previous resume operation might still be in
5387 		 * progress.  Wait for PM_PENDING to clear.
5388 		 */
5389 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5390 			ata_port_wait_eh(ap);
5391 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5392 		}
5393 
5394 		/* request PM ops to EH */
5395 		spin_lock_irqsave(ap->lock, flags);
5396 
5397 		ap->pm_mesg = mesg;
5398 		if (wait) {
5399 			rc = 0;
5400 			ap->pm_result = &rc;
5401 		}
5402 
5403 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5404 		ata_for_each_link(link, ap, HOST_FIRST) {
5405 			link->eh_info.action |= action;
5406 			link->eh_info.flags |= ehi_flags;
5407 		}
5408 
5409 		ata_port_schedule_eh(ap);
5410 
5411 		spin_unlock_irqrestore(ap->lock, flags);
5412 
5413 		/* wait and check result */
5414 		if (wait) {
5415 			ata_port_wait_eh(ap);
5416 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5417 			if (rc)
5418 				return rc;
5419 		}
5420 	}
5421 
5422 	return 0;
5423 }
5424 
5425 /**
5426  *	ata_host_suspend - suspend host
5427  *	@host: host to suspend
5428  *	@mesg: PM message
5429  *
5430  *	Suspend @host.  Actual operation is performed by EH.  This
5431  *	function requests EH to perform PM operations and waits for EH
5432  *	to finish.
5433  *
5434  *	LOCKING:
5435  *	Kernel thread context (may sleep).
5436  *
5437  *	RETURNS:
5438  *	0 on success, -errno on failure.
5439  */
5440 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5441 {
5442 	int rc;
5443 
5444 	/*
5445 	 * disable link pm on all ports before requesting
5446 	 * any pm activity
5447 	 */
5448 	ata_lpm_enable(host);
5449 
5450 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5451 	if (rc == 0)
5452 		host->dev->power.power_state = mesg;
5453 	return rc;
5454 }
5455 
5456 /**
5457  *	ata_host_resume - resume host
5458  *	@host: host to resume
5459  *
5460  *	Resume @host.  Actual operation is performed by EH.  This
5461  *	function requests EH to perform PM operations and returns.
5462  *	Note that all resume operations are performed parallely.
5463  *
5464  *	LOCKING:
5465  *	Kernel thread context (may sleep).
5466  */
5467 void ata_host_resume(struct ata_host *host)
5468 {
5469 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5470 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5471 	host->dev->power.power_state = PMSG_ON;
5472 
5473 	/* reenable link pm */
5474 	ata_lpm_disable(host);
5475 }
5476 #endif
5477 
5478 /**
5479  *	ata_port_start - Set port up for dma.
5480  *	@ap: Port to initialize
5481  *
5482  *	Called just after data structures for each port are
5483  *	initialized.  Allocates space for PRD table.
5484  *
5485  *	May be used as the port_start() entry in ata_port_operations.
5486  *
5487  *	LOCKING:
5488  *	Inherited from caller.
5489  */
5490 int ata_port_start(struct ata_port *ap)
5491 {
5492 	struct device *dev = ap->dev;
5493 
5494 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5495 				      GFP_KERNEL);
5496 	if (!ap->prd)
5497 		return -ENOMEM;
5498 
5499 	return 0;
5500 }
5501 
5502 /**
5503  *	ata_dev_init - Initialize an ata_device structure
5504  *	@dev: Device structure to initialize
5505  *
5506  *	Initialize @dev in preparation for probing.
5507  *
5508  *	LOCKING:
5509  *	Inherited from caller.
5510  */
5511 void ata_dev_init(struct ata_device *dev)
5512 {
5513 	struct ata_link *link = ata_dev_phys_link(dev);
5514 	struct ata_port *ap = link->ap;
5515 	unsigned long flags;
5516 
5517 	/* SATA spd limit is bound to the attached device, reset together */
5518 	link->sata_spd_limit = link->hw_sata_spd_limit;
5519 	link->sata_spd = 0;
5520 
5521 	/* High bits of dev->flags are used to record warm plug
5522 	 * requests which occur asynchronously.  Synchronize using
5523 	 * host lock.
5524 	 */
5525 	spin_lock_irqsave(ap->lock, flags);
5526 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5527 	dev->horkage = 0;
5528 	spin_unlock_irqrestore(ap->lock, flags);
5529 
5530 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5531 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5532 	dev->pio_mask = UINT_MAX;
5533 	dev->mwdma_mask = UINT_MAX;
5534 	dev->udma_mask = UINT_MAX;
5535 }
5536 
5537 /**
5538  *	ata_link_init - Initialize an ata_link structure
5539  *	@ap: ATA port link is attached to
5540  *	@link: Link structure to initialize
5541  *	@pmp: Port multiplier port number
5542  *
5543  *	Initialize @link.
5544  *
5545  *	LOCKING:
5546  *	Kernel thread context (may sleep)
5547  */
5548 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5549 {
5550 	int i;
5551 
5552 	/* clear everything except for devices */
5553 	memset(link, 0, offsetof(struct ata_link, device[0]));
5554 
5555 	link->ap = ap;
5556 	link->pmp = pmp;
5557 	link->active_tag = ATA_TAG_POISON;
5558 	link->hw_sata_spd_limit = UINT_MAX;
5559 
5560 	/* can't use iterator, ap isn't initialized yet */
5561 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5562 		struct ata_device *dev = &link->device[i];
5563 
5564 		dev->link = link;
5565 		dev->devno = dev - link->device;
5566 		ata_dev_init(dev);
5567 	}
5568 }
5569 
5570 /**
5571  *	sata_link_init_spd - Initialize link->sata_spd_limit
5572  *	@link: Link to configure sata_spd_limit for
5573  *
5574  *	Initialize @link->[hw_]sata_spd_limit to the currently
5575  *	configured value.
5576  *
5577  *	LOCKING:
5578  *	Kernel thread context (may sleep).
5579  *
5580  *	RETURNS:
5581  *	0 on success, -errno on failure.
5582  */
5583 int sata_link_init_spd(struct ata_link *link)
5584 {
5585 	u8 spd;
5586 	int rc;
5587 
5588 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5589 	if (rc)
5590 		return rc;
5591 
5592 	spd = (link->saved_scontrol >> 4) & 0xf;
5593 	if (spd)
5594 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5595 
5596 	ata_force_link_limits(link);
5597 
5598 	link->sata_spd_limit = link->hw_sata_spd_limit;
5599 
5600 	return 0;
5601 }
5602 
5603 /**
5604  *	ata_port_alloc - allocate and initialize basic ATA port resources
5605  *	@host: ATA host this allocated port belongs to
5606  *
5607  *	Allocate and initialize basic ATA port resources.
5608  *
5609  *	RETURNS:
5610  *	Allocate ATA port on success, NULL on failure.
5611  *
5612  *	LOCKING:
5613  *	Inherited from calling layer (may sleep).
5614  */
5615 struct ata_port *ata_port_alloc(struct ata_host *host)
5616 {
5617 	struct ata_port *ap;
5618 
5619 	DPRINTK("ENTER\n");
5620 
5621 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5622 	if (!ap)
5623 		return NULL;
5624 
5625 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5626 	ap->lock = &host->lock;
5627 	ap->flags = ATA_FLAG_DISABLED;
5628 	ap->print_id = -1;
5629 	ap->ctl = ATA_DEVCTL_OBS;
5630 	ap->host = host;
5631 	ap->dev = host->dev;
5632 	ap->last_ctl = 0xFF;
5633 
5634 #if defined(ATA_VERBOSE_DEBUG)
5635 	/* turn on all debugging levels */
5636 	ap->msg_enable = 0x00FF;
5637 #elif defined(ATA_DEBUG)
5638 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5639 #else
5640 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5641 #endif
5642 
5643 #ifdef CONFIG_ATA_SFF
5644 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5645 #else
5646 	INIT_DELAYED_WORK(&ap->port_task, NULL);
5647 #endif
5648 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5649 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5650 	INIT_LIST_HEAD(&ap->eh_done_q);
5651 	init_waitqueue_head(&ap->eh_wait_q);
5652 	init_completion(&ap->park_req_pending);
5653 	init_timer_deferrable(&ap->fastdrain_timer);
5654 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5655 	ap->fastdrain_timer.data = (unsigned long)ap;
5656 
5657 	ap->cbl = ATA_CBL_NONE;
5658 
5659 	ata_link_init(ap, &ap->link, 0);
5660 
5661 #ifdef ATA_IRQ_TRAP
5662 	ap->stats.unhandled_irq = 1;
5663 	ap->stats.idle_irq = 1;
5664 #endif
5665 	return ap;
5666 }
5667 
5668 static void ata_host_release(struct device *gendev, void *res)
5669 {
5670 	struct ata_host *host = dev_get_drvdata(gendev);
5671 	int i;
5672 
5673 	for (i = 0; i < host->n_ports; i++) {
5674 		struct ata_port *ap = host->ports[i];
5675 
5676 		if (!ap)
5677 			continue;
5678 
5679 		if (ap->scsi_host)
5680 			scsi_host_put(ap->scsi_host);
5681 
5682 		kfree(ap->pmp_link);
5683 		kfree(ap->slave_link);
5684 		kfree(ap);
5685 		host->ports[i] = NULL;
5686 	}
5687 
5688 	dev_set_drvdata(gendev, NULL);
5689 }
5690 
5691 /**
5692  *	ata_host_alloc - allocate and init basic ATA host resources
5693  *	@dev: generic device this host is associated with
5694  *	@max_ports: maximum number of ATA ports associated with this host
5695  *
5696  *	Allocate and initialize basic ATA host resources.  LLD calls
5697  *	this function to allocate a host, initializes it fully and
5698  *	attaches it using ata_host_register().
5699  *
5700  *	@max_ports ports are allocated and host->n_ports is
5701  *	initialized to @max_ports.  The caller is allowed to decrease
5702  *	host->n_ports before calling ata_host_register().  The unused
5703  *	ports will be automatically freed on registration.
5704  *
5705  *	RETURNS:
5706  *	Allocate ATA host on success, NULL on failure.
5707  *
5708  *	LOCKING:
5709  *	Inherited from calling layer (may sleep).
5710  */
5711 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5712 {
5713 	struct ata_host *host;
5714 	size_t sz;
5715 	int i;
5716 
5717 	DPRINTK("ENTER\n");
5718 
5719 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5720 		return NULL;
5721 
5722 	/* alloc a container for our list of ATA ports (buses) */
5723 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5724 	/* alloc a container for our list of ATA ports (buses) */
5725 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5726 	if (!host)
5727 		goto err_out;
5728 
5729 	devres_add(dev, host);
5730 	dev_set_drvdata(dev, host);
5731 
5732 	spin_lock_init(&host->lock);
5733 	host->dev = dev;
5734 	host->n_ports = max_ports;
5735 
5736 	/* allocate ports bound to this host */
5737 	for (i = 0; i < max_ports; i++) {
5738 		struct ata_port *ap;
5739 
5740 		ap = ata_port_alloc(host);
5741 		if (!ap)
5742 			goto err_out;
5743 
5744 		ap->port_no = i;
5745 		host->ports[i] = ap;
5746 	}
5747 
5748 	devres_remove_group(dev, NULL);
5749 	return host;
5750 
5751  err_out:
5752 	devres_release_group(dev, NULL);
5753 	return NULL;
5754 }
5755 
5756 /**
5757  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5758  *	@dev: generic device this host is associated with
5759  *	@ppi: array of ATA port_info to initialize host with
5760  *	@n_ports: number of ATA ports attached to this host
5761  *
5762  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5763  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5764  *	last entry will be used for the remaining ports.
5765  *
5766  *	RETURNS:
5767  *	Allocate ATA host on success, NULL on failure.
5768  *
5769  *	LOCKING:
5770  *	Inherited from calling layer (may sleep).
5771  */
5772 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5773 				      const struct ata_port_info * const * ppi,
5774 				      int n_ports)
5775 {
5776 	const struct ata_port_info *pi;
5777 	struct ata_host *host;
5778 	int i, j;
5779 
5780 	host = ata_host_alloc(dev, n_ports);
5781 	if (!host)
5782 		return NULL;
5783 
5784 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5785 		struct ata_port *ap = host->ports[i];
5786 
5787 		if (ppi[j])
5788 			pi = ppi[j++];
5789 
5790 		ap->pio_mask = pi->pio_mask;
5791 		ap->mwdma_mask = pi->mwdma_mask;
5792 		ap->udma_mask = pi->udma_mask;
5793 		ap->flags |= pi->flags;
5794 		ap->link.flags |= pi->link_flags;
5795 		ap->ops = pi->port_ops;
5796 
5797 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5798 			host->ops = pi->port_ops;
5799 	}
5800 
5801 	return host;
5802 }
5803 
5804 /**
5805  *	ata_slave_link_init - initialize slave link
5806  *	@ap: port to initialize slave link for
5807  *
5808  *	Create and initialize slave link for @ap.  This enables slave
5809  *	link handling on the port.
5810  *
5811  *	In libata, a port contains links and a link contains devices.
5812  *	There is single host link but if a PMP is attached to it,
5813  *	there can be multiple fan-out links.  On SATA, there's usually
5814  *	a single device connected to a link but PATA and SATA
5815  *	controllers emulating TF based interface can have two - master
5816  *	and slave.
5817  *
5818  *	However, there are a few controllers which don't fit into this
5819  *	abstraction too well - SATA controllers which emulate TF
5820  *	interface with both master and slave devices but also have
5821  *	separate SCR register sets for each device.  These controllers
5822  *	need separate links for physical link handling
5823  *	(e.g. onlineness, link speed) but should be treated like a
5824  *	traditional M/S controller for everything else (e.g. command
5825  *	issue, softreset).
5826  *
5827  *	slave_link is libata's way of handling this class of
5828  *	controllers without impacting core layer too much.  For
5829  *	anything other than physical link handling, the default host
5830  *	link is used for both master and slave.  For physical link
5831  *	handling, separate @ap->slave_link is used.  All dirty details
5832  *	are implemented inside libata core layer.  From LLD's POV, the
5833  *	only difference is that prereset, hardreset and postreset are
5834  *	called once more for the slave link, so the reset sequence
5835  *	looks like the following.
5836  *
5837  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5838  *	softreset(M) -> postreset(M) -> postreset(S)
5839  *
5840  *	Note that softreset is called only for the master.  Softreset
5841  *	resets both M/S by definition, so SRST on master should handle
5842  *	both (the standard method will work just fine).
5843  *
5844  *	LOCKING:
5845  *	Should be called before host is registered.
5846  *
5847  *	RETURNS:
5848  *	0 on success, -errno on failure.
5849  */
5850 int ata_slave_link_init(struct ata_port *ap)
5851 {
5852 	struct ata_link *link;
5853 
5854 	WARN_ON(ap->slave_link);
5855 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5856 
5857 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5858 	if (!link)
5859 		return -ENOMEM;
5860 
5861 	ata_link_init(ap, link, 1);
5862 	ap->slave_link = link;
5863 	return 0;
5864 }
5865 
5866 static void ata_host_stop(struct device *gendev, void *res)
5867 {
5868 	struct ata_host *host = dev_get_drvdata(gendev);
5869 	int i;
5870 
5871 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5872 
5873 	for (i = 0; i < host->n_ports; i++) {
5874 		struct ata_port *ap = host->ports[i];
5875 
5876 		if (ap->ops->port_stop)
5877 			ap->ops->port_stop(ap);
5878 	}
5879 
5880 	if (host->ops->host_stop)
5881 		host->ops->host_stop(host);
5882 }
5883 
5884 /**
5885  *	ata_finalize_port_ops - finalize ata_port_operations
5886  *	@ops: ata_port_operations to finalize
5887  *
5888  *	An ata_port_operations can inherit from another ops and that
5889  *	ops can again inherit from another.  This can go on as many
5890  *	times as necessary as long as there is no loop in the
5891  *	inheritance chain.
5892  *
5893  *	Ops tables are finalized when the host is started.  NULL or
5894  *	unspecified entries are inherited from the closet ancestor
5895  *	which has the method and the entry is populated with it.
5896  *	After finalization, the ops table directly points to all the
5897  *	methods and ->inherits is no longer necessary and cleared.
5898  *
5899  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5900  *
5901  *	LOCKING:
5902  *	None.
5903  */
5904 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5905 {
5906 	static DEFINE_SPINLOCK(lock);
5907 	const struct ata_port_operations *cur;
5908 	void **begin = (void **)ops;
5909 	void **end = (void **)&ops->inherits;
5910 	void **pp;
5911 
5912 	if (!ops || !ops->inherits)
5913 		return;
5914 
5915 	spin_lock(&lock);
5916 
5917 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5918 		void **inherit = (void **)cur;
5919 
5920 		for (pp = begin; pp < end; pp++, inherit++)
5921 			if (!*pp)
5922 				*pp = *inherit;
5923 	}
5924 
5925 	for (pp = begin; pp < end; pp++)
5926 		if (IS_ERR(*pp))
5927 			*pp = NULL;
5928 
5929 	ops->inherits = NULL;
5930 
5931 	spin_unlock(&lock);
5932 }
5933 
5934 /**
5935  *	ata_host_start - start and freeze ports of an ATA host
5936  *	@host: ATA host to start ports for
5937  *
5938  *	Start and then freeze ports of @host.  Started status is
5939  *	recorded in host->flags, so this function can be called
5940  *	multiple times.  Ports are guaranteed to get started only
5941  *	once.  If host->ops isn't initialized yet, its set to the
5942  *	first non-dummy port ops.
5943  *
5944  *	LOCKING:
5945  *	Inherited from calling layer (may sleep).
5946  *
5947  *	RETURNS:
5948  *	0 if all ports are started successfully, -errno otherwise.
5949  */
5950 int ata_host_start(struct ata_host *host)
5951 {
5952 	int have_stop = 0;
5953 	void *start_dr = NULL;
5954 	int i, rc;
5955 
5956 	if (host->flags & ATA_HOST_STARTED)
5957 		return 0;
5958 
5959 	ata_finalize_port_ops(host->ops);
5960 
5961 	for (i = 0; i < host->n_ports; i++) {
5962 		struct ata_port *ap = host->ports[i];
5963 
5964 		ata_finalize_port_ops(ap->ops);
5965 
5966 		if (!host->ops && !ata_port_is_dummy(ap))
5967 			host->ops = ap->ops;
5968 
5969 		if (ap->ops->port_stop)
5970 			have_stop = 1;
5971 	}
5972 
5973 	if (host->ops->host_stop)
5974 		have_stop = 1;
5975 
5976 	if (have_stop) {
5977 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5978 		if (!start_dr)
5979 			return -ENOMEM;
5980 	}
5981 
5982 	for (i = 0; i < host->n_ports; i++) {
5983 		struct ata_port *ap = host->ports[i];
5984 
5985 		if (ap->ops->port_start) {
5986 			rc = ap->ops->port_start(ap);
5987 			if (rc) {
5988 				if (rc != -ENODEV)
5989 					dev_printk(KERN_ERR, host->dev,
5990 						"failed to start port %d "
5991 						"(errno=%d)\n", i, rc);
5992 				goto err_out;
5993 			}
5994 		}
5995 		ata_eh_freeze_port(ap);
5996 	}
5997 
5998 	if (start_dr)
5999 		devres_add(host->dev, start_dr);
6000 	host->flags |= ATA_HOST_STARTED;
6001 	return 0;
6002 
6003  err_out:
6004 	while (--i >= 0) {
6005 		struct ata_port *ap = host->ports[i];
6006 
6007 		if (ap->ops->port_stop)
6008 			ap->ops->port_stop(ap);
6009 	}
6010 	devres_free(start_dr);
6011 	return rc;
6012 }
6013 
6014 /**
6015  *	ata_sas_host_init - Initialize a host struct
6016  *	@host:	host to initialize
6017  *	@dev:	device host is attached to
6018  *	@flags:	host flags
6019  *	@ops:	port_ops
6020  *
6021  *	LOCKING:
6022  *	PCI/etc. bus probe sem.
6023  *
6024  */
6025 /* KILLME - the only user left is ipr */
6026 void ata_host_init(struct ata_host *host, struct device *dev,
6027 		   unsigned long flags, struct ata_port_operations *ops)
6028 {
6029 	spin_lock_init(&host->lock);
6030 	host->dev = dev;
6031 	host->flags = flags;
6032 	host->ops = ops;
6033 }
6034 
6035 
6036 static void async_port_probe(void *data, async_cookie_t cookie)
6037 {
6038 	int rc;
6039 	struct ata_port *ap = data;
6040 
6041 	/*
6042 	 * If we're not allowed to scan this host in parallel,
6043 	 * we need to wait until all previous scans have completed
6044 	 * before going further.
6045 	 * Jeff Garzik says this is only within a controller, so we
6046 	 * don't need to wait for port 0, only for later ports.
6047 	 */
6048 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6049 		async_synchronize_cookie(cookie);
6050 
6051 	/* probe */
6052 	if (ap->ops->error_handler) {
6053 		struct ata_eh_info *ehi = &ap->link.eh_info;
6054 		unsigned long flags;
6055 
6056 		ata_port_probe(ap);
6057 
6058 		/* kick EH for boot probing */
6059 		spin_lock_irqsave(ap->lock, flags);
6060 
6061 		ehi->probe_mask |= ATA_ALL_DEVICES;
6062 		ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
6063 		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6064 
6065 		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6066 		ap->pflags |= ATA_PFLAG_LOADING;
6067 		ata_port_schedule_eh(ap);
6068 
6069 		spin_unlock_irqrestore(ap->lock, flags);
6070 
6071 		/* wait for EH to finish */
6072 		ata_port_wait_eh(ap);
6073 	} else {
6074 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6075 		rc = ata_bus_probe(ap);
6076 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6077 
6078 		if (rc) {
6079 			/* FIXME: do something useful here?
6080 			 * Current libata behavior will
6081 			 * tear down everything when
6082 			 * the module is removed
6083 			 * or the h/w is unplugged.
6084 			 */
6085 		}
6086 	}
6087 
6088 	/* in order to keep device order, we need to synchronize at this point */
6089 	async_synchronize_cookie(cookie);
6090 
6091 	ata_scsi_scan_host(ap, 1);
6092 
6093 }
6094 /**
6095  *	ata_host_register - register initialized ATA host
6096  *	@host: ATA host to register
6097  *	@sht: template for SCSI host
6098  *
6099  *	Register initialized ATA host.  @host is allocated using
6100  *	ata_host_alloc() and fully initialized by LLD.  This function
6101  *	starts ports, registers @host with ATA and SCSI layers and
6102  *	probe registered devices.
6103  *
6104  *	LOCKING:
6105  *	Inherited from calling layer (may sleep).
6106  *
6107  *	RETURNS:
6108  *	0 on success, -errno otherwise.
6109  */
6110 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6111 {
6112 	int i, rc;
6113 
6114 	/* host must have been started */
6115 	if (!(host->flags & ATA_HOST_STARTED)) {
6116 		dev_printk(KERN_ERR, host->dev,
6117 			   "BUG: trying to register unstarted host\n");
6118 		WARN_ON(1);
6119 		return -EINVAL;
6120 	}
6121 
6122 	/* Blow away unused ports.  This happens when LLD can't
6123 	 * determine the exact number of ports to allocate at
6124 	 * allocation time.
6125 	 */
6126 	for (i = host->n_ports; host->ports[i]; i++)
6127 		kfree(host->ports[i]);
6128 
6129 	/* give ports names and add SCSI hosts */
6130 	for (i = 0; i < host->n_ports; i++)
6131 		host->ports[i]->print_id = ata_print_id++;
6132 
6133 	rc = ata_scsi_add_hosts(host, sht);
6134 	if (rc)
6135 		return rc;
6136 
6137 	/* associate with ACPI nodes */
6138 	ata_acpi_associate(host);
6139 
6140 	/* set cable, sata_spd_limit and report */
6141 	for (i = 0; i < host->n_ports; i++) {
6142 		struct ata_port *ap = host->ports[i];
6143 		unsigned long xfer_mask;
6144 
6145 		/* set SATA cable type if still unset */
6146 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6147 			ap->cbl = ATA_CBL_SATA;
6148 
6149 		/* init sata_spd_limit to the current value */
6150 		sata_link_init_spd(&ap->link);
6151 		if (ap->slave_link)
6152 			sata_link_init_spd(ap->slave_link);
6153 
6154 		/* print per-port info to dmesg */
6155 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6156 					      ap->udma_mask);
6157 
6158 		if (!ata_port_is_dummy(ap)) {
6159 			ata_port_printk(ap, KERN_INFO,
6160 					"%cATA max %s %s\n",
6161 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6162 					ata_mode_string(xfer_mask),
6163 					ap->link.eh_info.desc);
6164 			ata_ehi_clear_desc(&ap->link.eh_info);
6165 		} else
6166 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6167 	}
6168 
6169 	/* perform each probe asynchronously */
6170 	for (i = 0; i < host->n_ports; i++) {
6171 		struct ata_port *ap = host->ports[i];
6172 		async_schedule(async_port_probe, ap);
6173 	}
6174 
6175 	return 0;
6176 }
6177 
6178 /**
6179  *	ata_host_activate - start host, request IRQ and register it
6180  *	@host: target ATA host
6181  *	@irq: IRQ to request
6182  *	@irq_handler: irq_handler used when requesting IRQ
6183  *	@irq_flags: irq_flags used when requesting IRQ
6184  *	@sht: scsi_host_template to use when registering the host
6185  *
6186  *	After allocating an ATA host and initializing it, most libata
6187  *	LLDs perform three steps to activate the host - start host,
6188  *	request IRQ and register it.  This helper takes necessasry
6189  *	arguments and performs the three steps in one go.
6190  *
6191  *	An invalid IRQ skips the IRQ registration and expects the host to
6192  *	have set polling mode on the port. In this case, @irq_handler
6193  *	should be NULL.
6194  *
6195  *	LOCKING:
6196  *	Inherited from calling layer (may sleep).
6197  *
6198  *	RETURNS:
6199  *	0 on success, -errno otherwise.
6200  */
6201 int ata_host_activate(struct ata_host *host, int irq,
6202 		      irq_handler_t irq_handler, unsigned long irq_flags,
6203 		      struct scsi_host_template *sht)
6204 {
6205 	int i, rc;
6206 
6207 	rc = ata_host_start(host);
6208 	if (rc)
6209 		return rc;
6210 
6211 	/* Special case for polling mode */
6212 	if (!irq) {
6213 		WARN_ON(irq_handler);
6214 		return ata_host_register(host, sht);
6215 	}
6216 
6217 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6218 			      dev_driver_string(host->dev), host);
6219 	if (rc)
6220 		return rc;
6221 
6222 	for (i = 0; i < host->n_ports; i++)
6223 		ata_port_desc(host->ports[i], "irq %d", irq);
6224 
6225 	rc = ata_host_register(host, sht);
6226 	/* if failed, just free the IRQ and leave ports alone */
6227 	if (rc)
6228 		devm_free_irq(host->dev, irq, host);
6229 
6230 	return rc;
6231 }
6232 
6233 /**
6234  *	ata_port_detach - Detach ATA port in prepration of device removal
6235  *	@ap: ATA port to be detached
6236  *
6237  *	Detach all ATA devices and the associated SCSI devices of @ap;
6238  *	then, remove the associated SCSI host.  @ap is guaranteed to
6239  *	be quiescent on return from this function.
6240  *
6241  *	LOCKING:
6242  *	Kernel thread context (may sleep).
6243  */
6244 static void ata_port_detach(struct ata_port *ap)
6245 {
6246 	unsigned long flags;
6247 
6248 	if (!ap->ops->error_handler)
6249 		goto skip_eh;
6250 
6251 	/* tell EH we're leaving & flush EH */
6252 	spin_lock_irqsave(ap->lock, flags);
6253 	ap->pflags |= ATA_PFLAG_UNLOADING;
6254 	ata_port_schedule_eh(ap);
6255 	spin_unlock_irqrestore(ap->lock, flags);
6256 
6257 	/* wait till EH commits suicide */
6258 	ata_port_wait_eh(ap);
6259 
6260 	/* it better be dead now */
6261 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6262 
6263 	cancel_rearming_delayed_work(&ap->hotplug_task);
6264 
6265  skip_eh:
6266 	/* remove the associated SCSI host */
6267 	scsi_remove_host(ap->scsi_host);
6268 }
6269 
6270 /**
6271  *	ata_host_detach - Detach all ports of an ATA host
6272  *	@host: Host to detach
6273  *
6274  *	Detach all ports of @host.
6275  *
6276  *	LOCKING:
6277  *	Kernel thread context (may sleep).
6278  */
6279 void ata_host_detach(struct ata_host *host)
6280 {
6281 	int i;
6282 
6283 	for (i = 0; i < host->n_ports; i++)
6284 		ata_port_detach(host->ports[i]);
6285 
6286 	/* the host is dead now, dissociate ACPI */
6287 	ata_acpi_dissociate(host);
6288 }
6289 
6290 #ifdef CONFIG_PCI
6291 
6292 /**
6293  *	ata_pci_remove_one - PCI layer callback for device removal
6294  *	@pdev: PCI device that was removed
6295  *
6296  *	PCI layer indicates to libata via this hook that hot-unplug or
6297  *	module unload event has occurred.  Detach all ports.  Resource
6298  *	release is handled via devres.
6299  *
6300  *	LOCKING:
6301  *	Inherited from PCI layer (may sleep).
6302  */
6303 void ata_pci_remove_one(struct pci_dev *pdev)
6304 {
6305 	struct device *dev = &pdev->dev;
6306 	struct ata_host *host = dev_get_drvdata(dev);
6307 
6308 	ata_host_detach(host);
6309 }
6310 
6311 /* move to PCI subsystem */
6312 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6313 {
6314 	unsigned long tmp = 0;
6315 
6316 	switch (bits->width) {
6317 	case 1: {
6318 		u8 tmp8 = 0;
6319 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6320 		tmp = tmp8;
6321 		break;
6322 	}
6323 	case 2: {
6324 		u16 tmp16 = 0;
6325 		pci_read_config_word(pdev, bits->reg, &tmp16);
6326 		tmp = tmp16;
6327 		break;
6328 	}
6329 	case 4: {
6330 		u32 tmp32 = 0;
6331 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6332 		tmp = tmp32;
6333 		break;
6334 	}
6335 
6336 	default:
6337 		return -EINVAL;
6338 	}
6339 
6340 	tmp &= bits->mask;
6341 
6342 	return (tmp == bits->val) ? 1 : 0;
6343 }
6344 
6345 #ifdef CONFIG_PM
6346 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6347 {
6348 	pci_save_state(pdev);
6349 	pci_disable_device(pdev);
6350 
6351 	if (mesg.event & PM_EVENT_SLEEP)
6352 		pci_set_power_state(pdev, PCI_D3hot);
6353 }
6354 
6355 int ata_pci_device_do_resume(struct pci_dev *pdev)
6356 {
6357 	int rc;
6358 
6359 	pci_set_power_state(pdev, PCI_D0);
6360 	pci_restore_state(pdev);
6361 
6362 	rc = pcim_enable_device(pdev);
6363 	if (rc) {
6364 		dev_printk(KERN_ERR, &pdev->dev,
6365 			   "failed to enable device after resume (%d)\n", rc);
6366 		return rc;
6367 	}
6368 
6369 	pci_set_master(pdev);
6370 	return 0;
6371 }
6372 
6373 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6374 {
6375 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6376 	int rc = 0;
6377 
6378 	rc = ata_host_suspend(host, mesg);
6379 	if (rc)
6380 		return rc;
6381 
6382 	ata_pci_device_do_suspend(pdev, mesg);
6383 
6384 	return 0;
6385 }
6386 
6387 int ata_pci_device_resume(struct pci_dev *pdev)
6388 {
6389 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6390 	int rc;
6391 
6392 	rc = ata_pci_device_do_resume(pdev);
6393 	if (rc == 0)
6394 		ata_host_resume(host);
6395 	return rc;
6396 }
6397 #endif /* CONFIG_PM */
6398 
6399 #endif /* CONFIG_PCI */
6400 
6401 static int __init ata_parse_force_one(char **cur,
6402 				      struct ata_force_ent *force_ent,
6403 				      const char **reason)
6404 {
6405 	/* FIXME: Currently, there's no way to tag init const data and
6406 	 * using __initdata causes build failure on some versions of
6407 	 * gcc.  Once __initdataconst is implemented, add const to the
6408 	 * following structure.
6409 	 */
6410 	static struct ata_force_param force_tbl[] __initdata = {
6411 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6412 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6413 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6414 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6415 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6416 		{ "sata",	.cbl		= ATA_CBL_SATA },
6417 		{ "1.5Gbps",	.spd_limit	= 1 },
6418 		{ "3.0Gbps",	.spd_limit	= 2 },
6419 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6420 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6421 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6422 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6423 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6424 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6425 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6426 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6427 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6428 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6429 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6430 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6431 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6432 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6433 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6434 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6435 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6436 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6437 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6438 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6439 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6440 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6441 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6442 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6443 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6444 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6445 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6446 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6447 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6448 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6449 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6450 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6451 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6452 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6453 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6454 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6455 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6456 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6457 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6458 	};
6459 	char *start = *cur, *p = *cur;
6460 	char *id, *val, *endp;
6461 	const struct ata_force_param *match_fp = NULL;
6462 	int nr_matches = 0, i;
6463 
6464 	/* find where this param ends and update *cur */
6465 	while (*p != '\0' && *p != ',')
6466 		p++;
6467 
6468 	if (*p == '\0')
6469 		*cur = p;
6470 	else
6471 		*cur = p + 1;
6472 
6473 	*p = '\0';
6474 
6475 	/* parse */
6476 	p = strchr(start, ':');
6477 	if (!p) {
6478 		val = strstrip(start);
6479 		goto parse_val;
6480 	}
6481 	*p = '\0';
6482 
6483 	id = strstrip(start);
6484 	val = strstrip(p + 1);
6485 
6486 	/* parse id */
6487 	p = strchr(id, '.');
6488 	if (p) {
6489 		*p++ = '\0';
6490 		force_ent->device = simple_strtoul(p, &endp, 10);
6491 		if (p == endp || *endp != '\0') {
6492 			*reason = "invalid device";
6493 			return -EINVAL;
6494 		}
6495 	}
6496 
6497 	force_ent->port = simple_strtoul(id, &endp, 10);
6498 	if (p == endp || *endp != '\0') {
6499 		*reason = "invalid port/link";
6500 		return -EINVAL;
6501 	}
6502 
6503  parse_val:
6504 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6505 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6506 		const struct ata_force_param *fp = &force_tbl[i];
6507 
6508 		if (strncasecmp(val, fp->name, strlen(val)))
6509 			continue;
6510 
6511 		nr_matches++;
6512 		match_fp = fp;
6513 
6514 		if (strcasecmp(val, fp->name) == 0) {
6515 			nr_matches = 1;
6516 			break;
6517 		}
6518 	}
6519 
6520 	if (!nr_matches) {
6521 		*reason = "unknown value";
6522 		return -EINVAL;
6523 	}
6524 	if (nr_matches > 1) {
6525 		*reason = "ambigious value";
6526 		return -EINVAL;
6527 	}
6528 
6529 	force_ent->param = *match_fp;
6530 
6531 	return 0;
6532 }
6533 
6534 static void __init ata_parse_force_param(void)
6535 {
6536 	int idx = 0, size = 1;
6537 	int last_port = -1, last_device = -1;
6538 	char *p, *cur, *next;
6539 
6540 	/* calculate maximum number of params and allocate force_tbl */
6541 	for (p = ata_force_param_buf; *p; p++)
6542 		if (*p == ',')
6543 			size++;
6544 
6545 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6546 	if (!ata_force_tbl) {
6547 		printk(KERN_WARNING "ata: failed to extend force table, "
6548 		       "libata.force ignored\n");
6549 		return;
6550 	}
6551 
6552 	/* parse and populate the table */
6553 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6554 		const char *reason = "";
6555 		struct ata_force_ent te = { .port = -1, .device = -1 };
6556 
6557 		next = cur;
6558 		if (ata_parse_force_one(&next, &te, &reason)) {
6559 			printk(KERN_WARNING "ata: failed to parse force "
6560 			       "parameter \"%s\" (%s)\n",
6561 			       cur, reason);
6562 			continue;
6563 		}
6564 
6565 		if (te.port == -1) {
6566 			te.port = last_port;
6567 			te.device = last_device;
6568 		}
6569 
6570 		ata_force_tbl[idx++] = te;
6571 
6572 		last_port = te.port;
6573 		last_device = te.device;
6574 	}
6575 
6576 	ata_force_tbl_size = idx;
6577 }
6578 
6579 static int __init ata_init(void)
6580 {
6581 	ata_parse_force_param();
6582 
6583 	ata_wq = create_workqueue("ata");
6584 	if (!ata_wq)
6585 		goto free_force_tbl;
6586 
6587 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6588 	if (!ata_aux_wq)
6589 		goto free_wq;
6590 
6591 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6592 	return 0;
6593 
6594 free_wq:
6595 	destroy_workqueue(ata_wq);
6596 free_force_tbl:
6597 	kfree(ata_force_tbl);
6598 	return -ENOMEM;
6599 }
6600 
6601 static void __exit ata_exit(void)
6602 {
6603 	kfree(ata_force_tbl);
6604 	destroy_workqueue(ata_wq);
6605 	destroy_workqueue(ata_aux_wq);
6606 }
6607 
6608 subsys_initcall(ata_init);
6609 module_exit(ata_exit);
6610 
6611 static unsigned long ratelimit_time;
6612 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6613 
6614 int ata_ratelimit(void)
6615 {
6616 	int rc;
6617 	unsigned long flags;
6618 
6619 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6620 
6621 	if (time_after(jiffies, ratelimit_time)) {
6622 		rc = 1;
6623 		ratelimit_time = jiffies + (HZ/5);
6624 	} else
6625 		rc = 0;
6626 
6627 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6628 
6629 	return rc;
6630 }
6631 
6632 /**
6633  *	ata_wait_register - wait until register value changes
6634  *	@reg: IO-mapped register
6635  *	@mask: Mask to apply to read register value
6636  *	@val: Wait condition
6637  *	@interval: polling interval in milliseconds
6638  *	@timeout: timeout in milliseconds
6639  *
6640  *	Waiting for some bits of register to change is a common
6641  *	operation for ATA controllers.  This function reads 32bit LE
6642  *	IO-mapped register @reg and tests for the following condition.
6643  *
6644  *	(*@reg & mask) != val
6645  *
6646  *	If the condition is met, it returns; otherwise, the process is
6647  *	repeated after @interval_msec until timeout.
6648  *
6649  *	LOCKING:
6650  *	Kernel thread context (may sleep)
6651  *
6652  *	RETURNS:
6653  *	The final register value.
6654  */
6655 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6656 		      unsigned long interval, unsigned long timeout)
6657 {
6658 	unsigned long deadline;
6659 	u32 tmp;
6660 
6661 	tmp = ioread32(reg);
6662 
6663 	/* Calculate timeout _after_ the first read to make sure
6664 	 * preceding writes reach the controller before starting to
6665 	 * eat away the timeout.
6666 	 */
6667 	deadline = ata_deadline(jiffies, timeout);
6668 
6669 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6670 		msleep(interval);
6671 		tmp = ioread32(reg);
6672 	}
6673 
6674 	return tmp;
6675 }
6676 
6677 /*
6678  * Dummy port_ops
6679  */
6680 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6681 {
6682 	return AC_ERR_SYSTEM;
6683 }
6684 
6685 static void ata_dummy_error_handler(struct ata_port *ap)
6686 {
6687 	/* truly dummy */
6688 }
6689 
6690 struct ata_port_operations ata_dummy_port_ops = {
6691 	.qc_prep		= ata_noop_qc_prep,
6692 	.qc_issue		= ata_dummy_qc_issue,
6693 	.error_handler		= ata_dummy_error_handler,
6694 };
6695 
6696 const struct ata_port_info ata_dummy_port_info = {
6697 	.port_ops		= &ata_dummy_port_ops,
6698 };
6699 
6700 /*
6701  * libata is essentially a library of internal helper functions for
6702  * low-level ATA host controller drivers.  As such, the API/ABI is
6703  * likely to change as new drivers are added and updated.
6704  * Do not depend on ABI/API stability.
6705  */
6706 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6707 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6708 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6709 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6710 EXPORT_SYMBOL_GPL(sata_port_ops);
6711 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6712 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6713 EXPORT_SYMBOL_GPL(ata_link_next);
6714 EXPORT_SYMBOL_GPL(ata_dev_next);
6715 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6716 EXPORT_SYMBOL_GPL(ata_host_init);
6717 EXPORT_SYMBOL_GPL(ata_host_alloc);
6718 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6719 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6720 EXPORT_SYMBOL_GPL(ata_host_start);
6721 EXPORT_SYMBOL_GPL(ata_host_register);
6722 EXPORT_SYMBOL_GPL(ata_host_activate);
6723 EXPORT_SYMBOL_GPL(ata_host_detach);
6724 EXPORT_SYMBOL_GPL(ata_sg_init);
6725 EXPORT_SYMBOL_GPL(ata_qc_complete);
6726 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6727 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6728 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6729 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6730 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6731 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6732 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6733 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6734 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6735 EXPORT_SYMBOL_GPL(ata_mode_string);
6736 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6737 EXPORT_SYMBOL_GPL(ata_port_start);
6738 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6739 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6740 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6741 EXPORT_SYMBOL_GPL(ata_port_probe);
6742 EXPORT_SYMBOL_GPL(ata_dev_disable);
6743 EXPORT_SYMBOL_GPL(sata_set_spd);
6744 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6745 EXPORT_SYMBOL_GPL(sata_link_debounce);
6746 EXPORT_SYMBOL_GPL(sata_link_resume);
6747 EXPORT_SYMBOL_GPL(ata_std_prereset);
6748 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6749 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6750 EXPORT_SYMBOL_GPL(ata_std_postreset);
6751 EXPORT_SYMBOL_GPL(ata_dev_classify);
6752 EXPORT_SYMBOL_GPL(ata_dev_pair);
6753 EXPORT_SYMBOL_GPL(ata_port_disable);
6754 EXPORT_SYMBOL_GPL(ata_ratelimit);
6755 EXPORT_SYMBOL_GPL(ata_wait_register);
6756 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6757 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6758 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6759 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6760 EXPORT_SYMBOL_GPL(sata_scr_valid);
6761 EXPORT_SYMBOL_GPL(sata_scr_read);
6762 EXPORT_SYMBOL_GPL(sata_scr_write);
6763 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6764 EXPORT_SYMBOL_GPL(ata_link_online);
6765 EXPORT_SYMBOL_GPL(ata_link_offline);
6766 #ifdef CONFIG_PM
6767 EXPORT_SYMBOL_GPL(ata_host_suspend);
6768 EXPORT_SYMBOL_GPL(ata_host_resume);
6769 #endif /* CONFIG_PM */
6770 EXPORT_SYMBOL_GPL(ata_id_string);
6771 EXPORT_SYMBOL_GPL(ata_id_c_string);
6772 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6773 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6774 
6775 EXPORT_SYMBOL_GPL(ata_pio_queue_task);
6776 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6777 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6778 EXPORT_SYMBOL_GPL(ata_timing_compute);
6779 EXPORT_SYMBOL_GPL(ata_timing_merge);
6780 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6781 
6782 #ifdef CONFIG_PCI
6783 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6784 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6785 #ifdef CONFIG_PM
6786 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6787 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6788 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6789 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6790 #endif /* CONFIG_PM */
6791 #endif /* CONFIG_PCI */
6792 
6793 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6794 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6795 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6796 EXPORT_SYMBOL_GPL(ata_port_desc);
6797 #ifdef CONFIG_PCI
6798 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6799 #endif /* CONFIG_PCI */
6800 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6801 EXPORT_SYMBOL_GPL(ata_link_abort);
6802 EXPORT_SYMBOL_GPL(ata_port_abort);
6803 EXPORT_SYMBOL_GPL(ata_port_freeze);
6804 EXPORT_SYMBOL_GPL(sata_async_notification);
6805 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6806 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6807 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6808 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6809 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6810 EXPORT_SYMBOL_GPL(ata_do_eh);
6811 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6812 
6813 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6814 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6815 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6816 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6817 EXPORT_SYMBOL_GPL(ata_cable_sata);
6818