xref: /linux/drivers/ata/libata-core.c (revision 93d546399c2b7d66a54d5fbd5eee17de19246bf6)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <scsi/scsi.h>
60 #include <scsi/scsi_cmnd.h>
61 #include <scsi/scsi_host.h>
62 #include <linux/libata.h>
63 #include <asm/byteorder.h>
64 #include <linux/cdrom.h>
65 
66 #include "libata.h"
67 
68 
69 /* debounce timing parameters in msecs { interval, duration, timeout } */
70 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
71 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
72 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
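
/*
 * These tables feed the SCR debounce/resume helpers.  A minimal
 * sketch of the intended use, assuming sata_link_resume() and the
 * sata_ehc_deb_timing() helper from <linux/libata.h>:
 *
 *	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *
 *	rc = sata_link_resume(link, timing, deadline);
 */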
73 
74 const struct ata_port_operations ata_base_port_ops = {
75 	.prereset		= ata_std_prereset,
76 	.postreset		= ata_std_postreset,
77 	.error_handler		= ata_std_error_handler,
78 };
79 
80 const struct ata_port_operations sata_port_ops = {
81 	.inherits		= &ata_base_port_ops,
82 
83 	.qc_defer		= ata_std_qc_defer,
84 	.hardreset		= sata_std_hardreset,
85 };
86 
87 static unsigned int ata_dev_init_params(struct ata_device *dev,
88 					u16 heads, u16 sectors);
89 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
90 static unsigned int ata_dev_set_feature(struct ata_device *dev,
91 					u8 enable, u8 feature);
92 static void ata_dev_xfermask(struct ata_device *dev);
93 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
94 
95 unsigned int ata_print_id = 1;
96 static struct workqueue_struct *ata_wq;
97 
98 struct workqueue_struct *ata_aux_wq;
99 
100 struct ata_force_param {
101 	const char	*name;
102 	unsigned int	cbl;
103 	int		spd_limit;
104 	unsigned long	xfer_mask;
105 	unsigned int	horkage_on;
106 	unsigned int	horkage_off;
107 	unsigned int	lflags;
108 };
109 
110 struct ata_force_ent {
111 	int			port;
112 	int			device;
113 	struct ata_force_param	param;
114 };
115 
116 static struct ata_force_ent *ata_force_tbl;
117 static int ata_force_tbl_size;
118 
119 static char ata_force_param_buf[PAGE_SIZE] __initdata;
120 /* param_buf is thrown away after initialization, disallow read */
121 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
122 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
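
/*
 * Each entry in the comma-separated list is [PORT[.DEVICE]:]VALUE,
 * per Documentation/kernel-parameters.txt.  For instance, booting
 * with
 *
 *	libata.force=1:40c,1.00:udma4
 *
 * should force a 40-wire cable on port 1 and limit device 0 on that
 * port to UDMA4; see ata_force_cbl() and ata_force_xfermask() below
 * for how the entries are applied.
 */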
123 
124 static int atapi_enabled = 1;
125 module_param(atapi_enabled, int, 0444);
126 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
127 
128 static int atapi_dmadir = 0;
129 module_param(atapi_dmadir, int, 0444);
130 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
131 
132 int atapi_passthru16 = 1;
133 module_param(atapi_passthru16, int, 0444);
134 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
135 
136 int libata_fua = 0;
137 module_param_named(fua, libata_fua, int, 0444);
138 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
139 
140 static int ata_ignore_hpa;
141 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
142 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
143 
144 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
145 module_param_named(dma, libata_dma_mask, int, 0444);
146 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
147 
148 static int ata_probe_timeout;
149 module_param(ata_probe_timeout, int, 0444);
150 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
151 
152 int libata_noacpi = 0;
153 module_param_named(noacpi, libata_noacpi, int, 0444);
154 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
155 
156 int libata_allow_tpm = 0;
157 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
158 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
159 
160 MODULE_AUTHOR("Jeff Garzik");
161 MODULE_DESCRIPTION("Library module for ATA devices");
162 MODULE_LICENSE("GPL");
163 MODULE_VERSION(DRV_VERSION);
164 
165 
166 /*
167  * Iterator helpers.  Don't use directly.
168  *
169  * LOCKING:
170  * Host lock or EH context.
171  */
172 struct ata_link *__ata_port_next_link(struct ata_port *ap,
173 				      struct ata_link *link, bool dev_only)
174 {
175 	/* NULL link indicates start of iteration */
176 	if (!link) {
177 		if (dev_only && sata_pmp_attached(ap))
178 			return ap->pmp_link;
179 		return &ap->link;
180 	}
181 
182 	/* we just iterated over the host master link, what's next? */
183 	if (link == &ap->link) {
184 		if (!sata_pmp_attached(ap)) {
185 			if (unlikely(ap->slave_link) && !dev_only)
186 				return ap->slave_link;
187 			return NULL;
188 		}
189 		return ap->pmp_link;
190 	}
191 
192 	/* slave_link excludes PMP */
193 	if (unlikely(link == ap->slave_link))
194 		return NULL;
195 
196 	/* iterate to the next PMP link */
197 	if (++link < ap->pmp_link + ap->nr_pmp_links)
198 		return link;
199 	return NULL;
200 }
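
/*
 * Callers normally reach this helper through the iteration macros
 * from <linux/libata.h> (see ata_lpm_enable() below for an in-file
 * user).  A minimal sketch of walking every device on a port:
 */
#if 0	/* illustrative only */
static void example_walk_port(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	ata_port_for_each_link(link, ap)
		ata_link_for_each_dev(dev, link)
			if (ata_dev_enabled(dev))
				ata_dev_printk(dev, KERN_DEBUG, "enabled\n");
}
#endif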
201 
202 /**
203  *	ata_dev_phys_link - find physical link for a device
204  *	@dev: ATA device to look up physical link for
205  *
206  *	Look up physical link which @dev is attached to.  Note that
207  *	this is different from @dev->link only when @dev is on the slave
208  *	link.  For all other cases, it's the same as @dev->link.
209  *
210  *	LOCKING:
211  *	Don't care.
212  *
213  *	RETURNS:
214  *	Pointer to the found physical link.
215  */
216 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
217 {
218 	struct ata_port *ap = dev->link->ap;
219 
220 	if (!ap->slave_link)
221 		return dev->link;
222 	if (!dev->devno)
223 		return &ap->link;
224 	return ap->slave_link;
225 }
226 
227 /**
228  *	ata_force_cbl - force cable type according to libata.force
229  *	@ap: ATA port of interest
230  *
231  *	Force cable type according to libata.force and whine about it.
232  *	The last entry which has matching port number is used, so it
233  *	can be specified as part of device force parameters.  For
234  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
235  *	same effect.
236  *
237  *	LOCKING:
238  *	EH context.
239  */
240 void ata_force_cbl(struct ata_port *ap)
241 {
242 	int i;
243 
244 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
245 		const struct ata_force_ent *fe = &ata_force_tbl[i];
246 
247 		if (fe->port != -1 && fe->port != ap->print_id)
248 			continue;
249 
250 		if (fe->param.cbl == ATA_CBL_NONE)
251 			continue;
252 
253 		ap->cbl = fe->param.cbl;
254 		ata_port_printk(ap, KERN_NOTICE,
255 				"FORCE: cable set to %s\n", fe->param.name);
256 		return;
257 	}
258 }
259 
260 /**
261  *	ata_force_link_limits - force link limits according to libata.force
262  *	@link: ATA link of interest
263  *
264  *	Force link flags and SATA spd limit according to libata.force
265  *	and whine about it.  When only the port part is specified
266  *	(e.g. 1:), the limit applies to all links connected to both
267  *	the host link and all fan-out ports connected via PMP.  If the
268  *	device part is specified as 0 (e.g. 1.00:), it specifies the
269  *	first fan-out link, not the host link.  Device number 15 always
270  *	points to the host link whether PMP is attached or not.  If the
271  *	controller has a slave link, device number 16 points to it.
272  *
273  *	LOCKING:
274  *	EH context.
275  */
276 static void ata_force_link_limits(struct ata_link *link)
277 {
278 	bool did_spd = false;
279 	int linkno = link->pmp;
280 	int i;
281 
282 	if (ata_is_host_link(link))
283 		linkno += 15;
284 
285 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
286 		const struct ata_force_ent *fe = &ata_force_tbl[i];
287 
288 		if (fe->port != -1 && fe->port != link->ap->print_id)
289 			continue;
290 
291 		if (fe->device != -1 && fe->device != linkno)
292 			continue;
293 
294 		/* only honor the first spd limit */
295 		if (!did_spd && fe->param.spd_limit) {
296 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
297 			ata_link_printk(link, KERN_NOTICE,
298 					"FORCE: PHY spd limit set to %s\n",
299 					fe->param.name);
300 			did_spd = true;
301 		}
302 
303 		/* let lflags stack */
304 		if (fe->param.lflags) {
305 			link->flags |= fe->param.lflags;
306 			ata_link_printk(link, KERN_NOTICE,
307 					"FORCE: link flag 0x%x forced -> 0x%x\n",
308 					fe->param.lflags, link->flags);
309 		}
310 	}
311 }
312 
313 /**
314  *	ata_force_xfermask - force xfermask according to libata.force
315  *	@dev: ATA device of interest
316  *
317  *	Force xfer_mask according to libata.force and whine about it.
318  *	For consistency with link selection, device number 15 selects
319  *	the first device connected to the host link.
320  *
321  *	LOCKING:
322  *	EH context.
323  */
324 static void ata_force_xfermask(struct ata_device *dev)
325 {
326 	int devno = dev->link->pmp + dev->devno;
327 	int alt_devno = devno;
328 	int i;
329 
330 	/* allow n.15/16 for devices attached to host port */
331 	if (ata_is_host_link(dev->link))
332 		alt_devno += 15;
333 
334 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
335 		const struct ata_force_ent *fe = &ata_force_tbl[i];
336 		unsigned long pio_mask, mwdma_mask, udma_mask;
337 
338 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
339 			continue;
340 
341 		if (fe->device != -1 && fe->device != devno &&
342 		    fe->device != alt_devno)
343 			continue;
344 
345 		if (!fe->param.xfer_mask)
346 			continue;
347 
348 		ata_unpack_xfermask(fe->param.xfer_mask,
349 				    &pio_mask, &mwdma_mask, &udma_mask);
350 		if (udma_mask)
351 			dev->udma_mask = udma_mask;
352 		else if (mwdma_mask) {
353 			dev->udma_mask = 0;
354 			dev->mwdma_mask = mwdma_mask;
355 		} else {
356 			dev->udma_mask = 0;
357 			dev->mwdma_mask = 0;
358 			dev->pio_mask = pio_mask;
359 		}
360 
361 		ata_dev_printk(dev, KERN_NOTICE,
362 			"FORCE: xfer_mask set to %s\n", fe->param.name);
363 		return;
364 	}
365 }
366 
367 /**
368  *	ata_force_horkage - force horkage according to libata.force
369  *	@dev: ATA device of interest
370  *
371  *	Force horkage according to libata.force and whine about it.
372  *	For consistency with link selection, device number 15 selects
373  *	the first device connected to the host link.
374  *
375  *	LOCKING:
376  *	EH context.
377  */
378 static void ata_force_horkage(struct ata_device *dev)
379 {
380 	int devno = dev->link->pmp + dev->devno;
381 	int alt_devno = devno;
382 	int i;
383 
384 	/* allow n.15/16 for devices attached to host port */
385 	if (ata_is_host_link(dev->link))
386 		alt_devno += 15;
387 
388 	for (i = 0; i < ata_force_tbl_size; i++) {
389 		const struct ata_force_ent *fe = &ata_force_tbl[i];
390 
391 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
392 			continue;
393 
394 		if (fe->device != -1 && fe->device != devno &&
395 		    fe->device != alt_devno)
396 			continue;
397 
398 		if (!(~dev->horkage & fe->param.horkage_on) &&
399 		    !(dev->horkage & fe->param.horkage_off))
400 			continue;
401 
402 		dev->horkage |= fe->param.horkage_on;
403 		dev->horkage &= ~fe->param.horkage_off;
404 
405 		ata_dev_printk(dev, KERN_NOTICE,
406 			"FORCE: horkage modified (%s)\n", fe->param.name);
407 	}
408 }
409 
410 /**
411  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
412  *	@opcode: SCSI opcode
413  *
414  *	Determine ATAPI command type from @opcode.
415  *
416  *	LOCKING:
417  *	None.
418  *
419  *	RETURNS:
420  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
421  */
422 int atapi_cmd_type(u8 opcode)
423 {
424 	switch (opcode) {
425 	case GPCMD_READ_10:
426 	case GPCMD_READ_12:
427 		return ATAPI_READ;
428 
429 	case GPCMD_WRITE_10:
430 	case GPCMD_WRITE_12:
431 	case GPCMD_WRITE_AND_VERIFY_10:
432 		return ATAPI_WRITE;
433 
434 	case GPCMD_READ_CD:
435 	case GPCMD_READ_CD_MSF:
436 		return ATAPI_READ_CD;
437 
438 	case ATA_16:
439 	case ATA_12:
440 		if (atapi_passthru16)
441 			return ATAPI_PASS_THRU;
442 		/* fall thru */
443 	default:
444 		return ATAPI_MISC;
445 	}
446 }
447 
448 /**
449  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
450  *	@tf: Taskfile to convert
451  *	@pmp: Port multiplier port
452  *	@is_cmd: This FIS is for a command
453  *	@fis: Buffer into which data will be output
454  *
455  *	Converts a standard ATA taskfile to a Serial ATA
456  *	FIS structure (Register - Host to Device).
457  *
458  *	LOCKING:
459  *	Inherited from caller.
460  */
461 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
462 {
463 	fis[0] = 0x27;			/* Register - Host to Device FIS */
464 	fis[1] = pmp & 0xf;		/* Port multiplier number */
465 	if (is_cmd)
466 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
467 
468 	fis[2] = tf->command;
469 	fis[3] = tf->feature;
470 
471 	fis[4] = tf->lbal;
472 	fis[5] = tf->lbam;
473 	fis[6] = tf->lbah;
474 	fis[7] = tf->device;
475 
476 	fis[8] = tf->hob_lbal;
477 	fis[9] = tf->hob_lbam;
478 	fis[10] = tf->hob_lbah;
479 	fis[11] = tf->hob_feature;
480 
481 	fis[12] = tf->nsect;
482 	fis[13] = tf->hob_nsect;
483 	fis[14] = 0;
484 	fis[15] = tf->ctl;
485 
486 	fis[16] = 0;
487 	fis[17] = 0;
488 	fis[18] = 0;
489 	fis[19] = 0;
490 }
491 
492 /**
493  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
494  *	@fis: Buffer from which data will be input
495  *	@tf: Taskfile to output
496  *
497  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
498  *
499  *	LOCKING:
500  *	Inherited from caller.
501  */
502 
503 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
504 {
505 	tf->command	= fis[2];	/* status */
506 	tf->feature	= fis[3];	/* error */
507 
508 	tf->lbal	= fis[4];
509 	tf->lbam	= fis[5];
510 	tf->lbah	= fis[6];
511 	tf->device	= fis[7];
512 
513 	tf->hob_lbal	= fis[8];
514 	tf->hob_lbam	= fis[9];
515 	tf->hob_lbah	= fis[10];
516 
517 	tf->nsect	= fis[12];
518 	tf->hob_nsect	= fis[13];
519 }
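
/*
 * Together the two helpers above give a taskfile/FIS round-trip.  A
 * minimal sketch, assuming @dev is a valid ata_device:
 */
#if 0	/* illustrative only */
	struct ata_taskfile tf;
	u8 fis[20];

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_ID_ATA;
	ata_tf_to_fis(&tf, 0, 1, fis);	/* fis[0] == 0x27, fis[1] == 0x80 */
	ata_tf_from_fis(fis, &tf);	/* decode a received FIS back */
#endif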
520 
521 static const u8 ata_rw_cmds[] = {
522 	/* pio multi */
523 	ATA_CMD_READ_MULTI,
524 	ATA_CMD_WRITE_MULTI,
525 	ATA_CMD_READ_MULTI_EXT,
526 	ATA_CMD_WRITE_MULTI_EXT,
527 	0,
528 	0,
529 	0,
530 	ATA_CMD_WRITE_MULTI_FUA_EXT,
531 	/* pio */
532 	ATA_CMD_PIO_READ,
533 	ATA_CMD_PIO_WRITE,
534 	ATA_CMD_PIO_READ_EXT,
535 	ATA_CMD_PIO_WRITE_EXT,
536 	0,
537 	0,
538 	0,
539 	0,
540 	/* dma */
541 	ATA_CMD_READ,
542 	ATA_CMD_WRITE,
543 	ATA_CMD_READ_EXT,
544 	ATA_CMD_WRITE_EXT,
545 	0,
546 	0,
547 	0,
548 	ATA_CMD_WRITE_FUA_EXT
549 };
550 
551 /**
552  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
553  *	@tf: command to examine and configure
554  *	@dev: device tf belongs to
555  *
556  *	Examine the device configuration and tf->flags to calculate
557  *	the proper read/write commands and protocol to use.
558  *
559  *	LOCKING:
560  *	caller.
561  */
562 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
563 {
564 	u8 cmd;
565 
566 	int index, fua, lba48, write;
567 
568 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
569 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
570 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
571 
572 	if (dev->flags & ATA_DFLAG_PIO) {
573 		tf->protocol = ATA_PROT_PIO;
574 		index = dev->multi_count ? 0 : 8;
575 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
576 		/* Unable to use DMA due to host limitation */
577 		tf->protocol = ATA_PROT_PIO;
578 		index = dev->multi_count ? 0 : 8;
579 	} else {
580 		tf->protocol = ATA_PROT_DMA;
581 		index = 16;
582 	}
583 
584 	cmd = ata_rw_cmds[index + fua + lba48 + write];
585 	if (cmd) {
586 		tf->command = cmd;
587 		return 0;
588 	}
589 	return -1;
590 }
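
/*
 * The ata_rw_cmds[] lookup above packs the whole decision into one
 * index: a base of 0 (PIO multi), 8 (PIO) or 16 (DMA), plus 4 for
 * FUA, 2 for LBA48 and 1 for write.  For example, a DMA LBA48 FUA
 * write is index 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT;
 * a zero entry (e.g. FUA without LBA48) makes the function fail.
 */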
591 
592 /**
593  *	ata_tf_read_block - Read block address from ATA taskfile
594  *	@tf: ATA taskfile of interest
595  *	@dev: ATA device @tf belongs to
596  *
597  *	LOCKING:
598  *	None.
599  *
600  *	Read block address from @tf.  This function can handle all
601  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
602  *	flags select the address format to use.
603  *
604  *	RETURNS:
605  *	Block address read from @tf.
606  */
607 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
608 {
609 	u64 block = 0;
610 
611 	if (tf->flags & ATA_TFLAG_LBA) {
612 		if (tf->flags & ATA_TFLAG_LBA48) {
613 			block |= (u64)tf->hob_lbah << 40;
614 			block |= (u64)tf->hob_lbam << 32;
615 			block |= (u64)tf->hob_lbal << 24;
616 		} else
617 			block |= (tf->device & 0xf) << 24;
618 
619 		block |= tf->lbah << 16;
620 		block |= tf->lbam << 8;
621 		block |= tf->lbal;
622 	} else {
623 		u32 cyl, head, sect;
624 
625 		cyl = tf->lbam | (tf->lbah << 8);
626 		head = tf->device & 0xf;
627 		sect = tf->lbal;
628 
629 		block = (cyl * dev->heads + head) * dev->sectors + sect;
630 	}
631 
632 	return block;
633 }
634 
635 /**
636  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
637  *	@tf: Target ATA taskfile
638  *	@dev: ATA device @tf belongs to
639  *	@block: Block address
640  *	@n_block: Number of blocks
641  *	@tf_flags: RW/FUA etc...
642  *	@tag: tag
643  *
644  *	LOCKING:
645  *	None.
646  *
647  *	Build ATA taskfile @tf for read/write request described by
648  *	@block, @n_block, @tf_flags and @tag on @dev.
649  *
650  *	RETURNS:
651  *
652  *	0 on success, -ERANGE if the request is too large for @dev,
653  *	-EINVAL if the request is invalid.
654  */
655 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
656 		    u64 block, u32 n_block, unsigned int tf_flags,
657 		    unsigned int tag)
658 {
659 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
660 	tf->flags |= tf_flags;
661 
662 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
663 		/* yay, NCQ */
664 		if (!lba_48_ok(block, n_block))
665 			return -ERANGE;
666 
667 		tf->protocol = ATA_PROT_NCQ;
668 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
669 
670 		if (tf->flags & ATA_TFLAG_WRITE)
671 			tf->command = ATA_CMD_FPDMA_WRITE;
672 		else
673 			tf->command = ATA_CMD_FPDMA_READ;
674 
675 		tf->nsect = tag << 3;
676 		tf->hob_feature = (n_block >> 8) & 0xff;
677 		tf->feature = n_block & 0xff;
678 
679 		tf->hob_lbah = (block >> 40) & 0xff;
680 		tf->hob_lbam = (block >> 32) & 0xff;
681 		tf->hob_lbal = (block >> 24) & 0xff;
682 		tf->lbah = (block >> 16) & 0xff;
683 		tf->lbam = (block >> 8) & 0xff;
684 		tf->lbal = block & 0xff;
685 
686 		tf->device = 1 << 6;
687 		if (tf->flags & ATA_TFLAG_FUA)
688 			tf->device |= 1 << 7;
689 	} else if (dev->flags & ATA_DFLAG_LBA) {
690 		tf->flags |= ATA_TFLAG_LBA;
691 
692 		if (lba_28_ok(block, n_block)) {
693 			/* use LBA28 */
694 			tf->device |= (block >> 24) & 0xf;
695 		} else if (lba_48_ok(block, n_block)) {
696 			if (!(dev->flags & ATA_DFLAG_LBA48))
697 				return -ERANGE;
698 
699 			/* use LBA48 */
700 			tf->flags |= ATA_TFLAG_LBA48;
701 
702 			tf->hob_nsect = (n_block >> 8) & 0xff;
703 
704 			tf->hob_lbah = (block >> 40) & 0xff;
705 			tf->hob_lbam = (block >> 32) & 0xff;
706 			tf->hob_lbal = (block >> 24) & 0xff;
707 		} else
708 			/* request too large even for LBA48 */
709 			return -ERANGE;
710 
711 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
712 			return -EINVAL;
713 
714 		tf->nsect = n_block & 0xff;
715 
716 		tf->lbah = (block >> 16) & 0xff;
717 		tf->lbam = (block >> 8) & 0xff;
718 		tf->lbal = block & 0xff;
719 
720 		tf->device |= ATA_LBA;
721 	} else {
722 		/* CHS */
723 		u32 sect, head, cyl, track;
724 
725 		/* The request -may- be too large for CHS addressing. */
726 		if (!lba_28_ok(block, n_block))
727 			return -ERANGE;
728 
729 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
730 			return -EINVAL;
731 
732 		/* Convert LBA to CHS */
733 		track = (u32)block / dev->sectors;
734 		cyl   = track / dev->heads;
735 		head  = track % dev->heads;
736 		sect  = (u32)block % dev->sectors + 1;
737 
738 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
739 			(u32)block, track, cyl, head, sect);
740 
741 		/* Check whether the converted CHS can fit.
742 		   Cylinder: 0-65535
743 		   Head: 0-15
744 		   Sector: 1-255 */
745 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
746 			return -ERANGE;
747 
748 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
749 		tf->lbal = sect;
750 		tf->lbam = cyl;
751 		tf->lbah = cyl >> 8;
752 		tf->device |= head;
753 	}
754 
755 	return 0;
756 }
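
/*
 * A minimal usage sketch: build a plain (non-NCQ) 16-sector read at
 * LBA 1024.  A tf_flags of 0 selects a read and ATA_TAG_INTERNAL
 * bypasses the NCQ branch above:
 */
#if 0	/* illustrative only */
	struct ata_taskfile tf;
	int rc;

	ata_tf_init(dev, &tf);
	rc = ata_build_rw_tf(&tf, dev, 1024, 16, 0, ATA_TAG_INTERNAL);
	/* rc is 0 on success, -ERANGE if too large, -EINVAL otherwise */
#endif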
757 
758 /**
759  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
760  *	@pio_mask: pio_mask
761  *	@mwdma_mask: mwdma_mask
762  *	@udma_mask: udma_mask
763  *
764  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
765  *	unsigned int xfer_mask.
766  *
767  *	LOCKING:
768  *	None.
769  *
770  *	RETURNS:
771  *	Packed xfer_mask.
772  */
773 unsigned long ata_pack_xfermask(unsigned long pio_mask,
774 				unsigned long mwdma_mask,
775 				unsigned long udma_mask)
776 {
777 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
778 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
779 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
780 }
781 
782 /**
783  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
784  *	@xfer_mask: xfer_mask to unpack
785  *	@pio_mask: resulting pio_mask
786  *	@mwdma_mask: resulting mwdma_mask
787  *	@udma_mask: resulting udma_mask
788  *
789  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
790  *	Any NULL destination masks will be ignored.
791  */
792 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
793 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
794 {
795 	if (pio_mask)
796 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
797 	if (mwdma_mask)
798 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
799 	if (udma_mask)
800 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
801 }
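
/*
 * Packing and unpacking are inverses; e.g. the PIO0-4, MWDMA0-2,
 * UDMA0-5 combination survives a round-trip:
 */
#if 0	/* illustrative only */
	unsigned long xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07, udma == 0x3f again */
#endif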
802 
803 static const struct ata_xfer_ent {
804 	int shift, bits;
805 	u8 base;
806 } ata_xfer_tbl[] = {
807 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
808 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
809 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
810 	{ -1, },
811 };
812 
813 /**
814  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
815  *	@xfer_mask: xfer_mask of interest
816  *
817  *	Return matching XFER_* value for @xfer_mask.  Only the highest
818  *	bit of @xfer_mask is considered.
819  *
820  *	LOCKING:
821  *	None.
822  *
823  *	RETURNS:
824  *	Matching XFER_* value, 0xff if no match found.
825  */
826 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
827 {
828 	int highbit = fls(xfer_mask) - 1;
829 	const struct ata_xfer_ent *ent;
830 
831 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
832 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
833 			return ent->base + highbit - ent->shift;
834 	return 0xff;
835 }
836 
837 /**
838  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
839  *	@xfer_mode: XFER_* of interest
840  *
841  *	Return matching xfer_mask for @xfer_mode.
842  *
843  *	LOCKING:
844  *	None.
845  *
846  *	RETURNS:
847  *	Matching xfer_mask, 0 if no match found.
848  */
849 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
850 {
851 	const struct ata_xfer_ent *ent;
852 
853 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
854 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
855 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
856 				& ~((1 << ent->shift) - 1);
857 	return 0;
858 }
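
/*
 * Worked examples for the two conversions above, per the ATA_MASK_*
 * and XFER_* definitions in <linux/ata.h>: ata_xfer_mask2mode() of
 * the full ATA_MASK_PIO takes the highest set bit and yields
 * XFER_PIO_6, while ata_xfer_mode2mask(XFER_UDMA_2) expands back to
 * the mask covering UDMA0 through UDMA2.
 */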
859 
860 /**
861  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
862  *	@xfer_mode: XFER_* of interest
863  *
864  *	Return matching xfer_shift for @xfer_mode.
865  *
866  *	LOCKING:
867  *	None.
868  *
869  *	RETURNS:
870  *	Matching xfer_shift, -1 if no match found.
871  */
872 int ata_xfer_mode2shift(unsigned long xfer_mode)
873 {
874 	const struct ata_xfer_ent *ent;
875 
876 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
877 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
878 			return ent->shift;
879 	return -1;
880 }
881 
882 /**
883  *	ata_mode_string - convert xfer_mask to string
884  *	@xfer_mask: mask of bits supported; only highest bit counts.
885  *
886  *	Determine string which represents the highest speed
887  *	(highest bit in @xfer_mask).
888  *
889  *	LOCKING:
890  *	None.
891  *
892  *	RETURNS:
893  *	Constant C string representing highest speed listed in
894  *	@xfer_mask, or the constant C string "<n/a>".
895  */
896 const char *ata_mode_string(unsigned long xfer_mask)
897 {
898 	static const char * const xfer_mode_str[] = {
899 		"PIO0",
900 		"PIO1",
901 		"PIO2",
902 		"PIO3",
903 		"PIO4",
904 		"PIO5",
905 		"PIO6",
906 		"MWDMA0",
907 		"MWDMA1",
908 		"MWDMA2",
909 		"MWDMA3",
910 		"MWDMA4",
911 		"UDMA/16",
912 		"UDMA/25",
913 		"UDMA/33",
914 		"UDMA/44",
915 		"UDMA/66",
916 		"UDMA/100",
917 		"UDMA/133",
918 		"UDMA7",
919 	};
920 	int highbit;
921 
922 	highbit = fls(xfer_mask) - 1;
923 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
924 		return xfer_mode_str[highbit];
925 	return "<n/a>";
926 }
927 
928 static const char *sata_spd_string(unsigned int spd)
929 {
930 	static const char * const spd_str[] = {
931 		"1.5 Gbps",
932 		"3.0 Gbps",
933 	};
934 
935 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
936 		return "<unknown>";
937 	return spd_str[spd - 1];
938 }
939 
940 void ata_dev_disable(struct ata_device *dev)
941 {
942 	if (ata_dev_enabled(dev)) {
943 		if (ata_msg_drv(dev->link->ap))
944 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
945 		ata_acpi_on_disable(dev);
946 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
947 					     ATA_DNXFER_QUIET);
948 		dev->class++;
949 	}
950 }
951 
952 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
953 {
954 	struct ata_link *link = dev->link;
955 	struct ata_port *ap = link->ap;
956 	u32 scontrol;
957 	unsigned int err_mask;
958 	int rc;
959 
960 	/*
961 	 * disallow DIPM for drivers which haven't set
962 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
963 	 * phy ready will be set in the interrupt status on
964 	 * state changes, which will cause some drivers to
965 	 * think there are errors - additionally drivers will
966 	 * need to disable hot plug.
967 	 */
968 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
969 		ap->pm_policy = NOT_AVAILABLE;
970 		return -EINVAL;
971 	}
972 
973 	/*
974 	 * For DIPM, we will only enable it for the
975 	 * min_power setting.
976 	 *
977 	 * Why?  Because disks are too stupid to know that
978 	 * if the host rejects a request to go to SLUMBER
979 	 * they should retry at PARTIAL; instead they
980 	 * just give up.  So, for medium_power to
981 	 * work at all, we need to only allow HIPM.
982 	 */
983 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
984 	if (rc)
985 		return rc;
986 
987 	switch (policy) {
988 	case MIN_POWER:
989 		/* no restrictions on IPM transitions */
990 		scontrol &= ~(0x3 << 8);
991 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
992 		if (rc)
993 			return rc;
994 
995 		/* enable DIPM */
996 		if (dev->flags & ATA_DFLAG_DIPM)
997 			err_mask = ata_dev_set_feature(dev,
998 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
999 		break;
1000 	case MEDIUM_POWER:
1001 		/* allow IPM to PARTIAL */
1002 		scontrol &= ~(0x1 << 8);
1003 		scontrol |= (0x2 << 8);
1004 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1005 		if (rc)
1006 			return rc;
1007 
1008 		/*
1009 		 * we don't have to disable DIPM since IPM flags
1010 		 * disallow transitions to SLUMBER, which effectively
1011 		 * disables DIPM if the device does not support PARTIAL
1012 		 */
1013 		break;
1014 	case NOT_AVAILABLE:
1015 	case MAX_PERFORMANCE:
1016 		/* disable all IPM transitions */
1017 		scontrol |= (0x3 << 8);
1018 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1019 		if (rc)
1020 			return rc;
1021 
1022 		/*
1023 		 * we don't have to disable DIPM since IPM flags
1024 		 * disallow all transitions, which effectively
1025 		 * disables DIPM anyway.
1026 		 */
1027 		break;
1028 	}
1029 
1030 	/* FIXME: handle SET FEATURES failure */
1031 	(void) err_mask;
1032 
1033 	return 0;
1034 }
1035 
1036 /**
1037  *	ata_dev_enable_pm - enable SATA interface power management
1038  *	@dev:  device to enable power management
1039  *	@policy: the link power management policy
1040  *
1041  *	Enable SATA Interface power management.  This will enable
1042  *	Device Interface Power Management (DIPM) for the min_power
1043  *	policy, and then call driver specific callbacks for
1044  *	enabling Host Initiated Power management.
1045  *
1046  *	Locking: Caller.
1047  *	Returns: void; on failure, @ap->pm_policy falls back to MAX_PERFORMANCE.
1048  */
1049 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1050 {
1051 	int rc = 0;
1052 	struct ata_port *ap = dev->link->ap;
1053 
1054 	/* set HIPM first, then DIPM */
1055 	if (ap->ops->enable_pm)
1056 		rc = ap->ops->enable_pm(ap, policy);
1057 	if (rc)
1058 		goto enable_pm_out;
1059 	rc = ata_dev_set_dipm(dev, policy);
1060 
1061 enable_pm_out:
1062 	if (rc)
1063 		ap->pm_policy = MAX_PERFORMANCE;
1064 	else
1065 		ap->pm_policy = policy;
1066 	return /* rc */;	/* hopefully we can use 'rc' eventually */
1067 }
1068 
1069 #ifdef CONFIG_PM
1070 /**
1071  *	ata_dev_disable_pm - disable SATA interface power management
1072  *	@dev: device to disable power management
1073  *
1074  *	Disable SATA Interface power management.  This will disable
1075  *	Device Interface Power Management (DIPM) without changing
1076  *	the policy, and call driver specific callbacks for disabling
1077  *	Host Initiated Power Management.
1078  *
1079  *	Locking: Caller.
1080  *	Returns: void
1081  */
1082 static void ata_dev_disable_pm(struct ata_device *dev)
1083 {
1084 	struct ata_port *ap = dev->link->ap;
1085 
1086 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1087 	if (ap->ops->disable_pm)
1088 		ap->ops->disable_pm(ap);
1089 }
1090 #endif	/* CONFIG_PM */
1091 
1092 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1093 {
1094 	ap->pm_policy = policy;
1095 	ap->link.eh_info.action |= ATA_EH_LPM;
1096 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1097 	ata_port_schedule_eh(ap);
1098 }
1099 
1100 #ifdef CONFIG_PM
1101 static void ata_lpm_enable(struct ata_host *host)
1102 {
1103 	struct ata_link *link;
1104 	struct ata_port *ap;
1105 	struct ata_device *dev;
1106 	int i;
1107 
1108 	for (i = 0; i < host->n_ports; i++) {
1109 		ap = host->ports[i];
1110 		ata_port_for_each_link(link, ap) {
1111 			ata_link_for_each_dev(dev, link)
1112 				ata_dev_disable_pm(dev);
1113 		}
1114 	}
1115 }
1116 
1117 static void ata_lpm_disable(struct ata_host *host)
1118 {
1119 	int i;
1120 
1121 	for (i = 0; i < host->n_ports; i++) {
1122 		struct ata_port *ap = host->ports[i];
1123 		ata_lpm_schedule(ap, ap->pm_policy);
1124 	}
1125 }
1126 #endif	/* CONFIG_PM */
1127 
1128 /**
1129  *	ata_dev_classify - determine device type based on ATA-spec signature
1130  *	@tf: ATA taskfile register set for device to be identified
1131  *
1132  *	Determine from taskfile register contents whether a device is
1133  *	ATA or ATAPI, as per "Signature and persistence" section
1134  *	of ATA/PI spec (volume 1, sect 5.14).
1135  *
1136  *	LOCKING:
1137  *	None.
1138  *
1139  *	RETURNS:
1140  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1141  *	%ATA_DEV_UNKNOWN in the event of failure.
1142  */
1143 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1144 {
1145 	/* Apple's open source Darwin code hints that some devices only
1146 	 * put a proper signature into the LBA mid/high registers,
1147 	 * so we only check those.  It's sufficient for uniqueness.
1148 	 *
1149 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1150 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1151 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1152 	 * spec has never mentioned using different signatures
1153 	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
1154 	 * Multiplier specification began to use 0x69/0x96 to identify
1155 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1156 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1157 	 * 0x69/0x96 and described them as reserved for
1158 	 * SerialATA.
1159 	 *
1160 	 * We follow the current spec and consider that 0x69/0x96
1161 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1162 	 */
1163 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1164 		DPRINTK("found ATA device by sig\n");
1165 		return ATA_DEV_ATA;
1166 	}
1167 
1168 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1169 		DPRINTK("found ATAPI device by sig\n");
1170 		return ATA_DEV_ATAPI;
1171 	}
1172 
1173 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1174 		DPRINTK("found PMP device by sig\n");
1175 		return ATA_DEV_PMP;
1176 	}
1177 
1178 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1179 		printk(KERN_INFO "ata: SEMB device ignored\n");
1180 		return ATA_DEV_SEMB_UNSUP; /* not yet */
1181 	}
1182 
1183 	DPRINTK("unknown device\n");
1184 	return ATA_DEV_UNKNOWN;
1185 }
1186 
1187 /**
1188  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1189  *	@id: IDENTIFY DEVICE results we will examine
1190  *	@s: string into which data is output
1191  *	@ofs: offset into identify device page
1192  *	@len: length of string to return. must be an even number.
1193  *
1194  *	The strings in the IDENTIFY DEVICE page are broken up into
1195  *	16-bit chunks.  Run through the string, and output each
1196  *	8-bit chunk linearly, regardless of platform.
1197  *
1198  *	LOCKING:
1199  *	caller.
1200  */
1201 
1202 void ata_id_string(const u16 *id, unsigned char *s,
1203 		   unsigned int ofs, unsigned int len)
1204 {
1205 	unsigned int c;
1206 
1207 	BUG_ON(len & 1);
1208 
1209 	while (len > 0) {
1210 		c = id[ofs] >> 8;
1211 		*s = c;
1212 		s++;
1213 
1214 		c = id[ofs] & 0xff;
1215 		*s = c;
1216 		s++;
1217 
1218 		ofs++;
1219 		len -= 2;
1220 	}
1221 }
1222 
1223 /**
1224  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1225  *	@id: IDENTIFY DEVICE results we will examine
1226  *	@s: string into which data is output
1227  *	@ofs: offset into identify device page
1228  *	@len: length of string to return. must be an odd number.
1229  *
1230  *	This function is identical to ata_id_string except that it
1231  *	trims trailing spaces and terminates the resulting string with
1232  *	null.  @len must be actual maximum length (even number) + 1.
1233  *
1234  *	LOCKING:
1235  *	caller.
1236  */
1237 void ata_id_c_string(const u16 *id, unsigned char *s,
1238 		     unsigned int ofs, unsigned int len)
1239 {
1240 	unsigned char *p;
1241 
1242 	ata_id_string(id, s, ofs, len - 1);
1243 
1244 	p = s + strnlen(s, len - 1);
1245 	while (p > s && p[-1] == ' ')
1246 		p--;
1247 	*p = '\0';
1248 }
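
/*
 * The usual consumer is IDENTIFY string extraction; a sketch using
 * the ATA_ID_PROD/ATA_ID_PROD_LEN offsets from <linux/ata.h>:
 */
#if 0	/* illustrative only */
	unsigned char model[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
	/* model is now NUL-terminated with trailing spaces trimmed */
#endif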
1249 
1250 static u64 ata_id_n_sectors(const u16 *id)
1251 {
1252 	if (ata_id_has_lba(id)) {
1253 		if (ata_id_has_lba48(id))
1254 			return ata_id_u64(id, 100);
1255 		else
1256 			return ata_id_u32(id, 60);
1257 	} else {
1258 		if (ata_id_current_chs_valid(id))
1259 			return ata_id_u32(id, 57);
1260 		else
1261 			return id[1] * id[3] * id[6];
1262 	}
1263 }
1264 
1265 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1266 {
1267 	u64 sectors = 0;
1268 
1269 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1270 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1271 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1272 	sectors |= (tf->lbah & 0xff) << 16;
1273 	sectors |= (tf->lbam & 0xff) << 8;
1274 	sectors |= (tf->lbal & 0xff);
1275 
1276 	return sectors;
1277 }
1278 
1279 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1280 {
1281 	u64 sectors = 0;
1282 
1283 	sectors |= (tf->device & 0x0f) << 24;
1284 	sectors |= (tf->lbah & 0xff) << 16;
1285 	sectors |= (tf->lbam & 0xff) << 8;
1286 	sectors |= (tf->lbal & 0xff);
1287 
1288 	return sectors;
1289 }
1290 
1291 /**
1292  *	ata_read_native_max_address - Read native max address
1293  *	@dev: target device
1294  *	@max_sectors: out parameter for the result native max address
1295  *
1296  *	Perform an LBA48 or LBA28 native size query upon the device in
1297  *	question.
1298  *
1299  *	RETURNS:
1300  *	0 on success, -EACCES if command is aborted by the drive.
1301  *	-EIO on other errors.
1302  */
1303 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1304 {
1305 	unsigned int err_mask;
1306 	struct ata_taskfile tf;
1307 	int lba48 = ata_id_has_lba48(dev->id);
1308 
1309 	ata_tf_init(dev, &tf);
1310 
1311 	/* always clear all address registers */
1312 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1313 
1314 	if (lba48) {
1315 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1316 		tf.flags |= ATA_TFLAG_LBA48;
1317 	} else
1318 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1319 
1320 	tf.protocol |= ATA_PROT_NODATA;
1321 	tf.device |= ATA_LBA;
1322 
1323 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1324 	if (err_mask) {
1325 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1326 			       "max address (err_mask=0x%x)\n", err_mask);
1327 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1328 			return -EACCES;
1329 		return -EIO;
1330 	}
1331 
1332 	if (lba48)
1333 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1334 	else
1335 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1336 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1337 		(*max_sectors)--;
1338 	return 0;
1339 }
1340 
1341 /**
1342  *	ata_set_max_sectors - Set max sectors
1343  *	@dev: target device
1344  *	@new_sectors: new max sectors value to set for the device
1345  *
1346  *	Set max sectors of @dev to @new_sectors.
1347  *
1348  *	RETURNS:
1349  *	0 on success, -EACCES if command is aborted or denied (due to
1350  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1351  *	errors.
1352  */
1353 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1354 {
1355 	unsigned int err_mask;
1356 	struct ata_taskfile tf;
1357 	int lba48 = ata_id_has_lba48(dev->id);
1358 
1359 	new_sectors--;
1360 
1361 	ata_tf_init(dev, &tf);
1362 
1363 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1364 
1365 	if (lba48) {
1366 		tf.command = ATA_CMD_SET_MAX_EXT;
1367 		tf.flags |= ATA_TFLAG_LBA48;
1368 
1369 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1370 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1371 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1372 	} else {
1373 		tf.command = ATA_CMD_SET_MAX;
1374 
1375 		tf.device |= (new_sectors >> 24) & 0xf;
1376 	}
1377 
1378 	tf.protocol |= ATA_PROT_NODATA;
1379 	tf.device |= ATA_LBA;
1380 
1381 	tf.lbal = (new_sectors >> 0) & 0xff;
1382 	tf.lbam = (new_sectors >> 8) & 0xff;
1383 	tf.lbah = (new_sectors >> 16) & 0xff;
1384 
1385 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1386 	if (err_mask) {
1387 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1388 			       "max address (err_mask=0x%x)\n", err_mask);
1389 		if (err_mask == AC_ERR_DEV &&
1390 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1391 			return -EACCES;
1392 		return -EIO;
1393 	}
1394 
1395 	return 0;
1396 }
1397 
1398 /**
1399  *	ata_hpa_resize		-	Resize a device with an HPA set
1400  *	@dev: Device to resize
1401  *
1402  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1403  *	it if required to the full size of the media. The caller must check
1404  *	the drive has the HPA feature set enabled.
1405  *
1406  *	RETURNS:
1407  *	0 on success, -errno on failure.
1408  */
1409 static int ata_hpa_resize(struct ata_device *dev)
1410 {
1411 	struct ata_eh_context *ehc = &dev->link->eh_context;
1412 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1413 	u64 sectors = ata_id_n_sectors(dev->id);
1414 	u64 native_sectors;
1415 	int rc;
1416 
1417 	/* do we need to do it? */
1418 	if (dev->class != ATA_DEV_ATA ||
1419 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1420 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1421 		return 0;
1422 
1423 	/* read native max address */
1424 	rc = ata_read_native_max_address(dev, &native_sectors);
1425 	if (rc) {
1426 		/* If device aborted the command or HPA isn't going to
1427 		 * be unlocked, skip HPA resizing.
1428 		 */
1429 		if (rc == -EACCES || !ata_ignore_hpa) {
1430 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1431 				       "broken, skipping HPA handling\n");
1432 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1433 
1434 			/* we can continue if device aborted the command */
1435 			if (rc == -EACCES)
1436 				rc = 0;
1437 		}
1438 
1439 		return rc;
1440 	}
1441 
1442 	/* nothing to do? */
1443 	if (native_sectors <= sectors || !ata_ignore_hpa) {
1444 		if (!print_info || native_sectors == sectors)
1445 			return 0;
1446 
1447 		if (native_sectors > sectors)
1448 			ata_dev_printk(dev, KERN_INFO,
1449 				"HPA detected: current %llu, native %llu\n",
1450 				(unsigned long long)sectors,
1451 				(unsigned long long)native_sectors);
1452 		else if (native_sectors < sectors)
1453 			ata_dev_printk(dev, KERN_WARNING,
1454 				"native sectors (%llu) is smaller than "
1455 				"sectors (%llu)\n",
1456 				(unsigned long long)native_sectors,
1457 				(unsigned long long)sectors);
1458 		return 0;
1459 	}
1460 
1461 	/* let's unlock HPA */
1462 	rc = ata_set_max_sectors(dev, native_sectors);
1463 	if (rc == -EACCES) {
1464 		/* if device aborted the command, skip HPA resizing */
1465 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1466 			       "(%llu -> %llu), skipping HPA handling\n",
1467 			       (unsigned long long)sectors,
1468 			       (unsigned long long)native_sectors);
1469 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1470 		return 0;
1471 	} else if (rc)
1472 		return rc;
1473 
1474 	/* re-read IDENTIFY data */
1475 	rc = ata_dev_reread_id(dev, 0);
1476 	if (rc) {
1477 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1478 			       "data after HPA resizing\n");
1479 		return rc;
1480 	}
1481 
1482 	if (print_info) {
1483 		u64 new_sectors = ata_id_n_sectors(dev->id);
1484 		ata_dev_printk(dev, KERN_INFO,
1485 			"HPA unlocked: %llu -> %llu, native %llu\n",
1486 			(unsigned long long)sectors,
1487 			(unsigned long long)new_sectors,
1488 			(unsigned long long)native_sectors);
1489 	}
1490 
1491 	return 0;
1492 }
1493 
1494 /**
1495  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1496  *	@id: IDENTIFY DEVICE page to dump
1497  *
1498  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1499  *	page.
1500  *
1501  *	LOCKING:
1502  *	caller.
1503  */
1504 
1505 static inline void ata_dump_id(const u16 *id)
1506 {
1507 	DPRINTK("49==0x%04x  "
1508 		"53==0x%04x  "
1509 		"63==0x%04x  "
1510 		"64==0x%04x  "
1511 		"75==0x%04x  \n",
1512 		id[49],
1513 		id[53],
1514 		id[63],
1515 		id[64],
1516 		id[75]);
1517 	DPRINTK("80==0x%04x  "
1518 		"81==0x%04x  "
1519 		"82==0x%04x  "
1520 		"83==0x%04x  "
1521 		"84==0x%04x  \n",
1522 		id[80],
1523 		id[81],
1524 		id[82],
1525 		id[83],
1526 		id[84]);
1527 	DPRINTK("88==0x%04x  "
1528 		"93==0x%04x\n",
1529 		id[88],
1530 		id[93]);
1531 }
1532 
1533 /**
1534  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1535  *	@id: IDENTIFY data to compute xfer mask from
1536  *
1537  *	Compute the xfermask for this device. This is not as trivial
1538  *	as it seems if we must consider early devices correctly.
1539  *
1540  *	FIXME: pre IDE drive timing (do we care ?).
1541  *
1542  *	LOCKING:
1543  *	None.
1544  *
1545  *	RETURNS:
1546  *	Computed xfermask
1547  */
1548 unsigned long ata_id_xfermask(const u16 *id)
1549 {
1550 	unsigned long pio_mask, mwdma_mask, udma_mask;
1551 
1552 	/* Usual case. Word 53 indicates word 64 is valid */
1553 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1554 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1555 		pio_mask <<= 3;
1556 		pio_mask |= 0x7;
1557 	} else {
1558 		/* If word 64 isn't valid then Word 51 high byte holds
1559 		 * the PIO timing number for the maximum. Turn it into
1560 		 * a mask.
1561 		 */
1562 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1563 		if (mode < 5)	/* Valid PIO range */
1564 			pio_mask = (2 << mode) - 1;
1565 		else
1566 			pio_mask = 1;
1567 
1568 		/* But wait.. there's more. Design your standards by
1569 		 * committee and you too can get a free iordy field to
1570 		 * process. However it's the speeds, not the modes, that
1571 		 * are supported... Note drivers using the timing API
1572 		 * will get this right anyway
1573 		 */
1574 	}
1575 
1576 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1577 
1578 	if (ata_id_is_cfa(id)) {
1579 		/*
1580 		 *	Process compact flash extended modes
1581 		 */
1582 		int pio = id[163] & 0x7;
1583 		int dma = (id[163] >> 3) & 7;
1584 
1585 		if (pio)
1586 			pio_mask |= (1 << 5);
1587 		if (pio > 1)
1588 			pio_mask |= (1 << 6);
1589 		if (dma)
1590 			mwdma_mask |= (1 << 3);
1591 		if (dma > 1)
1592 			mwdma_mask |= (1 << 4);
1593 	}
1594 
1595 	udma_mask = 0;
1596 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1597 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1598 
1599 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1600 }
1601 
1602 /**
1603  *	ata_pio_queue_task - Queue port_task
1604  *	@ap: The ata_port to queue port_task for
1605  *	@data: data for the port_task function to use
1606  *	@delay: delay time in msecs for workqueue function
1607  *
1608  *	Schedule the port_task function to run with @data after @delay
1609  *	msecs.  There is one port_task per port and it's the user's
1610  *	(low level driver's) responsibility to make sure that only
1611  *	one task is active at any given time.
1612  *
1613  *	libata core layer takes care of synchronization between
1614  *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
1615  *	synchronization.
1616  *
1617  *	LOCKING:
1618  *	Inherited from caller.
1619  */
1620 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1621 {
1622 	ap->port_task_data = data;
1623 
1624 	/* may fail if ata_port_flush_task() in progress */
1625 	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1626 }
1627 
1628 /**
1629  *	ata_port_flush_task - Flush port_task
1630  *	@ap: The ata_port to flush port_task for
1631  *
1632  *	After this function completes, port_task is guaranteed not to
1633  *	be running or scheduled.
1634  *
1635  *	LOCKING:
1636  *	Kernel thread context (may sleep)
1637  */
1638 void ata_port_flush_task(struct ata_port *ap)
1639 {
1640 	DPRINTK("ENTER\n");
1641 
1642 	cancel_rearming_delayed_work(&ap->port_task);
1643 
1644 	if (ata_msg_ctl(ap))
1645 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1646 }
1647 
1648 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1649 {
1650 	struct completion *waiting = qc->private_data;
1651 
1652 	complete(waiting);
1653 }
1654 
1655 /**
1656  *	ata_exec_internal_sg - execute libata internal command
1657  *	@dev: Device to which the command is sent
1658  *	@tf: Taskfile registers for the command and the result
1659  *	@cdb: CDB for packet command
1660  *	@dma_dir: Data transfer direction of the command
1661  *	@sgl: sg list for the data buffer of the command
1662  *	@n_elem: Number of sg entries
1663  *	@timeout: Timeout in msecs (0 for default)
1664  *
1665  *	Executes libata internal command with timeout.  @tf contains
1666  *	command on entry and result on return.  Timeout and error
1667  *	conditions are reported via return value.  No recovery action
1668  *	is taken after a command times out.  It's the caller's duty to
1669  *	clean up after timeout.
1670  *
1671  *	LOCKING:
1672  *	None.  Should be called with kernel context, might sleep.
1673  *
1674  *	RETURNS:
1675  *	Zero on success, AC_ERR_* mask on failure
1676  */
1677 unsigned ata_exec_internal_sg(struct ata_device *dev,
1678 			      struct ata_taskfile *tf, const u8 *cdb,
1679 			      int dma_dir, struct scatterlist *sgl,
1680 			      unsigned int n_elem, unsigned long timeout)
1681 {
1682 	struct ata_link *link = dev->link;
1683 	struct ata_port *ap = link->ap;
1684 	u8 command = tf->command;
1685 	int auto_timeout = 0;
1686 	struct ata_queued_cmd *qc;
1687 	unsigned int tag, preempted_tag;
1688 	u32 preempted_sactive, preempted_qc_active;
1689 	int preempted_nr_active_links;
1690 	DECLARE_COMPLETION_ONSTACK(wait);
1691 	unsigned long flags;
1692 	unsigned int err_mask;
1693 	int rc;
1694 
1695 	spin_lock_irqsave(ap->lock, flags);
1696 
1697 	/* no internal command while frozen */
1698 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1699 		spin_unlock_irqrestore(ap->lock, flags);
1700 		return AC_ERR_SYSTEM;
1701 	}
1702 
1703 	/* initialize internal qc */
1704 
1705 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1706 	 * drivers choke if any other tag is given.  This breaks
1707 	 * ata_tag_internal() test for those drivers.  Don't use new
1708 	 * EH stuff without converting to it.
1709 	 */
1710 	if (ap->ops->error_handler)
1711 		tag = ATA_TAG_INTERNAL;
1712 	else
1713 		tag = 0;
1714 
1715 	if (test_and_set_bit(tag, &ap->qc_allocated))
1716 		BUG();
1717 	qc = __ata_qc_from_tag(ap, tag);
1718 
1719 	qc->tag = tag;
1720 	qc->scsicmd = NULL;
1721 	qc->ap = ap;
1722 	qc->dev = dev;
1723 	ata_qc_reinit(qc);
1724 
1725 	preempted_tag = link->active_tag;
1726 	preempted_sactive = link->sactive;
1727 	preempted_qc_active = ap->qc_active;
1728 	preempted_nr_active_links = ap->nr_active_links;
1729 	link->active_tag = ATA_TAG_POISON;
1730 	link->sactive = 0;
1731 	ap->qc_active = 0;
1732 	ap->nr_active_links = 0;
1733 
1734 	/* prepare & issue qc */
1735 	qc->tf = *tf;
1736 	if (cdb)
1737 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1738 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1739 	qc->dma_dir = dma_dir;
1740 	if (dma_dir != DMA_NONE) {
1741 		unsigned int i, buflen = 0;
1742 		struct scatterlist *sg;
1743 
1744 		for_each_sg(sgl, sg, n_elem, i)
1745 			buflen += sg->length;
1746 
1747 		ata_sg_init(qc, sgl, n_elem);
1748 		qc->nbytes = buflen;
1749 	}
1750 
1751 	qc->private_data = &wait;
1752 	qc->complete_fn = ata_qc_complete_internal;
1753 
1754 	ata_qc_issue(qc);
1755 
1756 	spin_unlock_irqrestore(ap->lock, flags);
1757 
1758 	if (!timeout) {
1759 		if (ata_probe_timeout)
1760 			timeout = ata_probe_timeout * 1000;
1761 		else {
1762 			timeout = ata_internal_cmd_timeout(dev, command);
1763 			auto_timeout = 1;
1764 		}
1765 	}
1766 
1767 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1768 
1769 	ata_port_flush_task(ap);
1770 
1771 	if (!rc) {
1772 		spin_lock_irqsave(ap->lock, flags);
1773 
1774 		/* We're racing with irq here.  If we lose, the
1775 		 * following test prevents us from completing the qc
1776 		 * twice.  If we win, the port is frozen and will be
1777 		 * cleaned up by ->post_internal_cmd().
1778 		 */
1779 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1780 			qc->err_mask |= AC_ERR_TIMEOUT;
1781 
1782 			if (ap->ops->error_handler)
1783 				ata_port_freeze(ap);
1784 			else
1785 				ata_qc_complete(qc);
1786 
1787 			if (ata_msg_warn(ap))
1788 				ata_dev_printk(dev, KERN_WARNING,
1789 					"qc timeout (cmd 0x%x)\n", command);
1790 		}
1791 
1792 		spin_unlock_irqrestore(ap->lock, flags);
1793 	}
1794 
1795 	/* do post_internal_cmd */
1796 	if (ap->ops->post_internal_cmd)
1797 		ap->ops->post_internal_cmd(qc);
1798 
1799 	/* perform minimal error analysis */
1800 	if (qc->flags & ATA_QCFLAG_FAILED) {
1801 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1802 			qc->err_mask |= AC_ERR_DEV;
1803 
1804 		if (!qc->err_mask)
1805 			qc->err_mask |= AC_ERR_OTHER;
1806 
1807 		if (qc->err_mask & ~AC_ERR_OTHER)
1808 			qc->err_mask &= ~AC_ERR_OTHER;
1809 	}
1810 
1811 	/* finish up */
1812 	spin_lock_irqsave(ap->lock, flags);
1813 
1814 	*tf = qc->result_tf;
1815 	err_mask = qc->err_mask;
1816 
1817 	ata_qc_free(qc);
1818 	link->active_tag = preempted_tag;
1819 	link->sactive = preempted_sactive;
1820 	ap->qc_active = preempted_qc_active;
1821 	ap->nr_active_links = preempted_nr_active_links;
1822 
1823 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1824 	 * Until those drivers are fixed, we detect the condition
1825 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1826 	 * port.
1827 	 *
1828 	 * Note that this doesn't change any behavior as internal
1829 	 * command failure results in disabling the device in the
1830 	 * higher layer for LLDDs without new reset/EH callbacks.
1831 	 *
1832 	 * Kill the following code as soon as those drivers are fixed.
1833 	 */
1834 	if (ap->flags & ATA_FLAG_DISABLED) {
1835 		err_mask |= AC_ERR_SYSTEM;
1836 		ata_port_probe(ap);
1837 	}
1838 
1839 	spin_unlock_irqrestore(ap->lock, flags);
1840 
1841 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1842 		ata_internal_cmd_timed_out(dev, command);
1843 
1844 	return err_mask;
1845 }
1846 
1847 /**
1848  *	ata_exec_internal - execute libata internal command
1849  *	@dev: Device to which the command is sent
1850  *	@tf: Taskfile registers for the command and the result
1851  *	@cdb: CDB for packet command
1852  *	@dma_dir: Data transfer direction of the command
1853  *	@buf: Data buffer of the command
1854  *	@buflen: Length of data buffer
1855  *	@timeout: Timeout in msecs (0 for default)
1856  *
1857  *	Wrapper around ata_exec_internal_sg() which takes a simple
1858  *	buffer instead of an sg list.
1859  *
1860  *	LOCKING:
1861  *	None.  Should be called with kernel context, might sleep.
1862  *
1863  *	RETURNS:
1864  *	Zero on success, AC_ERR_* mask on failure
1865  */
1866 unsigned ata_exec_internal(struct ata_device *dev,
1867 			   struct ata_taskfile *tf, const u8 *cdb,
1868 			   int dma_dir, void *buf, unsigned int buflen,
1869 			   unsigned long timeout)
1870 {
1871 	struct scatterlist *psg = NULL, sg;
1872 	unsigned int n_elem = 0;
1873 
1874 	if (dma_dir != DMA_NONE) {
1875 		WARN_ON(!buf);
1876 		sg_init_one(&sg, buf, buflen);
1877 		psg = &sg;
1878 		n_elem++;
1879 	}
1880 
1881 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1882 				    timeout);
1883 }
1884 
1885 /**
1886  *	ata_do_simple_cmd - execute simple internal command
1887  *	@dev: Device to which the command is sent
1888  *	@cmd: Opcode to execute
1889  *
1890  *	Execute a 'simple' command that consists only of the opcode
1891  *	@cmd itself, without filling any other registers.
1892  *
1893  *	LOCKING:
1894  *	Kernel thread context (may sleep).
1895  *
1896  *	RETURNS:
1897  *	Zero on success, AC_ERR_* mask on failure
1898  */
1899 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1900 {
1901 	struct ata_taskfile tf;
1902 
1903 	ata_tf_init(dev, &tf);
1904 
1905 	tf.command = cmd;
1906 	tf.flags |= ATA_TFLAG_DEVICE;
1907 	tf.protocol = ATA_PROT_NODATA;
1908 
1909 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1910 }
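
/*
 * ata_flush_cache() later in this file is a typical caller, e.g.:
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 */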
1911 
1912 /**
1913  *	ata_pio_need_iordy	-	check if iordy needed
1914  *	@adev: ATA device
1915  *
1916  *	Check if the current speed of the device requires IORDY. Used
1917  *	by various controllers for chip configuration.
1918  */
1919 
1920 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1921 {
1922 	/* Controller doesn't support IORDY.  Probably a pointless check
1923 	   as the caller should know this */
1924 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1925 		return 0;
1926 	/* PIO3 and higher it is mandatory */
1927 	if (adev->pio_mode > XFER_PIO_2)
1928 		return 1;
1929 	/* We turn it on when possible */
1930 	if (ata_id_has_iordy(adev->id))
1931 		return 1;
1932 	return 0;
1933 }
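
/*
 * A minimal sketch of how an LLD's ->set_piomode() might consult
 * ata_pio_need_iordy().  The timing variable and the IORDY-enable bit
 * are hypothetical stand-ins for controller-specific registers.
 */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	u8 timing = 0;			/* hypothetical timing register value */

	if (ata_pio_need_iordy(adev))
		timing |= 0x02;		/* hypothetical IORDY-enable bit */

	/* ... program 'timing' into the controller here ... */
}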
1934 
1935 /**
1936  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1937  *	@adev: ATA device
1938  *
1939  *	Compute the highest mode possible if we are not using IORDY.  Return
1940  *	the mask of PIO modes usable without IORDY.
1941  */
1942 
1943 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1944 {
1945 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1946 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1947 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1948 		/* Is the speed faster than the drive allows without IORDY? */
1949 		if (pio) {
1950 			/* This is cycle times not frequency - watch the logic! */
1951 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1952 				return 3 << ATA_SHIFT_PIO;
1953 			return 7 << ATA_SHIFT_PIO;
1954 		}
1955 	}
1956 	return 3 << ATA_SHIFT_PIO;
1957 }
1958 
1959 /**
1960  *	ata_do_dev_read_id		-	default ID read method
1961  *	@dev: device
1962  *	@tf: proposed taskfile
1963  *	@id: data buffer
1964  *
1965  *	Issue the IDENTIFY taskfile and hand back the buffer containing
1966  *	identify data.  For some RAID controllers and for pre-ATA devices
1967  *	this function is wrapped or replaced by the driver.
1968  */
1969 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1970 					struct ata_taskfile *tf, u16 *id)
1971 {
1972 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1973 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1974 }
1975 
1976 /**
1977  *	ata_dev_read_id - Read ID data from the specified device
1978  *	@dev: target device
1979  *	@p_class: pointer to class of the target device (may be changed)
1980  *	@flags: ATA_READID_* flags
1981  *	@id: buffer to read IDENTIFY data into
1982  *
1983  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1984  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1985  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1986  *	for pre-ATA4 drives.
1987  *
1988  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1989  *	now we abort if we hit that case.
1990  *
1991  *	LOCKING:
1992  *	Kernel thread context (may sleep)
1993  *
1994  *	RETURNS:
1995  *	0 on success, -errno otherwise.
1996  */
1997 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1998 		    unsigned int flags, u16 *id)
1999 {
2000 	struct ata_port *ap = dev->link->ap;
2001 	unsigned int class = *p_class;
2002 	struct ata_taskfile tf;
2003 	unsigned int err_mask = 0;
2004 	const char *reason;
2005 	int may_fallback = 1, tried_spinup = 0;
2006 	int rc;
2007 
2008 	if (ata_msg_ctl(ap))
2009 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2010 
2011 retry:
2012 	ata_tf_init(dev, &tf);
2013 
2014 	switch (class) {
2015 	case ATA_DEV_ATA:
2016 		tf.command = ATA_CMD_ID_ATA;
2017 		break;
2018 	case ATA_DEV_ATAPI:
2019 		tf.command = ATA_CMD_ID_ATAPI;
2020 		break;
2021 	default:
2022 		rc = -ENODEV;
2023 		reason = "unsupported class";
2024 		goto err_out;
2025 	}
2026 
2027 	tf.protocol = ATA_PROT_PIO;
2028 
2029 	/* Some devices choke if TF registers contain garbage.  Make
2030 	 * sure those are properly initialized.
2031 	 */
2032 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2033 
2034 	/* Device presence detection is unreliable on some
2035 	 * controllers.  Always poll IDENTIFY if available.
2036 	 */
2037 	tf.flags |= ATA_TFLAG_POLLING;
2038 
2039 	if (ap->ops->read_id)
2040 		err_mask = ap->ops->read_id(dev, &tf, id);
2041 	else
2042 		err_mask = ata_do_dev_read_id(dev, &tf, id);
2043 
2044 	if (err_mask) {
2045 		if (err_mask & AC_ERR_NODEV_HINT) {
2046 			ata_dev_printk(dev, KERN_DEBUG,
2047 				       "NODEV after polling detection\n");
2048 			return -ENOENT;
2049 		}
2050 
2051 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2052 			/* Device or controller might have reported
2053 			 * the wrong device class.  Give a shot at the
2054 			 * other IDENTIFY if the current one is
2055 			 * aborted by the device.
2056 			 */
2057 			if (may_fallback) {
2058 				may_fallback = 0;
2059 
2060 				if (class == ATA_DEV_ATA)
2061 					class = ATA_DEV_ATAPI;
2062 				else
2063 					class = ATA_DEV_ATA;
2064 				goto retry;
2065 			}
2066 
2067 			/* Control reaches here iff the device aborted
2068 			 * both flavors of IDENTIFYs which happens
2069 			 * sometimes with phantom devices.
2070 			 */
2071 			ata_dev_printk(dev, KERN_DEBUG,
2072 				       "both IDENTIFYs aborted, assuming NODEV\n");
2073 			return -ENOENT;
2074 		}
2075 
2076 		rc = -EIO;
2077 		reason = "I/O error";
2078 		goto err_out;
2079 	}
2080 
2081 	/* Falling back doesn't make sense if ID data was read
2082 	 * successfully at least once.
2083 	 */
2084 	may_fallback = 0;
2085 
2086 	swap_buf_le16(id, ATA_ID_WORDS);
2087 
2088 	/* sanity check */
2089 	rc = -EINVAL;
2090 	reason = "device reports invalid type";
2091 
2092 	if (class == ATA_DEV_ATA) {
2093 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2094 			goto err_out;
2095 	} else {
2096 		if (ata_id_is_ata(id))
2097 			goto err_out;
2098 	}
2099 
2100 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2101 		tried_spinup = 1;
2102 		/*
2103 		 * Drive powered-up in standby mode, and requires a specific
2104 		 * SET_FEATURES spin-up subcommand before it will accept
2105 		 * anything other than the original IDENTIFY command.
2106 		 */
2107 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2108 		if (err_mask && id[2] != 0x738c) {
2109 			rc = -EIO;
2110 			reason = "SPINUP failed";
2111 			goto err_out;
2112 		}
2113 		/*
2114 		 * If the drive initially returned incomplete IDENTIFY info,
2115 		 * we now must reissue the IDENTIFY command.
2116 		 */
2117 		if (id[2] == 0x37c8)
2118 			goto retry;
2119 	}
2120 
2121 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2122 		/*
2123 		 * The exact sequence expected by certain pre-ATA4 drives is:
2124 		 * SRST RESET
2125 		 * IDENTIFY (optional in early ATA)
2126 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2127 		 * anything else..
2128 		 * Some drives were very specific about that exact sequence.
2129 		 *
2130  *	Note that ATA4 says LBA is mandatory so the second check
2131  *	should never trigger.
2132 		 */
2133 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2134 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2135 			if (err_mask) {
2136 				rc = -EIO;
2137 				reason = "INIT_DEV_PARAMS failed";
2138 				goto err_out;
2139 			}
2140 
2141 			/* current CHS translation info (id[53-58]) might be
2142 			 * changed. reread the identify device info.
2143 			 */
2144 			flags &= ~ATA_READID_POSTRESET;
2145 			goto retry;
2146 		}
2147 	}
2148 
2149 	*p_class = class;
2150 
2151 	return 0;
2152 
2153  err_out:
2154 	if (ata_msg_warn(ap))
2155 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2156 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2157 	return rc;
2158 }
2159 
2160 static inline u8 ata_dev_knobble(struct ata_device *dev)
2161 {
2162 	struct ata_port *ap = dev->link->ap;
2163 
2164 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2165 		return 0;
2166 
2167 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2168 }
2169 
2170 static void ata_dev_config_ncq(struct ata_device *dev,
2171 			       char *desc, size_t desc_sz)
2172 {
2173 	struct ata_port *ap = dev->link->ap;
2174 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2175 
2176 	if (!ata_id_has_ncq(dev->id)) {
2177 		desc[0] = '\0';
2178 		return;
2179 	}
2180 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2181 		snprintf(desc, desc_sz, "NCQ (not used)");
2182 		return;
2183 	}
2184 	if (ap->flags & ATA_FLAG_NCQ) {
2185 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2186 		dev->flags |= ATA_DFLAG_NCQ;
2187 	}
2188 
2189 	if (hdepth >= ddepth)
2190 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2191 	else
2192 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2193 }
2194 
2195 /**
2196  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2197  *	@dev: Target device to configure
2198  *
2199  *	Configure @dev according to @dev->id.  Generic and low-level
2200  *	driver specific fixups are also applied.
2201  *
2202  *	LOCKING:
2203  *	Kernel thread context (may sleep)
2204  *
2205  *	RETURNS:
2206  *	0 on success, -errno otherwise
2207  */
2208 int ata_dev_configure(struct ata_device *dev)
2209 {
2210 	struct ata_port *ap = dev->link->ap;
2211 	struct ata_eh_context *ehc = &dev->link->eh_context;
2212 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2213 	const u16 *id = dev->id;
2214 	unsigned long xfer_mask;
2215 	char revbuf[7];		/* XYZ-99\0 */
2216 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2217 	char modelbuf[ATA_ID_PROD_LEN+1];
2218 	int rc;
2219 
2220 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2221 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2222 			       __func__);
2223 		return 0;
2224 	}
2225 
2226 	if (ata_msg_probe(ap))
2227 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2228 
2229 	/* set horkage */
2230 	dev->horkage |= ata_dev_blacklisted(dev);
2231 	ata_force_horkage(dev);
2232 
2233 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2234 		ata_dev_printk(dev, KERN_INFO,
2235 			       "unsupported device, disabling\n");
2236 		ata_dev_disable(dev);
2237 		return 0;
2238 	}
2239 
2240 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2241 	    dev->class == ATA_DEV_ATAPI) {
2242 		ata_dev_printk(dev, KERN_WARNING,
2243 			"WARNING: ATAPI is %s, device ignored.\n",
2244 			atapi_enabled ? "not supported with this driver"
2245 				      : "disabled");
2246 		ata_dev_disable(dev);
2247 		return 0;
2248 	}
2249 
2250 	/* let ACPI work its magic */
2251 	rc = ata_acpi_on_devcfg(dev);
2252 	if (rc)
2253 		return rc;
2254 
2255 	/* massage HPA, do it early as it might change IDENTIFY data */
2256 	rc = ata_hpa_resize(dev);
2257 	if (rc)
2258 		return rc;
2259 
2260 	/* print device capabilities */
2261 	if (ata_msg_probe(ap))
2262 		ata_dev_printk(dev, KERN_DEBUG,
2263 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2264 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2265 			       __func__,
2266 			       id[49], id[82], id[83], id[84],
2267 			       id[85], id[86], id[87], id[88]);
2268 
2269 	/* initialize to-be-configured parameters */
2270 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2271 	dev->max_sectors = 0;
2272 	dev->cdb_len = 0;
2273 	dev->n_sectors = 0;
2274 	dev->cylinders = 0;
2275 	dev->heads = 0;
2276 	dev->sectors = 0;
2277 
2278 	/*
2279 	 * common ATA, ATAPI feature tests
2280 	 */
2281 
2282 	/* find max transfer mode; for printk only */
2283 	xfer_mask = ata_id_xfermask(id);
2284 
2285 	if (ata_msg_probe(ap))
2286 		ata_dump_id(id);
2287 
2288 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2289 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2290 			sizeof(fwrevbuf));
2291 
2292 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2293 			sizeof(modelbuf));
2294 
2295 	/* ATA-specific feature tests */
2296 	if (dev->class == ATA_DEV_ATA) {
2297 		if (ata_id_is_cfa(id)) {
2298 			if (id[162] & 1) /* CPRM may make this media unusable */
2299 				ata_dev_printk(dev, KERN_WARNING,
2300 					       "supports DRM functions and may "
2301 					       "not be fully accessible.\n");
2302 			snprintf(revbuf, 7, "CFA");
2303 		} else {
2304 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2305 			/* Warn the user if the device has TPM extensions */
2306 			if (ata_id_has_tpm(id))
2307 				ata_dev_printk(dev, KERN_WARNING,
2308 					       "supports DRM functions and may "
2309 					       "not be fully accessible.\n");
2310 		}
2311 
2312 		dev->n_sectors = ata_id_n_sectors(id);
2313 
2314 		if (dev->id[59] & 0x100)
2315 			dev->multi_count = dev->id[59] & 0xff;
2316 
2317 		if (ata_id_has_lba(id)) {
2318 			const char *lba_desc;
2319 			char ncq_desc[20];
2320 
2321 			lba_desc = "LBA";
2322 			dev->flags |= ATA_DFLAG_LBA;
2323 			if (ata_id_has_lba48(id)) {
2324 				dev->flags |= ATA_DFLAG_LBA48;
2325 				lba_desc = "LBA48";
2326 
2327 				if (dev->n_sectors >= (1UL << 28) &&
2328 				    ata_id_has_flush_ext(id))
2329 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2330 			}
2331 
2332 			/* config NCQ */
2333 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2334 
2335 			/* print device info to dmesg */
2336 			if (ata_msg_drv(ap) && print_info) {
2337 				ata_dev_printk(dev, KERN_INFO,
2338 					"%s: %s, %s, max %s\n",
2339 					revbuf, modelbuf, fwrevbuf,
2340 					ata_mode_string(xfer_mask));
2341 				ata_dev_printk(dev, KERN_INFO,
2342 					"%Lu sectors, multi %u: %s %s\n",
2343 					(unsigned long long)dev->n_sectors,
2344 					dev->multi_count, lba_desc, ncq_desc);
2345 			}
2346 		} else {
2347 			/* CHS */
2348 
2349 			/* Default translation */
2350 			dev->cylinders	= id[1];
2351 			dev->heads	= id[3];
2352 			dev->sectors	= id[6];
2353 
2354 			if (ata_id_current_chs_valid(id)) {
2355 				/* Current CHS translation is valid. */
2356 				dev->cylinders = id[54];
2357 				dev->heads     = id[55];
2358 				dev->sectors   = id[56];
2359 			}
2360 
2361 			/* print device info to dmesg */
2362 			if (ata_msg_drv(ap) && print_info) {
2363 				ata_dev_printk(dev, KERN_INFO,
2364 					"%s: %s, %s, max %s\n",
2365 					revbuf,	modelbuf, fwrevbuf,
2366 					ata_mode_string(xfer_mask));
2367 				ata_dev_printk(dev, KERN_INFO,
2368 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2369 					(unsigned long long)dev->n_sectors,
2370 					dev->multi_count, dev->cylinders,
2371 					dev->heads, dev->sectors);
2372 			}
2373 		}
2374 
2375 		dev->cdb_len = 16;
2376 	}
2377 
2378 	/* ATAPI-specific feature tests */
2379 	else if (dev->class == ATA_DEV_ATAPI) {
2380 		const char *cdb_intr_string = "";
2381 		const char *atapi_an_string = "";
2382 		const char *dma_dir_string = "";
2383 		u32 sntf;
2384 
2385 		rc = atapi_cdb_len(id);
2386 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2387 			if (ata_msg_warn(ap))
2388 				ata_dev_printk(dev, KERN_WARNING,
2389 					       "unsupported CDB len\n");
2390 			rc = -EINVAL;
2391 			goto err_out_nosup;
2392 		}
2393 		dev->cdb_len = (unsigned int) rc;
2394 
2395 		/* Enable ATAPI AN if both the host and device have
2396 		 * the support.  If PMP is attached, SNTF is required
2397 		 * to enable ATAPI AN to discern between PHY status
2398 		 * changed notifications and ATAPI ANs.
2399 		 */
2400 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2401 		    (!sata_pmp_attached(ap) ||
2402 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2403 			unsigned int err_mask;
2404 
2405 			/* issue SET feature command to turn this on */
2406 			err_mask = ata_dev_set_feature(dev,
2407 					SETFEATURES_SATA_ENABLE, SATA_AN);
2408 			if (err_mask)
2409 				ata_dev_printk(dev, KERN_ERR,
2410 					"failed to enable ATAPI AN "
2411 					"(err_mask=0x%x)\n", err_mask);
2412 			else {
2413 				dev->flags |= ATA_DFLAG_AN;
2414 				atapi_an_string = ", ATAPI AN";
2415 			}
2416 		}
2417 
2418 		if (ata_id_cdb_intr(dev->id)) {
2419 			dev->flags |= ATA_DFLAG_CDB_INTR;
2420 			cdb_intr_string = ", CDB intr";
2421 		}
2422 
2423 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2424 			dev->flags |= ATA_DFLAG_DMADIR;
2425 			dma_dir_string = ", DMADIR";
2426 		}
2427 
2428 		/* print device info to dmesg */
2429 		if (ata_msg_drv(ap) && print_info)
2430 			ata_dev_printk(dev, KERN_INFO,
2431 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2432 				       modelbuf, fwrevbuf,
2433 				       ata_mode_string(xfer_mask),
2434 				       cdb_intr_string, atapi_an_string,
2435 				       dma_dir_string);
2436 	}
2437 
2438 	/* determine max_sectors */
2439 	dev->max_sectors = ATA_MAX_SECTORS;
2440 	if (dev->flags & ATA_DFLAG_LBA48)
2441 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2442 
2443 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2444 		if (ata_id_has_hipm(dev->id))
2445 			dev->flags |= ATA_DFLAG_HIPM;
2446 		if (ata_id_has_dipm(dev->id))
2447 			dev->flags |= ATA_DFLAG_DIPM;
2448 	}
2449 
2450 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2451 	   200 sectors */
2452 	if (ata_dev_knobble(dev)) {
2453 		if (ata_msg_drv(ap) && print_info)
2454 			ata_dev_printk(dev, KERN_INFO,
2455 				       "applying bridge limits\n");
2456 		dev->udma_mask &= ATA_UDMA5;
2457 		dev->max_sectors = ATA_MAX_SECTORS;
2458 	}
2459 
2460 	if ((dev->class == ATA_DEV_ATAPI) &&
2461 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2462 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2463 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2464 	}
2465 
2466 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2467 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2468 					 dev->max_sectors);
2469 
2470 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2471 		dev->horkage |= ATA_HORKAGE_IPM;
2472 
2473 		/* reset link pm_policy for this port to no pm */
2474 		ap->pm_policy = MAX_PERFORMANCE;
2475 	}
2476 
2477 	if (ap->ops->dev_config)
2478 		ap->ops->dev_config(dev);
2479 
2480 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2481 		/* Let the user know. We don't want to disallow opens for
2482 		   rescue purposes, or in case the vendor is just a blithering
2483 		   idiot. Do this after the dev_config call as some controllers
2484 		   with buggy firmware may want to avoid reporting false device
2485 		   bugs */
2486 
2487 		if (print_info) {
2488 			ata_dev_printk(dev, KERN_WARNING,
2489 "Drive reports diagnostics failure. This may indicate a drive\n");
2490 			ata_dev_printk(dev, KERN_WARNING,
2491 "fault or invalid emulation. Contact drive vendor for information.\n");
2492 		}
2493 	}
2494 
2495 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2496 		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2497 			       "firmware update to be fully functional.\n");
2498 		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
2499 			       "or visit http://ata.wiki.kernel.org.\n");
2500 	}
2501 
2502 	return 0;
2503 
2504 err_out_nosup:
2505 	if (ata_msg_probe(ap))
2506 		ata_dev_printk(dev, KERN_DEBUG,
2507 			       "%s: EXIT, err\n", __func__);
2508 	return rc;
2509 }
2510 
2511 /**
2512  *	ata_cable_40wire	-	return 40 wire cable type
2513  *	@ap: port
2514  *
2515  *	Helper method for drivers which want to hardwire 40 wire cable
2516  *	detection.
2517  */
2518 
2519 int ata_cable_40wire(struct ata_port *ap)
2520 {
2521 	return ATA_CBL_PATA40;
2522 }
2523 
2524 /**
2525  *	ata_cable_80wire	-	return 80 wire cable type
2526  *	@ap: port
2527  *
2528  *	Helper method for drivers which want to hardwire 80 wire cable
2529  *	detection.
2530  */
2531 
2532 int ata_cable_80wire(struct ata_port *ap)
2533 {
2534 	return ATA_CBL_PATA80;
2535 }
2536 
2537 /**
2538  *	ata_cable_unknown	-	return unknown PATA cable.
2539  *	@ap: port
2540  *
2541  *	Helper method for drivers which have no PATA cable detection.
2542  */
2543 
2544 int ata_cable_unknown(struct ata_port *ap)
2545 {
2546 	return ATA_CBL_PATA_UNK;
2547 }
2548 
2549 /**
2550  *	ata_cable_ignore	-	return ignored PATA cable.
2551  *	@ap: port
2552  *
2553  *	Helper method for drivers which don't use cable type to limit
2554  *	transfer mode.
2555  */
2556 int ata_cable_ignore(struct ata_port *ap)
2557 {
2558 	return ATA_CBL_PATA_IGN;
2559 }
2560 
2561 /**
2562  *	ata_cable_sata	-	return SATA cable type
2563  *	@ap: port
2564  *
2565  *	Helper method for drivers which have SATA cables
2566  */
2567 
2568 int ata_cable_sata(struct ata_port *ap)
2569 {
2570 	return ATA_CBL_SATA;
2571 }
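
/*
 * A minimal sketch: a driver that knows its cable type in advance
 * simply plugs one of the helpers above into its port operations.
 * The ops structure itself is hypothetical.
 */
static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_base_port_ops,
	.cable_detect	= ata_cable_40wire,
};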
2572 
2573 /**
2574  *	ata_bus_probe - Reset and probe ATA bus
2575  *	@ap: Bus to probe
2576  *
2577  *	Master ATA bus probing function.  Initiates a hardware-dependent
2578  *	bus reset, then attempts to identify any devices found on
2579  *	the bus.
2580  *
2581  *	LOCKING:
2582  *	PCI/etc. bus probe sem.
2583  *
2584  *	RETURNS:
2585  *	Zero on success, negative errno otherwise.
2586  */
2587 
2588 int ata_bus_probe(struct ata_port *ap)
2589 {
2590 	unsigned int classes[ATA_MAX_DEVICES];
2591 	int tries[ATA_MAX_DEVICES];
2592 	int rc;
2593 	struct ata_device *dev;
2594 
2595 	ata_port_probe(ap);
2596 
2597 	ata_link_for_each_dev(dev, &ap->link)
2598 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2599 
2600  retry:
2601 	ata_link_for_each_dev(dev, &ap->link) {
2602 		/* If we issue an SRST then an ATA drive (not ATAPI)
2603 		 * may change configuration and be in PIO0 timing. If
2604 		 * we do a hard reset (or are coming from power on)
2605 		 * this is true for ATA or ATAPI. Until we've set a
2606 		 * suitable controller mode we should not touch the
2607 		 * bus as we may be talking too fast.
2608 		 */
2609 		dev->pio_mode = XFER_PIO_0;
2610 
2611 		/* If the controller has a pio mode setup function
2612 		 * then use it to set the chipset to rights. Don't
2613 		 * touch the DMA setup as that will be dealt with when
2614 		 * configuring devices.
2615 		 */
2616 		if (ap->ops->set_piomode)
2617 			ap->ops->set_piomode(ap, dev);
2618 	}
2619 
2620 	/* reset and determine device classes */
2621 	ap->ops->phy_reset(ap);
2622 
2623 	ata_link_for_each_dev(dev, &ap->link) {
2624 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2625 		    dev->class != ATA_DEV_UNKNOWN)
2626 			classes[dev->devno] = dev->class;
2627 		else
2628 			classes[dev->devno] = ATA_DEV_NONE;
2629 
2630 		dev->class = ATA_DEV_UNKNOWN;
2631 	}
2632 
2633 	ata_port_probe(ap);
2634 
2635 	/* read IDENTIFY page and configure devices. We have to do the identify
2636 	   specific sequence bass-ackwards so that PDIAG- is released by
2637 	   the slave device */
2638 
2639 	ata_link_for_each_dev_reverse(dev, &ap->link) {
2640 		if (tries[dev->devno])
2641 			dev->class = classes[dev->devno];
2642 
2643 		if (!ata_dev_enabled(dev))
2644 			continue;
2645 
2646 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2647 				     dev->id);
2648 		if (rc)
2649 			goto fail;
2650 	}
2651 
2652 	/* Now ask for the cable type as PDIAG- should have been released */
2653 	if (ap->ops->cable_detect)
2654 		ap->cbl = ap->ops->cable_detect(ap);
2655 
2656 	/* We may have SATA bridge glue hiding here irrespective of the
2657 	   reported cable types and sensed types */
2658 	ata_link_for_each_dev(dev, &ap->link) {
2659 		if (!ata_dev_enabled(dev))
2660 			continue;
2661 		/* SATA drives indicate we have a bridge. We don't know which
2662 		   end of the link the bridge is on, which is a problem */
2663 		if (ata_id_is_sata(dev->id))
2664 			ap->cbl = ATA_CBL_SATA;
2665 	}
2666 
2667 	/* After the identify sequence we can now set up the devices. We do
2668 	   this in the normal order so that the user doesn't get confused */
2669 
2670 	ata_link_for_each_dev(dev, &ap->link) {
2671 		if (!ata_dev_enabled(dev))
2672 			continue;
2673 
2674 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2675 		rc = ata_dev_configure(dev);
2676 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2677 		if (rc)
2678 			goto fail;
2679 	}
2680 
2681 	/* configure transfer mode */
2682 	rc = ata_set_mode(&ap->link, &dev);
2683 	if (rc)
2684 		goto fail;
2685 
2686 	ata_link_for_each_dev(dev, &ap->link)
2687 		if (ata_dev_enabled(dev))
2688 			return 0;
2689 
2690 	/* no device present, disable port */
2691 	ata_port_disable(ap);
2692 	return -ENODEV;
2693 
2694  fail:
2695 	tries[dev->devno]--;
2696 
2697 	switch (rc) {
2698 	case -EINVAL:
2699 		/* eeek, something went very wrong, give up */
2700 		tries[dev->devno] = 0;
2701 		break;
2702 
2703 	case -ENODEV:
2704 		/* give it just one more chance */
2705 		tries[dev->devno] = min(tries[dev->devno], 1);
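		/* fall through */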
2706 	case -EIO:
2707 		if (tries[dev->devno] == 1) {
2708 			/* This is the last chance, better to slow
2709 			 * down than lose it.
2710 			 */
2711 			sata_down_spd_limit(&ap->link);
2712 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2713 		}
2714 	}
2715 
2716 	if (!tries[dev->devno])
2717 		ata_dev_disable(dev);
2718 
2719 	goto retry;
2720 }
2721 
2722 /**
2723  *	ata_port_probe - Mark port as enabled
2724  *	@ap: Port for which we indicate enablement
2725  *
2726  *	Modify @ap data structure such that the system
2727  *	thinks that the entire port is enabled.
2728  *
2729  *	LOCKING: host lock, or some other form of
2730  *	serialization.
2731  */
2732 
2733 void ata_port_probe(struct ata_port *ap)
2734 {
2735 	ap->flags &= ~ATA_FLAG_DISABLED;
2736 }
2737 
2738 /**
2739  *	sata_print_link_status - Print SATA link status
2740  *	@link: SATA link to printk link status about
2741  *
2742  *	This function prints link speed and status of a SATA link.
2743  *
2744  *	LOCKING:
2745  *	None.
2746  */
2747 static void sata_print_link_status(struct ata_link *link)
2748 {
2749 	u32 sstatus, scontrol, tmp;
2750 
2751 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2752 		return;
2753 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2754 
2755 	if (ata_phys_link_online(link)) {
2756 		tmp = (sstatus >> 4) & 0xf;
2757 		ata_link_printk(link, KERN_INFO,
2758 				"SATA link up %s (SStatus %X SControl %X)\n",
2759 				sata_spd_string(tmp), sstatus, scontrol);
2760 	} else {
2761 		ata_link_printk(link, KERN_INFO,
2762 				"SATA link down (SStatus %X SControl %X)\n",
2763 				sstatus, scontrol);
2764 	}
2765 }
2766 
2767 /**
2768  *	ata_dev_pair		-	return other device on cable
2769  *	@adev: device
2770  *
2771  *	Obtain the other device on the same cable, or NULL if none
2772  *	is present.
2773  */
2774 
2775 struct ata_device *ata_dev_pair(struct ata_device *adev)
2776 {
2777 	struct ata_link *link = adev->link;
2778 	struct ata_device *pair = &link->device[1 - adev->devno];
2779 	if (!ata_dev_enabled(pair))
2780 		return NULL;
2781 	return pair;
2782 }
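
/*
 * A minimal usage sketch: PATA timing code often needs the other
 * device on the cable, e.g. to honour shared-cable constraints.  The
 * helper name is hypothetical.
 */
static int example_pair_is_atapi(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);

	return pair && pair->class == ATA_DEV_ATAPI;
}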
2783 
2784 /**
2785  *	ata_port_disable - Disable port.
2786  *	@ap: Port to be disabled.
2787  *
2788  *	Modify @ap data structure such that the system
2789  *	thinks that the entire port is disabled, and should
2790  *	never attempt to probe or communicate with devices
2791  *	on this port.
2792  *
2793  *	LOCKING: host lock, or some other form of
2794  *	serialization.
2795  */
2796 
2797 void ata_port_disable(struct ata_port *ap)
2798 {
2799 	ap->link.device[0].class = ATA_DEV_NONE;
2800 	ap->link.device[1].class = ATA_DEV_NONE;
2801 	ap->flags |= ATA_FLAG_DISABLED;
2802 }
2803 
2804 /**
2805  *	sata_down_spd_limit - adjust SATA spd limit downward
2806  *	@link: Link to adjust SATA spd limit for
2807  *
2808  *	Adjust SATA spd limit of @link downward.  Note that this
2809  *	function only adjusts the limit.  The change must be applied
2810  *	using sata_set_spd().
2811  *
2812  *	LOCKING:
2813  *	Inherited from caller.
2814  *
2815  *	RETURNS:
2816  *	0 on success, negative errno on failure
2817  */
2818 int sata_down_spd_limit(struct ata_link *link)
2819 {
2820 	u32 sstatus, spd, mask;
2821 	int rc, highbit;
2822 
2823 	if (!sata_scr_valid(link))
2824 		return -EOPNOTSUPP;
2825 
2826 	/* If SCR can be read, use it to determine the current SPD.
2827 	 * If not, use cached value in link->sata_spd.
2828 	 */
2829 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2830 	if (rc == 0)
2831 		spd = (sstatus >> 4) & 0xf;
2832 	else
2833 		spd = link->sata_spd;
2834 
2835 	mask = link->sata_spd_limit;
2836 	if (mask <= 1)
2837 		return -EINVAL;
2838 
2839 	/* unconditionally mask off the highest bit */
2840 	highbit = fls(mask) - 1;
2841 	mask &= ~(1 << highbit);
2842 
2843 	/* Mask off all speeds higher than or equal to the current
2844 	 * one.  Force 1.5Gbps if current SPD is not available.
2845 	 */
2846 	if (spd > 1)
2847 		mask &= (1 << (spd - 1)) - 1;
2848 	else
2849 		mask &= 1;
2850 
2851 	/* were we already at the bottom? */
2852 	if (!mask)
2853 		return -EINVAL;
2854 
2855 	link->sata_spd_limit = mask;
2856 
2857 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2858 			sata_spd_string(fls(mask)));
2859 
2860 	return 0;
2861 }
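
/*
 * Worked example for sata_down_spd_limit() above: with sata_spd_limit
 * == 0x7 (1.5/3.0/6.0 Gbps all allowed) and the link currently at
 * 3.0 Gbps (spd == 2), the top bit is cleared first (mask == 0x3),
 * then speeds >= the current one are masked off ((1 << (2 - 1)) - 1
 * == 0x1), leaving the link limited to 1.5 Gbps.
 */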
2862 
2863 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2864 {
2865 	struct ata_link *host_link = &link->ap->link;
2866 	u32 limit, target, spd;
2867 
2868 	limit = link->sata_spd_limit;
2869 
2870 	/* Don't configure downstream link faster than upstream link.
2871 	 * It doesn't speed up anything and some PMPs choke on such
2872 	 * configuration.
2873 	 */
2874 	if (!ata_is_host_link(link) && host_link->sata_spd)
2875 		limit &= (1 << host_link->sata_spd) - 1;
2876 
2877 	if (limit == UINT_MAX)
2878 		target = 0;
2879 	else
2880 		target = fls(limit);
2881 
2882 	spd = (*scontrol >> 4) & 0xf;
2883 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2884 
2885 	return spd != target;
2886 }
2887 
2888 /**
2889  *	sata_set_spd_needed - is SATA spd configuration needed
2890  *	@link: Link in question
2891  *
2892  *	Test whether the spd limit in SControl matches
2893  *	@link->sata_spd_limit.  This function is used to determine
2894  *	whether hardreset is necessary to apply SATA spd
2895  *	configuration.
2896  *
2897  *	LOCKING:
2898  *	Inherited from caller.
2899  *
2900  *	RETURNS:
2901  *	1 if SATA spd configuration is needed, 0 otherwise.
2902  */
2903 static int sata_set_spd_needed(struct ata_link *link)
2904 {
2905 	u32 scontrol;
2906 
2907 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2908 		return 1;
2909 
2910 	return __sata_set_spd_needed(link, &scontrol);
2911 }
2912 
2913 /**
2914  *	sata_set_spd - set SATA spd according to spd limit
2915  *	@link: Link to set SATA spd for
2916  *
2917  *	Set SATA spd of @link according to sata_spd_limit.
2918  *
2919  *	LOCKING:
2920  *	Inherited from caller.
2921  *
2922  *	RETURNS:
2923  *	0 if spd doesn't need to be changed, 1 if spd has been
2924  *	changed.  Negative errno if SCR registers are inaccessible.
2925  */
2926 int sata_set_spd(struct ata_link *link)
2927 {
2928 	u32 scontrol;
2929 	int rc;
2930 
2931 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2932 		return rc;
2933 
2934 	if (!__sata_set_spd_needed(link, &scontrol))
2935 		return 0;
2936 
2937 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2938 		return rc;
2939 
2940 	return 1;
2941 }
2942 
2943 /*
2944  * This mode timing computation functionality is ported over from
2945  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2946  */
2947 /*
2948  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2949  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2950  * for UDMA6, which is currently supported only by Maxtor drives.
2951  *
2952  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2953  */
2954 
2955 static const struct ata_timing ata_timing[] = {
2956 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2957 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2958 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2959 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2960 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2961 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2962 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2963 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2964 
2965 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2966 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2967 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2968 
2969 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2970 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2971 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2972 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2973 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2974 
2975 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2976 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2977 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2978 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2979 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2980 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2981 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2982 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2983 
2984 	{ 0xFF }
2985 };
2986 
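/*
 * ENOUGH() is a ceiling division: the number of clock periods of
 * length 'unit' needed to cover at least 'v'.  EZ() additionally maps
 * an unused (zero) field to zero clocks.  For example, with a ~33 MHz
 * bus clock (T == 1000000000 / 33333 == 30000 ps, as computed by
 * several PATA drivers), a 70 ns setup time quantizes to
 * ENOUGH(70 * 1000, 30000) == 3 clocks.
 */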
2987 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2988 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2989 
2990 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2991 {
2992 	q->setup   = EZ(t->setup   * 1000,  T);
2993 	q->act8b   = EZ(t->act8b   * 1000,  T);
2994 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2995 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2996 	q->active  = EZ(t->active  * 1000,  T);
2997 	q->recover = EZ(t->recover * 1000,  T);
2998 	q->cycle   = EZ(t->cycle   * 1000,  T);
2999 	q->udma    = EZ(t->udma    * 1000, UT);
3000 }
3001 
3002 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3003 		      struct ata_timing *m, unsigned int what)
3004 {
3005 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3006 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3007 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3008 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3009 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3010 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3011 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3012 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3013 }
3014 
3015 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3016 {
3017 	const struct ata_timing *t = ata_timing;
3018 
3019 	while (xfer_mode > t->mode)
3020 		t++;
3021 
3022 	if (xfer_mode == t->mode)
3023 		return t;
3024 	return NULL;
3025 }
3026 
3027 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3028 		       struct ata_timing *t, int T, int UT)
3029 {
3030 	const struct ata_timing *s;
3031 	struct ata_timing p;
3032 
3033 	/*
3034 	 * Find the mode.
3035 	 */
3036 
3037 	if (!(s = ata_timing_find_mode(speed)))
3038 		return -EINVAL;
3039 
3040 	memcpy(t, s, sizeof(*s));
3041 
3042 	/*
3043 	 * If the drive is an EIDE drive, it can tell us it needs extended
3044 	 * PIO/MW_DMA cycle timing.
3045 	 */
3046 
3047 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3048 		memset(&p, 0, sizeof(p));
3049 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3050 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3051 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
3052 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
3053 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3054 		}
3055 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3056 	}
3057 
3058 	/*
3059 	 * Convert the timing to bus clock counts.
3060 	 */
3061 
3062 	ata_timing_quantize(t, t, T, UT);
3063 
3064 	/*
3065 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3066 	 * S.M.A.R.T. and some other commands.  We have to ensure that the
3067 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3068 	 */
3069 
3070 	if (speed > XFER_PIO_6) {
3071 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3072 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3073 	}
3074 
3075 	/*
3076 	 * Lengthen active & recovery time so that cycle time is correct.
3077 	 */
3078 
3079 	if (t->act8b + t->rec8b < t->cyc8b) {
3080 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3081 		t->rec8b = t->cyc8b - t->act8b;
3082 	}
3083 
3084 	if (t->active + t->recover < t->cycle) {
3085 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3086 		t->recover = t->cycle - t->active;
3087 	}
3088 
3089 	/* In a few cases quantisation may produce enough errors to
3090 	   leave t->cycle too low for the sum of active and recovery;
3091 	   if so, we must correct this */
3092 	if (t->active + t->recover > t->cycle)
3093 		t->cycle = t->active + t->recover;
3094 
3095 	return 0;
3096 }
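
/*
 * A minimal sketch: an LLD converting a transfer mode into clocked
 * timings on a ~33 MHz bus, the pattern several PATA drivers follow.
 * The function name is hypothetical and register programming is
 * elided.
 */
static void example_load_timings(struct ata_device *adev, u8 speed)
{
	struct ata_timing t;
	int T = 1000000000 / 33333;	/* bus clock period in ps */

	if (ata_timing_compute(adev, speed, &t, T, T) == 0) {
		/* ... program t.setup, t.active, t.recover, t.cycle ... */
	}
}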
3097 
3098 /**
3099  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3100  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3101  *	@cycle: cycle duration in ns
3102  *
3103  *	Return matching xfer mode for @cycle.  The returned mode is of
3104  *	the transfer type specified by @xfer_shift.  If @cycle is too
3105  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3106  *	than the fastest known mode, the fasted mode is returned.
3107  *	than the fastest known mode, the fastest mode is returned.
3108  *	LOCKING:
3109  *	None.
3110  *
3111  *	RETURNS:
3112  *	Matching xfer_mode, 0xff if no match found.
3113  */
3114 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3115 {
3116 	u8 base_mode = 0xff, last_mode = 0xff;
3117 	const struct ata_xfer_ent *ent;
3118 	const struct ata_timing *t;
3119 
3120 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3121 		if (ent->shift == xfer_shift)
3122 			base_mode = ent->base;
3123 
3124 	for (t = ata_timing_find_mode(base_mode);
3125 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3126 		unsigned short this_cycle;
3127 
3128 		switch (xfer_shift) {
3129 		case ATA_SHIFT_PIO:
3130 		case ATA_SHIFT_MWDMA:
3131 			this_cycle = t->cycle;
3132 			break;
3133 		case ATA_SHIFT_UDMA:
3134 			this_cycle = t->udma;
3135 			break;
3136 		default:
3137 			return 0xff;
3138 		}
3139 
3140 		if (cycle > this_cycle)
3141 			break;
3142 
3143 		last_mode = t->mode;
3144 	}
3145 
3146 	return last_mode;
3147 }
3148 
3149 /**
3150  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3151  *	@dev: Device to adjust xfer masks
3152  *	@sel: ATA_DNXFER_* selector
3153  *
3154  *	Adjust xfer masks of @dev downward.  Note that this function
3155  *	does not apply the change.  Invoking ata_set_mode() afterwards
3156  *	will apply the limit.
3157  *
3158  *	LOCKING:
3159  *	Inherited from caller.
3160  *
3161  *	RETURNS:
3162  *	0 on success, negative errno on failure
3163  */
3164 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3165 {
3166 	char buf[32];
3167 	unsigned long orig_mask, xfer_mask;
3168 	unsigned long pio_mask, mwdma_mask, udma_mask;
3169 	int quiet, highbit;
3170 
3171 	quiet = !!(sel & ATA_DNXFER_QUIET);
3172 	sel &= ~ATA_DNXFER_QUIET;
3173 
3174 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3175 						  dev->mwdma_mask,
3176 						  dev->udma_mask);
3177 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3178 
3179 	switch (sel) {
3180 	case ATA_DNXFER_PIO:
3181 		highbit = fls(pio_mask) - 1;
3182 		pio_mask &= ~(1 << highbit);
3183 		break;
3184 
3185 	case ATA_DNXFER_DMA:
3186 		if (udma_mask) {
3187 			highbit = fls(udma_mask) - 1;
3188 			udma_mask &= ~(1 << highbit);
3189 			if (!udma_mask)
3190 				return -ENOENT;
3191 		} else if (mwdma_mask) {
3192 			highbit = fls(mwdma_mask) - 1;
3193 			mwdma_mask &= ~(1 << highbit);
3194 			if (!mwdma_mask)
3195 				return -ENOENT;
3196 		}
3197 		break;
3198 
3199 	case ATA_DNXFER_40C:
3200 		udma_mask &= ATA_UDMA_MASK_40C;
3201 		break;
3202 
3203 	case ATA_DNXFER_FORCE_PIO0:
3204 		pio_mask &= 1;
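		/* fall through */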
3205 	case ATA_DNXFER_FORCE_PIO:
3206 		mwdma_mask = 0;
3207 		udma_mask = 0;
3208 		break;
3209 
3210 	default:
3211 		BUG();
3212 	}
3213 
3214 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3215 
3216 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3217 		return -ENOENT;
3218 
3219 	if (!quiet) {
3220 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3221 			snprintf(buf, sizeof(buf), "%s:%s",
3222 				 ata_mode_string(xfer_mask),
3223 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3224 		else
3225 			snprintf(buf, sizeof(buf), "%s",
3226 				 ata_mode_string(xfer_mask));
3227 
3228 		ata_dev_printk(dev, KERN_WARNING,
3229 			       "limiting speed to %s\n", buf);
3230 	}
3231 
3232 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3233 			    &dev->udma_mask);
3234 
3235 	return 0;
3236 }
3237 
3238 static int ata_dev_set_mode(struct ata_device *dev)
3239 {
3240 	struct ata_eh_context *ehc = &dev->link->eh_context;
3241 	const char *dev_err_whine = "";
3242 	int ign_dev_err = 0;
3243 	unsigned int err_mask;
3244 	int rc;
3245 
3246 	dev->flags &= ~ATA_DFLAG_PIO;
3247 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3248 		dev->flags |= ATA_DFLAG_PIO;
3249 
3250 	err_mask = ata_dev_set_xfermode(dev);
3251 
3252 	if (err_mask & ~AC_ERR_DEV)
3253 		goto fail;
3254 
3255 	/* revalidate */
3256 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3257 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3258 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3259 	if (rc)
3260 		return rc;
3261 
3262 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3263 		/* Old CFA may refuse this command, which is just fine */
3264 		if (ata_id_is_cfa(dev->id))
3265 			ign_dev_err = 1;
3266 		/* Catch several broken garbage emulations plus some pre
3267 		   ATA devices */
3268 		if (ata_id_major_version(dev->id) == 0 &&
3269 					dev->pio_mode <= XFER_PIO_2)
3270 			ign_dev_err = 1;
3271 		/* Some very old devices and some bad newer ones fail
3272 		   any kind of SET_XFERMODE request but support PIO0-2
3273 		   timings and no IORDY */
3274 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3275 			ign_dev_err = 1;
3276 	}
3277 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3278 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3279 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3280 	    dev->dma_mode == XFER_MW_DMA_0 &&
3281 	    (dev->id[63] >> 8) & 1)
3282 		ign_dev_err = 1;
3283 
3284 	/* if the device is actually configured correctly, ignore dev err */
3285 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3286 		ign_dev_err = 1;
3287 
3288 	if (err_mask & AC_ERR_DEV) {
3289 		if (!ign_dev_err)
3290 			goto fail;
3291 		else
3292 			dev_err_whine = " (device error ignored)";
3293 	}
3294 
3295 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3296 		dev->xfer_shift, (int)dev->xfer_mode);
3297 
3298 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3299 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3300 		       dev_err_whine);
3301 
3302 	return 0;
3303 
3304  fail:
3305 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3306 		       "(err_mask=0x%x)\n", err_mask);
3307 	return -EIO;
3308 }
3309 
3310 /**
3311  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3312  *	@link: link on which timings will be programmed
3313  *	@r_failed_dev: out parameter for failed device
3314  *
3315  *	Standard implementation of the function used to tune and set
3316  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3317  *	ata_dev_set_mode() fails, pointer to the failing device is
3318  *	returned in @r_failed_dev.
3319  *
3320  *	LOCKING:
3321  *	PCI/etc. bus probe sem.
3322  *
3323  *	RETURNS:
3324  *	0 on success, negative errno otherwise
3325  */
3326 
3327 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3328 {
3329 	struct ata_port *ap = link->ap;
3330 	struct ata_device *dev;
3331 	int rc = 0, used_dma = 0, found = 0;
3332 
3333 	/* step 1: calculate xfer_mask */
3334 	ata_link_for_each_dev(dev, link) {
3335 		unsigned long pio_mask, dma_mask;
3336 		unsigned int mode_mask;
3337 
3338 		if (!ata_dev_enabled(dev))
3339 			continue;
3340 
3341 		mode_mask = ATA_DMA_MASK_ATA;
3342 		if (dev->class == ATA_DEV_ATAPI)
3343 			mode_mask = ATA_DMA_MASK_ATAPI;
3344 		else if (ata_id_is_cfa(dev->id))
3345 			mode_mask = ATA_DMA_MASK_CFA;
3346 
3347 		ata_dev_xfermask(dev);
3348 		ata_force_xfermask(dev);
3349 
3350 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3352 
3353 		if (libata_dma_mask & mode_mask)
3354 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3355 		else
3356 			dma_mask = 0;
3357 
3358 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3359 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3360 
3361 		found = 1;
3362 		if (ata_dma_enabled(dev))
3363 			used_dma = 1;
3364 	}
3365 	if (!found)
3366 		goto out;
3367 
3368 	/* step 2: always set host PIO timings */
3369 	ata_link_for_each_dev(dev, link) {
3370 		if (!ata_dev_enabled(dev))
3371 			continue;
3372 
3373 		if (dev->pio_mode == 0xff) {
3374 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3375 			rc = -EINVAL;
3376 			goto out;
3377 		}
3378 
3379 		dev->xfer_mode = dev->pio_mode;
3380 		dev->xfer_shift = ATA_SHIFT_PIO;
3381 		if (ap->ops->set_piomode)
3382 			ap->ops->set_piomode(ap, dev);
3383 	}
3384 
3385 	/* step 3: set host DMA timings */
3386 	ata_link_for_each_dev(dev, link) {
3387 		if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev))
3388 			continue;
3389 
3390 		dev->xfer_mode = dev->dma_mode;
3391 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3392 		if (ap->ops->set_dmamode)
3393 			ap->ops->set_dmamode(ap, dev);
3394 	}
3395 
3396 	/* step 4: update devices' xfer mode */
3397 	ata_link_for_each_dev(dev, link) {
3398 		/* don't update suspended devices' xfer mode */
3399 		if (!ata_dev_enabled(dev))
3400 			continue;
3401 
3402 		rc = ata_dev_set_mode(dev);
3403 		if (rc)
3404 			goto out;
3405 	}
3406 
3407 	/* Record simplex status. If we selected DMA then the other
3408 	 * host channels are not permitted to do so.
3409 	 */
3410 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3411 		ap->host->simplex_claimed = ap;
3412 
3413  out:
3414 	if (rc)
3415 		*r_failed_dev = dev;
3416 	return rc;
3417 }
3418 
3419 /**
3420  *	ata_wait_ready - wait for link to become ready
3421  *	@link: link to be waited on
3422  *	@deadline: deadline jiffies for the operation
3423  *	@check_ready: callback to check link readiness
3424  *
3425  *	Wait for @link to become ready.  @check_ready should return
3426  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3427  *	link doesn't seem to be occupied, other errno for other error
3428  *	conditions.
3429  *
3430  *	Transient -ENODEV conditions are allowed for
3431  *	ATA_TMOUT_FF_WAIT.
3432  *
3433  *	LOCKING:
3434  *	EH context.
3435  *
3436  *	RETURNS:
3437  *	0 if @link is ready before @deadline; otherwise, -errno.
3438  */
3439 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3440 		   int (*check_ready)(struct ata_link *link))
3441 {
3442 	unsigned long start = jiffies;
3443 	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3444 	int warned = 0;
3445 
3446 	/* Slave readiness can't be tested separately from master.  On
3447 	 * M/S emulation configuration, this function should be called
3448 	 * only on the master and it will handle both master and slave.
3449 	 */
3450 	WARN_ON(link == link->ap->slave_link);
3451 
3452 	if (time_after(nodev_deadline, deadline))
3453 		nodev_deadline = deadline;
3454 
3455 	while (1) {
3456 		unsigned long now = jiffies;
3457 		int ready, tmp;
3458 
3459 		ready = tmp = check_ready(link);
3460 		if (ready > 0)
3461 			return 0;
3462 
3463 		/* -ENODEV could be transient.  Ignore -ENODEV if link
3464 		 * is online.  Also, some SATA devices take a long
3465 		 * time to clear 0xff after reset.  For example,
3466 		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3467 		 * GoVault needs even more than that.  Wait for
3468 		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3469 		 *
3470 		 * Note that some PATA controllers (pata_ali) explode
3471 		 * if status register is read more than once when
3472 		 * there's no device attached.
3473 		 */
3474 		if (ready == -ENODEV) {
3475 			if (ata_link_online(link))
3476 				ready = 0;
3477 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3478 				 !ata_link_offline(link) &&
3479 				 time_before(now, nodev_deadline))
3480 				ready = 0;
3481 		}
3482 
3483 		if (ready)
3484 			return ready;
3485 		if (time_after(now, deadline))
3486 			return -EBUSY;
3487 
3488 		if (!warned && time_after(now, start + 5 * HZ) &&
3489 		    (deadline - now > 3 * HZ)) {
3490 			ata_link_printk(link, KERN_WARNING,
3491 				"link is slow to respond, please be patient "
3492 				"(ready=%d)\n", tmp);
3493 			warned = 1;
3494 		}
3495 
3496 		msleep(50);
3497 	}
3498 }
3499 
3500 /**
3501  *	ata_wait_after_reset - wait for link to become ready after reset
3502  *	@link: link to be waited on
3503  *	@deadline: deadline jiffies for the operation
3504  *	@check_ready: callback to check link readiness
3505  *
3506  *	Wait for @link to become ready after reset.
3507  *
3508  *	LOCKING:
3509  *	EH context.
3510  *
3511  *	RETURNS:
3512  *	0 if @link is ready before @deadline; otherwise, -errno.
3513  */
3514 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3515 				int (*check_ready)(struct ata_link *link))
3516 {
3517 	msleep(ATA_WAIT_AFTER_RESET);
3518 
3519 	return ata_wait_ready(link, deadline, check_ready);
3520 }
3521 
3522 /**
3523  *	sata_link_debounce - debounce SATA phy status
3524  *	@link: ATA link to debounce SATA phy status for
3525  *	@params: timing parameters { interval, duration, timeout } in msec
3526  *	@deadline: deadline jiffies for the operation
3527  *
3528  *	Make sure SStatus of @link reaches stable state, determined by
3529  *	holding the same value where DET is not 1 for @duration polled
3530  *	every @interval, before @timeout.  The timeout constrains the
3531  *	beginning of the stable state.  Because DET gets stuck at 1 on
3532  *	some controllers after hot unplugging, this function waits
3533  *	until timeout then returns 0 if DET is stable at 1.
3534  *
3535  *	@timeout is further limited by @deadline.  The sooner of the
3536  *	two is used.
3537  *
3538  *	LOCKING:
3539  *	Kernel thread context (may sleep)
3540  *
3541  *	RETURNS:
3542  *	0 on success, -errno on failure.
3543  */
3544 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3545 		       unsigned long deadline)
3546 {
3547 	unsigned long interval = params[0];
3548 	unsigned long duration = params[1];
3549 	unsigned long last_jiffies, t;
3550 	u32 last, cur;
3551 	int rc;
3552 
3553 	t = ata_deadline(jiffies, params[2]);
3554 	if (time_before(t, deadline))
3555 		deadline = t;
3556 
3557 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3558 		return rc;
3559 	cur &= 0xf;
3560 
3561 	last = cur;
3562 	last_jiffies = jiffies;
3563 
3564 	while (1) {
3565 		msleep(interval);
3566 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3567 			return rc;
3568 		cur &= 0xf;
3569 
3570 		/* DET stable? */
3571 		if (cur == last) {
3572 			if (cur == 1 && time_before(jiffies, deadline))
3573 				continue;
3574 			if (time_after(jiffies,
3575 				       ata_deadline(last_jiffies, duration)))
3576 				return 0;
3577 			continue;
3578 		}
3579 
3580 		/* unstable, start over */
3581 		last = cur;
3582 		last_jiffies = jiffies;
3583 
3584 		/* Check deadline.  If debouncing failed, return
3585 		 * -EPIPE to tell upper layer to lower link speed.
3586 		 */
3587 		if (time_after(jiffies, deadline))
3588 			return -EPIPE;
3589 	}
3590 }
3591 
3592 /**
3593  *	sata_link_resume - resume SATA link
3594  *	@link: ATA link to resume SATA
3595  *	@params: timing parameters { interval, duration, timeout } in msec
3596  *	@deadline: deadline jiffies for the operation
3597  *
3598  *	Resume SATA phy @link and debounce it.
3599  *
3600  *	LOCKING:
3601  *	Kernel thread context (may sleep)
3602  *
3603  *	RETURNS:
3604  *	0 on success, -errno on failure.
3605  */
3606 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3607 		     unsigned long deadline)
3608 {
3609 	u32 scontrol, serror;
3610 	int rc;
3611 
3612 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3613 		return rc;
3614 
3615 	scontrol = (scontrol & 0x0f0) | 0x300;
3616 
3617 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3618 		return rc;
3619 
3620 	/* Some PHYs react badly if SStatus is pounded immediately
3621 	 * after resuming.  Delay 200ms before debouncing.
3622 	 */
3623 	msleep(200);
3624 
3625 	if ((rc = sata_link_debounce(link, params, deadline)))
3626 		return rc;
3627 
3628 	/* clear SError, some PHYs require this even for SRST to work */
3629 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3630 		rc = sata_scr_write(link, SCR_ERROR, serror);
3631 
3632 	return rc != -EINVAL ? rc : 0;
3633 }
3634 
3635 /**
3636  *	ata_std_prereset - prepare for reset
3637  *	@link: ATA link to be reset
3638  *	@deadline: deadline jiffies for the operation
3639  *
3640  *	@link is about to be reset.  Initialize it.  Failure from
3641  *	prereset makes libata abort the whole reset sequence and give up
3642  *	that port, so prereset should be best-effort.  It does its
3643  *	best to prepare for reset sequence but if things go wrong, it
3644  *	should just whine, not fail.
3645  *
3646  *	LOCKING:
3647  *	Kernel thread context (may sleep)
3648  *
3649  *	RETURNS:
3650  *	0 on success, -errno otherwise.
3651  */
3652 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3653 {
3654 	struct ata_port *ap = link->ap;
3655 	struct ata_eh_context *ehc = &link->eh_context;
3656 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3657 	int rc;
3658 
3659 	/* if we're about to do hardreset, nothing more to do */
3660 	if (ehc->i.action & ATA_EH_HARDRESET)
3661 		return 0;
3662 
3663 	/* if SATA, resume link */
3664 	if (ap->flags & ATA_FLAG_SATA) {
3665 		rc = sata_link_resume(link, timing, deadline);
3666 		/* whine about phy resume failure but proceed */
3667 		if (rc && rc != -EOPNOTSUPP)
3668 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3669 					"link for reset (errno=%d)\n", rc);
3670 	}
3671 
3672 	/* no point in trying softreset on offline link */
3673 	if (ata_phys_link_offline(link))
3674 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3675 
3676 	return 0;
3677 }
3678 
3679 /**
3680  *	sata_link_hardreset - reset link via SATA phy reset
3681  *	@link: link to reset
3682  *	@timing: timing parameters { interval, duration, timeout } in msec
3683  *	@deadline: deadline jiffies for the operation
3684  *	@online: optional out parameter indicating link onlineness
3685  *	@check_ready: optional callback to check link readiness
3686  *
3687  *	SATA phy-reset @link using DET bits of SControl register.
3688  *	After hardreset, link readiness is waited upon using
3689  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3690  *	allowed to omit @check_ready and do the waiting themselves after
3691  *	this function returns.  Device classification is the LLD's
3692  *	responsibility.
3693  *
3694  *	*@online is set to one iff reset succeeded and @link is online
3695  *	after reset.
3696  *
3697  *	LOCKING:
3698  *	Kernel thread context (may sleep)
3699  *
3700  *	RETURNS:
3701  *	0 on success, -errno otherwise.
3702  */
3703 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3704 			unsigned long deadline,
3705 			bool *online, int (*check_ready)(struct ata_link *))
3706 {
3707 	u32 scontrol;
3708 	int rc;
3709 
3710 	DPRINTK("ENTER\n");
3711 
3712 	if (online)
3713 		*online = false;
3714 
3715 	if (sata_set_spd_needed(link)) {
3716 		/* SATA spec says nothing about how to reconfigure
3717 		 * spd.  To be on the safe side, turn off phy during
3718 		 * reconfiguration.  This works for at least ICH7 AHCI
3719 		 * and Sil3124.
3720 		 */
3721 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3722 			goto out;
3723 
3724 		scontrol = (scontrol & 0x0f0) | 0x304;
3725 
3726 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3727 			goto out;
3728 
3729 		sata_set_spd(link);
3730 	}
3731 
3732 	/* issue phy wake/reset */
3733 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3734 		goto out;
3735 
3736 	scontrol = (scontrol & 0x0f0) | 0x301;
3737 
3738 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3739 		goto out;
3740 
3741 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3742 	 * 10.4.2 says at least 1 ms.
3743 	 */
3744 	msleep(1);
3745 
3746 	/* bring link back */
3747 	rc = sata_link_resume(link, timing, deadline);
3748 	if (rc)
3749 		goto out;
3750 	/* if link is offline nothing more to do */
3751 	if (ata_phys_link_offline(link))
3752 		goto out;
3753 
3754 	/* Link is online.  From this point, -ENODEV too is an error. */
3755 	if (online)
3756 		*online = true;
3757 
3758 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3759 		/* If PMP is supported, we have to do follow-up SRST.
3760 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3761 		 * the first port is empty.  Wait only for
3762 		 * ATA_TMOUT_PMP_SRST_WAIT.
3763 		 */
3764 		if (check_ready) {
3765 			unsigned long pmp_deadline;
3766 
3767 			pmp_deadline = ata_deadline(jiffies,
3768 						    ATA_TMOUT_PMP_SRST_WAIT);
3769 			if (time_after(pmp_deadline, deadline))
3770 				pmp_deadline = deadline;
3771 			ata_wait_ready(link, pmp_deadline, check_ready);
3772 		}
3773 		rc = -EAGAIN;
3774 		goto out;
3775 	}
3776 
3777 	rc = 0;
3778 	if (check_ready)
3779 		rc = ata_wait_ready(link, deadline, check_ready);
3780  out:
3781 	if (rc && rc != -EAGAIN) {
3782 		/* online is set iff link is online && reset succeeded */
3783 		if (online)
3784 			*online = false;
3785 		ata_link_printk(link, KERN_ERR,
3786 				"COMRESET failed (errno=%d)\n", rc);
3787 	}
3788 	DPRINTK("EXIT, rc=%d\n", rc);
3789 	return rc;
3790 }
3791 
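/*
 * Example (sketch): an LLD hardreset built on sata_link_hardreset() with
 * a readiness callback.  my_hardreset() and my_link_ready() are
 * hypothetical; a real driver's check_ready usually polls controller
 * status, and phy onlineness is used here only as a stand-in.
 *
 *	static int my_link_ready(struct ata_link *link)
 *	{
 *		return ata_phys_link_online(link);
 *	}
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline, &online,
 *					   my_link_ready);
 *	}
 */
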
3792 /**
3793  *	sata_std_hardreset - COMRESET w/o waiting or classification
3794  *	@link: link to reset
3795  *	@class: resulting class of attached device
3796  *	@deadline: deadline jiffies for the operation
3797  *
3798  *	Standard SATA COMRESET w/o waiting or classification.
3799  *
3800  *	LOCKING:
3801  *	Kernel thread context (may sleep)
3802  *
3803  *	RETURNS:
3804  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3805  */
3806 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3807 		       unsigned long deadline)
3808 {
3809 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3810 	bool online;
3811 	int rc;
3812 
3813 	/* do hardreset */
3814 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3815 	return online ? -EAGAIN : rc;
3816 }
3817 
3818 /**
3819  *	ata_std_postreset - standard postreset callback
3820  *	@link: the target ata_link
3821  *	@classes: classes of attached devices
3822  *
3823  *	This function is invoked after a successful reset.  Note that
3824  *	the device might have been reset more than once using
3825  *	different reset methods before postreset is invoked.
3826  *
3827  *	LOCKING:
3828  *	Kernel thread context (may sleep)
3829  */
3830 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3831 {
3832 	u32 serror;
3833 
3834 	DPRINTK("ENTER\n");
3835 
3836 	/* reset complete, clear SError */
3837 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3838 		sata_scr_write(link, SCR_ERROR, serror);
3839 
3840 	/* print link status */
3841 	sata_print_link_status(link);
3842 
3843 	DPRINTK("EXIT\n");
3844 }
3845 
3846 /**
3847  *	ata_dev_same_device - Determine whether new ID matches configured device
3848  *	@dev: device to compare against
3849  *	@new_class: class of the new device
3850  *	@new_id: IDENTIFY page of the new device
3851  *
3852  *	Compare @new_class and @new_id against @dev and determine
3853  *	whether @dev is the device indicated by @new_class and
3854  *	@new_id.
3855  *
3856  *	LOCKING:
3857  *	None.
3858  *
3859  *	RETURNS:
3860  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3861  */
3862 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3863 			       const u16 *new_id)
3864 {
3865 	const u16 *old_id = dev->id;
3866 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3867 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3868 
3869 	if (dev->class != new_class) {
3870 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3871 			       dev->class, new_class);
3872 		return 0;
3873 	}
3874 
3875 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3876 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3877 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3878 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3879 
3880 	if (strcmp(model[0], model[1])) {
3881 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3882 			       "'%s' != '%s'\n", model[0], model[1]);
3883 		return 0;
3884 	}
3885 
3886 	if (strcmp(serial[0], serial[1])) {
3887 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3888 			       "'%s' != '%s'\n", serial[0], serial[1]);
3889 		return 0;
3890 	}
3891 
3892 	return 1;
3893 }
3894 
3895 /**
3896  *	ata_dev_reread_id - Re-read IDENTIFY data
3897  *	@dev: target ATA device
3898  *	@readid_flags: read ID flags
3899  *
3900  *	Re-read IDENTIFY page and make sure @dev is still attached to
3901  *	the port.
3902  *
3903  *	LOCKING:
3904  *	Kernel thread context (may sleep)
3905  *
3906  *	RETURNS:
3907  *	0 on success, negative errno otherwise
3908  */
3909 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3910 {
3911 	unsigned int class = dev->class;
3912 	u16 *id = (void *)dev->link->ap->sector_buf;
3913 	int rc;
3914 
3915 	/* read ID data */
3916 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3917 	if (rc)
3918 		return rc;
3919 
3920 	/* is the device still there? */
3921 	if (!ata_dev_same_device(dev, class, id))
3922 		return -ENODEV;
3923 
3924 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3925 	return 0;
3926 }
3927 
3928 /**
3929  *	ata_dev_revalidate - Revalidate ATA device
3930  *	@dev: device to revalidate
3931  *	@new_class: new class code
3932  *	@readid_flags: read ID flags
3933  *
3934  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3935  *	port and reconfigure it according to the new IDENTIFY page.
3936  *
3937  *	LOCKING:
3938  *	Kernel thread context (may sleep)
3939  *
3940  *	RETURNS:
3941  *	0 on success, negative errno otherwise
3942  */
3943 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3944 		       unsigned int readid_flags)
3945 {
3946 	u64 n_sectors = dev->n_sectors;
3947 	int rc;
3948 
3949 	if (!ata_dev_enabled(dev))
3950 		return -ENODEV;
3951 
3952 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3953 	if (ata_class_enabled(new_class) &&
3954 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3955 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3956 			       dev->class, new_class);
3957 		rc = -ENODEV;
3958 		goto fail;
3959 	}
3960 
3961 	/* re-read ID */
3962 	rc = ata_dev_reread_id(dev, readid_flags);
3963 	if (rc)
3964 		goto fail;
3965 
3966 	/* configure device according to the new ID */
3967 	rc = ata_dev_configure(dev);
3968 	if (rc)
3969 		goto fail;
3970 
3971 	/* verify n_sectors hasn't changed */
3972 	if (dev->class == ATA_DEV_ATA && n_sectors &&
3973 	    dev->n_sectors != n_sectors) {
3974 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3975 			       "%llu != %llu\n",
3976 			       (unsigned long long)n_sectors,
3977 			       (unsigned long long)dev->n_sectors);
3978 
3979 		/* restore original n_sectors */
3980 		dev->n_sectors = n_sectors;
3981 
3982 		rc = -ENODEV;
3983 		goto fail;
3984 	}
3985 
3986 	return 0;
3987 
3988  fail:
3989 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3990 	return rc;
3991 }
3992 
3993 struct ata_blacklist_entry {
3994 	const char *model_num;
3995 	const char *model_rev;
3996 	unsigned long horkage;
3997 };
3998 
3999 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4000 	/* Devices with DMA related problems under Linux */
4001 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4002 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4003 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4004 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4005 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4006 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4007 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4008 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4009 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4010 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
4011 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
4012 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4013 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4014 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4015 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4016 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4017 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
4018 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
4019 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4020 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4021 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4022 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4023 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4024 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4025 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4026 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4027 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4028 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4029 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4030 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4031 	/* Odd clown on sil3726/4726 PMPs */
4032 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4033 
4034 	/* Weird ATAPI devices */
4035 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4036 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4037 
4038 	/* Devices we expect to fail diagnostics */
4039 
4040 	/* Devices where NCQ should be avoided */
4041 	/* NCQ is slow */
4042 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4043 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4044 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4045 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4046 	/* NCQ is broken */
4047 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4048 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4049 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4050 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4051 
4052 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4053 	{ "ST31500341AS",	"9JU138",	ATA_HORKAGE_NONCQ |
4054 						ATA_HORKAGE_FIRMWARE_WARN },
4055 	{ "ST31000333AS",	"9FZ136",	ATA_HORKAGE_NONCQ |
4056 						ATA_HORKAGE_FIRMWARE_WARN },
4057 	{ "ST3640623AS",	"9FZ164",	ATA_HORKAGE_NONCQ |
4058 						ATA_HORKAGE_FIRMWARE_WARN },
4059 	{ "ST3640323AS",	"9FZ134",	ATA_HORKAGE_NONCQ |
4060 						ATA_HORKAGE_FIRMWARE_WARN },
4061 	{ "ST3320813AS",	"9FZ182",	ATA_HORKAGE_NONCQ |
4062 						ATA_HORKAGE_FIRMWARE_WARN },
4063 	{ "ST3320613AS",	"9FZ162",	ATA_HORKAGE_NONCQ |
4064 						ATA_HORKAGE_FIRMWARE_WARN },
4065 
4066 	/* Blacklist entries taken from Silicon Image 3124/3132
4067 	   Windows driver .inf file - also several Linux problem reports */
4068 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4069 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4070 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4071 
4072 	/* devices which puke on READ_NATIVE_MAX */
4073 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4074 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4075 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4076 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4077 
4078 	/* Devices which report 1 sector over size HPA */
4079 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4080 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4081 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4082 
4083 	/* Devices which get the IVB wrong */
4084 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4085 	/* Maybe we should just blacklist TSSTcorp... */
4086 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
4087 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
4088 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4089 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4090 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4091 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
4092 
4093 	/* Devices that do not need bridging limits applied */
4094 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4095 
4096 	/* End Marker */
4097 	{ }
4098 };
4099 
4100 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4101 {
4102 	const char *p;
4103 	int len;
4104 
4105 	/*
4106 	 * check for trailing wildcard: *\0
4107 	 */
4108 	p = strchr(patt, wildchar);
4109 	if (p && ((*(p + 1)) == 0))
4110 		len = p - patt;
4111 	else {
4112 		len = strlen(name);
4113 		if (!len) {
4114 			if (!*patt)
4115 				return 0;
4116 			return -1;
4117 		}
4118 	}
4119 
4120 	return strncmp(patt, name, len);
4121 }
4122 
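/*
 * Worked examples of the matching rules implemented above, with '*' as
 * wildchar: only a single trailing wildcard is understood, which is all
 * the blacklist table needs.
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0	prefix match
 *	strn_pattern_cmp("ST340823A", "ST340823A", '*') == 0		exact match
 *	strn_pattern_cmp("CRD-84", "CRD-8400B", '*') != 0		no match
 */
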
4123 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4124 {
4125 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4126 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4127 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4128 
4129 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4130 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4131 
4132 	while (ad->model_num) {
4133 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4134 			if (ad->model_rev == NULL)
4135 				return ad->horkage;
4136 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4137 				return ad->horkage;
4138 		}
4139 		ad++;
4140 	}
4141 	return 0;
4142 }
4143 
4144 static int ata_dma_blacklisted(const struct ata_device *dev)
4145 {
4146 	/* We don't support polling DMA.
4147 	 * Blacklist DMA for those ATAPI devices with CDB-intr (and use
4148 	 * PIO) if the LLDD handles interrupts only in the HSM_ST_LAST state.
4149 	 */
4150 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4151 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4152 		return 1;
4153 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4154 }
4155 
4156 /**
4157  *	ata_is_40wire		-	check drive side detection
4158  *	@dev: device
4159  *
4160  *	Perform drive side detection decoding, allowing for device vendors
4161  *	who can't follow the documentation.
4162  */
4163 
4164 static int ata_is_40wire(struct ata_device *dev)
4165 {
4166 	if (dev->horkage & ATA_HORKAGE_IVB)
4167 		return ata_drive_40wire_relaxed(dev->id);
4168 	return ata_drive_40wire(dev->id);
4169 }
4170 
4171 /**
4172  *	cable_is_40wire		-	40/80/SATA decider
4173  *	@ap: port to consider
4174  *
4175  *	This function encapsulates the policy for speed management
4176  *	in one place. At the moment we don't cache the result but
4177  *	there is a good case for setting ap->cbl to the result when
4178  *	we are called with unknown cables (and figuring out if it
4179  *	impacts hotplug at all).
4180  *
4181  *	Return 1 if the cable appears to be 40 wire.
4182  */
4183 
4184 static int cable_is_40wire(struct ata_port *ap)
4185 {
4186 	struct ata_link *link;
4187 	struct ata_device *dev;
4188 
4189 	/* If the controller thinks we are 40 wire, we are. */
4190 	if (ap->cbl == ATA_CBL_PATA40)
4191 		return 1;
4192 
4193 	/* If the controller thinks we are 80 wire, we are. */
4194 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4195 		return 0;
4196 
4197 	/* If the system is known to be 40 wire short cable (eg
4198 	 * laptop), then we allow 80 wire modes even if the drive
4199 	 * isn't sure.
4200 	 */
4201 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4202 		return 0;
4203 
4204 	/* If the controller doesn't know, we scan.
4205 	 *
4206 	 * Note: We look for all 40 wire detects at this point.  Any
4207 	 *       80 wire detect is taken to be 80 wire cable because
4208 	 * - in many setups only the one drive (slave if present) will
4209 	 *   give a valid detect
4210 	 * - if you have a non detect capable drive you don't want it
4211 	 *   to colour the choice
4212 	 */
4213 	ata_port_for_each_link(link, ap) {
4214 		ata_link_for_each_dev(dev, link) {
4215 			if (ata_dev_enabled(dev) && !ata_is_40wire(dev))
4216 				return 0;
4217 		}
4218 	}
4219 	return 1;
4220 }
4221 
4222 /**
4223  *	ata_dev_xfermask - Compute supported xfermask of the given device
4224  *	@dev: Device to compute xfermask for
4225  *
4226  *	Compute supported xfermask of @dev and store it in
4227  *	dev->*_mask.  This function is responsible for applying all
4228  *	known limits including host controller limits, device
4229  *	blacklist, etc...
4230  *
4231  *	LOCKING:
4232  *	None.
4233  */
4234 static void ata_dev_xfermask(struct ata_device *dev)
4235 {
4236 	struct ata_link *link = dev->link;
4237 	struct ata_port *ap = link->ap;
4238 	struct ata_host *host = ap->host;
4239 	unsigned long xfer_mask;
4240 
4241 	/* controller modes available */
4242 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4243 				      ap->mwdma_mask, ap->udma_mask);
4244 
4245 	/* drive modes available */
4246 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4247 				       dev->mwdma_mask, dev->udma_mask);
4248 	xfer_mask &= ata_id_xfermask(dev->id);
4249 
4250 	/*
4251 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4252 	 *	cable
4253 	 */
4254 	if (ata_dev_pair(dev)) {
4255 		/* No PIO5 or PIO6 */
4256 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4257 		/* No MWDMA3 or MWDMA 4 */
4258 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4259 	}
4260 
4261 	if (ata_dma_blacklisted(dev)) {
4262 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4263 		ata_dev_printk(dev, KERN_WARNING,
4264 			       "device is on DMA blacklist, disabling DMA\n");
4265 	}
4266 
4267 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4268 	    host->simplex_claimed && host->simplex_claimed != ap) {
4269 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4270 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4271 			       "other device, disabling DMA\n");
4272 	}
4273 
4274 	if (ap->flags & ATA_FLAG_NO_IORDY)
4275 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4276 
4277 	if (ap->ops->mode_filter)
4278 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4279 
4280 	/* Apply cable rule here.  Don't apply it early because when
4281 	 * we handle hot plug the cable type can itself change.
4282 	 * Check this last so that we know if the transfer rate was
4283 	 * solely limited by the cable.
4284 	 * Unknown or 80 wire cables reported host side are checked
4285 	 * drive side as well. Cases where we know a 40wire cable
4286 	 * is used safely for 80 are not checked here.
4287 	 */
4288 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4289 		/* UDMA/44 or higher would be available */
4290 		if (cable_is_40wire(ap)) {
4291 			ata_dev_printk(dev, KERN_WARNING,
4292 				 "limited to UDMA/33 due to 40-wire cable\n");
4293 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4294 		}
4295 
4296 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4297 			    &dev->mwdma_mask, &dev->udma_mask);
4298 }
4299 
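/*
 * Example (sketch): the pack/unpack helpers used above fold the three
 * per-type masks into a single xfer_mask and back.  The mode mask
 * values are illustrative, assuming the ATA_PIO4/ATA_MWDMA2/ATA_UDMA5
 * definitions from <linux/ata.h>.
 *
 *	unsigned long xfer_mask;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);
 *	... apply limits to xfer_mask ...
 *	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
 *			    &dev->mwdma_mask, &dev->udma_mask);
 */
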
4300 /**
4301  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4302  *	@dev: Device to which command will be sent
4303  *
4304  *	Issue SET FEATURES - XFER MODE command to device @dev
4305  *	on port @ap.
4306  *
4307  *	LOCKING:
4308  *	PCI/etc. bus probe sem.
4309  *
4310  *	RETURNS:
4311  *	0 on success, AC_ERR_* mask otherwise.
4312  */
4313 
4314 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4315 {
4316 	struct ata_taskfile tf;
4317 	unsigned int err_mask;
4318 
4319 	/* set up set-features taskfile */
4320 	DPRINTK("set features - xfer mode\n");
4321 
4322 	/* Some controllers and ATAPI devices show flaky interrupt
4323 	 * behavior after setting xfer mode.  Use polling instead.
4324 	 */
4325 	ata_tf_init(dev, &tf);
4326 	tf.command = ATA_CMD_SET_FEATURES;
4327 	tf.feature = SETFEATURES_XFER;
4328 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4329 	tf.protocol = ATA_PROT_NODATA;
4330 	/* If we are using IORDY we must send the mode setting command */
4331 	if (ata_pio_need_iordy(dev))
4332 		tf.nsect = dev->xfer_mode;
4333 	/* If the device has IORDY and the controller does not - turn it off */
4334 	else if (ata_id_has_iordy(dev->id))
4335 		tf.nsect = 0x01;
4336 	else /* In the ancient relic department - skip all of this */
4337 		return 0;
4338 
4339 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4340 
4341 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4342 	return err_mask;
4343 }
4344 /**
4345  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4346  *	@dev: Device to which command will be sent
4347  *	@enable: Whether to enable or disable the feature
4348  *	@feature: The feature to set, passed in the sector count field
4349  *
4350  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4351  *	with @feature written to the sector count field.
4352  *
4353  *	LOCKING:
4354  *	PCI/etc. bus probe sem.
4355  *
4356  *	RETURNS:
4357  *	0 on success, AC_ERR_* mask otherwise.
4358  */
4359 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4360 					u8 feature)
4361 {
4362 	struct ata_taskfile tf;
4363 	unsigned int err_mask;
4364 
4365 	/* set up set-features taskfile */
4366 	DPRINTK("set features - SATA features\n");
4367 
4368 	ata_tf_init(dev, &tf);
4369 	tf.command = ATA_CMD_SET_FEATURES;
4370 	tf.feature = enable;
4371 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4372 	tf.protocol = ATA_PROT_NODATA;
4373 	tf.nsect = feature;
4374 
4375 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4376 
4377 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4378 	return err_mask;
4379 }
4380 
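/*
 * Example (sketch): enabling Asynchronous Notification with the helper
 * above, assuming the SETFEATURES_SATA_ENABLE and SATA_AN definitions
 * from <linux/ata.h>.
 *
 *	unsigned int err_mask;
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR,
 *			       "failed to enable AN (err_mask=0x%x)\n", err_mask);
 */
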
4381 /**
4382  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4383  *	@dev: Device to which command will be sent
4384  *	@heads: Number of heads (taskfile parameter)
4385  *	@sectors: Number of sectors (taskfile parameter)
4386  *
4387  *	LOCKING:
4388  *	Kernel thread context (may sleep)
4389  *
4390  *	RETURNS:
4391  *	0 on success, AC_ERR_* mask otherwise.
4392  */
4393 static unsigned int ata_dev_init_params(struct ata_device *dev,
4394 					u16 heads, u16 sectors)
4395 {
4396 	struct ata_taskfile tf;
4397 	unsigned int err_mask;
4398 
4399 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4400 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4401 		return AC_ERR_INVALID;
4402 
4403 	/* set up init dev params taskfile */
4404 	DPRINTK("init dev params\n");
4405 
4406 	ata_tf_init(dev, &tf);
4407 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4408 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4409 	tf.protocol = ATA_PROT_NODATA;
4410 	tf.nsect = sectors;
4411 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4412 
4413 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4414 	/* A clean abort indicates an original or just-out-of-spec drive,
4415 	   and we should continue as we issue the setup based on the
4416 	   drive-reported working geometry */
4417 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4418 		err_mask = 0;
4419 
4420 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4421 	return err_mask;
4422 }
4423 
4424 /**
4425  *	ata_sg_clean - Unmap DMA memory associated with command
4426  *	@qc: Command containing DMA memory to be released
4427  *
4428  *	Unmap all mapped DMA memory associated with this command.
4429  *
4430  *	LOCKING:
4431  *	spin_lock_irqsave(host lock)
4432  */
4433 void ata_sg_clean(struct ata_queued_cmd *qc)
4434 {
4435 	struct ata_port *ap = qc->ap;
4436 	struct scatterlist *sg = qc->sg;
4437 	int dir = qc->dma_dir;
4438 
4439 	WARN_ON(sg == NULL);
4440 
4441 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4442 
4443 	if (qc->n_elem)
4444 		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4445 
4446 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4447 	qc->sg = NULL;
4448 }
4449 
4450 /**
4451  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4452  *	@qc: Metadata associated with taskfile to check
4453  *
4454  *	Allow low-level driver to filter ATA PACKET commands, returning
4455  *	a status indicating whether or not it is OK to use DMA for the
4456  *	supplied PACKET command.
4457  *
4458  *	LOCKING:
4459  *	spin_lock_irqsave(host lock)
4460  *
4461  *	RETURNS:
4462  *	0 when ATAPI DMA can be used, nonzero otherwise.
4463  */
4464 int atapi_check_dma(struct ata_queued_cmd *qc)
4465 {
4466 	struct ata_port *ap = qc->ap;
4467 
4468 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4469 	 * few ATAPI devices choke on such DMA requests.
4470 	 */
4471 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4472 	    unlikely(qc->nbytes & 15))
4473 		return 1;
4474 
4475 	if (ap->ops->check_atapi_dma)
4476 		return ap->ops->check_atapi_dma(qc);
4477 
4478 	return 0;
4479 }
4480 
4481 /**
4482  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4483  *	@qc: ATA command in question
4484  *
4485  *	Non-NCQ commands cannot run with any other command, NCQ or
4486  *	not.  As upper layer only knows the queue depth, we are
4487  *	responsible for maintaining exclusion.  This function checks
4488  *	whether a new command @qc can be issued.
4489  *
4490  *	LOCKING:
4491  *	spin_lock_irqsave(host lock)
4492  *
4493  *	RETURNS:
4494  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4495  */
4496 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4497 {
4498 	struct ata_link *link = qc->dev->link;
4499 
4500 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4501 		if (!ata_tag_valid(link->active_tag))
4502 			return 0;
4503 	} else {
4504 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4505 			return 0;
4506 	}
4507 
4508 	return ATA_DEFER_LINK;
4509 }
4510 
4511 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4512 
4513 /**
4514  *	ata_sg_init - Associate command with scatter-gather table.
4515  *	@qc: Command to be associated
4516  *	@sg: Scatter-gather table.
4517  *	@n_elem: Number of elements in s/g table.
4518  *
4519  *	Initialize the data-related elements of queued_cmd @qc
4520  *	to point to a scatter-gather table @sg, containing @n_elem
4521  *	elements.
4522  *
4523  *	LOCKING:
4524  *	spin_lock_irqsave(host lock)
4525  */
4526 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4527 		 unsigned int n_elem)
4528 {
4529 	qc->sg = sg;
4530 	qc->n_elem = n_elem;
4531 	qc->cursg = qc->sg;
4532 }
4533 
4534 /**
4535  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4536  *	@qc: Command with scatter-gather table to be mapped.
4537  *
4538  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4539  *
4540  *	LOCKING:
4541  *	spin_lock_irqsave(host lock)
4542  *
4543  *	RETURNS:
4544  *	Zero on success, negative on error.
4545  *
4546  */
4547 static int ata_sg_setup(struct ata_queued_cmd *qc)
4548 {
4549 	struct ata_port *ap = qc->ap;
4550 	unsigned int n_elem;
4551 
4552 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4553 
4554 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4555 	if (n_elem < 1)
4556 		return -1;
4557 
4558 	DPRINTK("%d sg elements mapped\n", n_elem);
4559 
4560 	qc->n_elem = n_elem;
4561 	qc->flags |= ATA_QCFLAG_DMAMAP;
4562 
4563 	return 0;
4564 }
4565 
4566 /**
4567  *	swap_buf_le16 - swap halves of 16-bit words in place
4568  *	@buf:  Buffer to swap
4569  *	@buf_words:  Number of 16-bit words in buffer.
4570  *
4571  *	Swap halves of 16-bit words if needed to convert from
4572  *	little-endian byte order to native cpu byte order, or
4573  *	vice-versa.
4574  *
4575  *	LOCKING:
4576  *	Inherited from caller.
4577  */
4578 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4579 {
4580 #ifdef __BIG_ENDIAN
4581 	unsigned int i;
4582 
4583 	for (i = 0; i < buf_words; i++)
4584 		buf[i] = le16_to_cpu(buf[i]);
4585 #endif /* __BIG_ENDIAN */
4586 }
4587 
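/*
 * Example: IDENTIFY DEVICE data arrives as 16-bit little-endian words,
 * so the ID read path in this file fixes it up before parsing:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */
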
4588 /**
4589  *	ata_qc_new - Request an available ATA command for queueing
4590  *	@ap: Port from which to request an available command structure
4592  *
4593  *	LOCKING:
4594  *	None.
4595  */
4596 
4597 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4598 {
4599 	struct ata_queued_cmd *qc = NULL;
4600 	unsigned int i;
4601 
4602 	/* no command while frozen */
4603 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4604 		return NULL;
4605 
4606 	/* the last tag is reserved for internal command. */
4607 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4608 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4609 			qc = __ata_qc_from_tag(ap, i);
4610 			break;
4611 		}
4612 
4613 	if (qc)
4614 		qc->tag = i;
4615 
4616 	return qc;
4617 }
4618 
4619 /**
4620  *	ata_qc_new_init - Request an available ATA command, and initialize it
4621  *	@dev: Device from whom we request an available command structure
4623  *
4624  *	LOCKING:
4625  *	None.
4626  */
4627 
4628 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4629 {
4630 	struct ata_port *ap = dev->link->ap;
4631 	struct ata_queued_cmd *qc;
4632 
4633 	qc = ata_qc_new(ap);
4634 	if (qc) {
4635 		qc->scsicmd = NULL;
4636 		qc->ap = ap;
4637 		qc->dev = dev;
4638 
4639 		ata_qc_reinit(qc);
4640 	}
4641 
4642 	return qc;
4643 }
4644 
4645 /**
4646  *	ata_qc_free - free unused ata_queued_cmd
4647  *	@qc: Command to complete
4648  *
4649  *	Designed to free unused ata_queued_cmd object
4650  *	in case something prevents using it.
4651  *
4652  *	LOCKING:
4653  *	spin_lock_irqsave(host lock)
4654  */
4655 void ata_qc_free(struct ata_queued_cmd *qc)
4656 {
4657 	struct ata_port *ap = qc->ap;
4658 	unsigned int tag;
4659 
4660 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4661 
4662 	qc->flags = 0;
4663 	tag = qc->tag;
4664 	if (likely(ata_tag_valid(tag))) {
4665 		qc->tag = ATA_TAG_POISON;
4666 		clear_bit(tag, &ap->qc_allocated);
4667 	}
4668 }
4669 
4670 void __ata_qc_complete(struct ata_queued_cmd *qc)
4671 {
4672 	struct ata_port *ap = qc->ap;
4673 	struct ata_link *link = qc->dev->link;
4674 
4675 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4676 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4677 
4678 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4679 		ata_sg_clean(qc);
4680 
4681 	/* command should be marked inactive atomically with qc completion */
4682 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4683 		link->sactive &= ~(1 << qc->tag);
4684 		if (!link->sactive)
4685 			ap->nr_active_links--;
4686 	} else {
4687 		link->active_tag = ATA_TAG_POISON;
4688 		ap->nr_active_links--;
4689 	}
4690 
4691 	/* clear exclusive status */
4692 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4693 		     ap->excl_link == link))
4694 		ap->excl_link = NULL;
4695 
4696 	/* atapi: mark qc as inactive to prevent the interrupt handler
4697 	 * from completing the command twice later, before the error handler
4698 	 * is called. (when rc != 0 and atapi request sense is needed)
4699 	 */
4700 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4701 	ap->qc_active &= ~(1 << qc->tag);
4702 
4703 	/* call completion callback */
4704 	qc->complete_fn(qc);
4705 }
4706 
4707 static void fill_result_tf(struct ata_queued_cmd *qc)
4708 {
4709 	struct ata_port *ap = qc->ap;
4710 
4711 	qc->result_tf.flags = qc->tf.flags;
4712 	ap->ops->qc_fill_rtf(qc);
4713 }
4714 
4715 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4716 {
4717 	struct ata_device *dev = qc->dev;
4718 
4719 	if (ata_tag_internal(qc->tag))
4720 		return;
4721 
4722 	if (ata_is_nodata(qc->tf.protocol))
4723 		return;
4724 
4725 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4726 		return;
4727 
4728 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4729 }
4730 
4731 /**
4732  *	ata_qc_complete - Complete an active ATA command
4733  *	@qc: Command to complete
4734  *
4735  *	Indicate to the mid and upper layers that an ATA
4736  *	command has completed, with either an ok or not-ok status.
4737  *
4738  *	LOCKING:
4739  *	spin_lock_irqsave(host lock)
4740  */
4741 void ata_qc_complete(struct ata_queued_cmd *qc)
4742 {
4743 	struct ata_port *ap = qc->ap;
4744 
4745 	/* XXX: New EH and old EH use different mechanisms to
4746 	 * synchronize EH with regular execution path.
4747 	 *
4748 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4749 	 * Normal execution path is responsible for not accessing a
4750 	 * failed qc.  libata core enforces the rule by returning NULL
4751 	 * from ata_qc_from_tag() for failed qcs.
4752 	 *
4753 	 * Old EH depends on ata_qc_complete() nullifying completion
4754 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4755 	 * not synchronize with interrupt handler.  Only PIO task is
4756 	 * taken care of.
4757 	 */
4758 	if (ap->ops->error_handler) {
4759 		struct ata_device *dev = qc->dev;
4760 		struct ata_eh_info *ehi = &dev->link->eh_info;
4761 
4762 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4763 
4764 		if (unlikely(qc->err_mask))
4765 			qc->flags |= ATA_QCFLAG_FAILED;
4766 
4767 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4768 			if (!ata_tag_internal(qc->tag)) {
4769 				/* always fill result TF for failed qc */
4770 				fill_result_tf(qc);
4771 				ata_qc_schedule_eh(qc);
4772 				return;
4773 			}
4774 		}
4775 
4776 		/* read result TF if requested */
4777 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4778 			fill_result_tf(qc);
4779 
4780 		/* Some commands need post-processing after successful
4781 		 * completion.
4782 		 */
4783 		switch (qc->tf.command) {
4784 		case ATA_CMD_SET_FEATURES:
4785 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4786 			    qc->tf.feature != SETFEATURES_WC_OFF)
4787 				break;
4788 			/* fall through */
4789 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4790 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4791 			/* revalidate device */
4792 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4793 			ata_port_schedule_eh(ap);
4794 			break;
4795 
4796 		case ATA_CMD_SLEEP:
4797 			dev->flags |= ATA_DFLAG_SLEEPING;
4798 			break;
4799 		}
4800 
4801 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4802 			ata_verify_xfer(qc);
4803 
4804 		__ata_qc_complete(qc);
4805 	} else {
4806 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4807 			return;
4808 
4809 		/* read result TF if failed or requested */
4810 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4811 			fill_result_tf(qc);
4812 
4813 		__ata_qc_complete(qc);
4814 	}
4815 }
4816 
4817 /**
4818  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4819  *	@ap: port in question
4820  *	@qc_active: new qc_active mask
4821  *
4822  *	Complete in-flight commands.  This function is meant to be
4823  *	called from the low-level driver's interrupt routine to complete
4824  *	requests normally.  ap->qc_active and @qc_active are compared
4825  *	and commands are completed accordingly.
4826  *
4827  *	LOCKING:
4828  *	spin_lock_irqsave(host lock)
4829  *
4830  *	RETURNS:
4831  *	Number of completed commands on success, -errno otherwise.
4832  */
4833 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4834 {
4835 	int nr_done = 0;
4836 	u32 done_mask;
4837 	int i;
4838 
4839 	done_mask = ap->qc_active ^ qc_active;
4840 
4841 	if (unlikely(done_mask & qc_active)) {
4842 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4843 				"(%08x->%08x)\n", ap->qc_active, qc_active);
4844 		return -EINVAL;
4845 	}
4846 
4847 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
4848 		struct ata_queued_cmd *qc;
4849 
4850 		if (!(done_mask & (1 << i)))
4851 			continue;
4852 
4853 		if ((qc = ata_qc_from_tag(ap, i))) {
4854 			ata_qc_complete(qc);
4855 			nr_done++;
4856 		}
4857 	}
4858 
4859 	return nr_done;
4860 }
4861 
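/*
 * Example (sketch): how an NCQ-capable LLD's interrupt handler might use
 * the helper above.  my_read_active_tags() is hypothetical; each driver
 * derives the still-active tag mask from its own hardware registers.
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *	u32 qc_active = my_read_active_tags(ap);
 *	int rc;
 *
 *	rc = ata_qc_complete_multiple(ap, qc_active);
 *	if (rc < 0) {
 *		ehi->err_mask |= AC_ERR_HSM;
 *		ehi->action |= ATA_EH_RESET;
 *		ata_port_freeze(ap);
 *	}
 */
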
4862 /**
4863  *	ata_qc_issue - issue taskfile to device
4864  *	@qc: command to issue to device
4865  *
4866  *	Prepare an ATA command for submission to the device.
4867  *	This includes mapping the data into a DMA-able
4868  *	area, filling in the S/G table, and finally
4869  *	writing the taskfile to hardware, starting the command.
4870  *
4871  *	LOCKING:
4872  *	spin_lock_irqsave(host lock)
4873  */
4874 void ata_qc_issue(struct ata_queued_cmd *qc)
4875 {
4876 	struct ata_port *ap = qc->ap;
4877 	struct ata_link *link = qc->dev->link;
4878 	u8 prot = qc->tf.protocol;
4879 
4880 	/* Make sure only one non-NCQ command is outstanding.  The
4881 	 * check is skipped for old EH because it reuses active qc to
4882 	 * request ATAPI sense.
4883 	 */
4884 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4885 
4886 	if (ata_is_ncq(prot)) {
4887 		WARN_ON(link->sactive & (1 << qc->tag));
4888 
4889 		if (!link->sactive)
4890 			ap->nr_active_links++;
4891 		link->sactive |= 1 << qc->tag;
4892 	} else {
4893 		WARN_ON(link->sactive);
4894 
4895 		ap->nr_active_links++;
4896 		link->active_tag = qc->tag;
4897 	}
4898 
4899 	qc->flags |= ATA_QCFLAG_ACTIVE;
4900 	ap->qc_active |= 1 << qc->tag;
4901 
4902 	/* We guarantee to LLDs that they will have at least one
4903 	 * non-zero sg if the command is a data command.
4904 	 */
4905 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4906 
4907 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4908 				 (ap->flags & ATA_FLAG_PIO_DMA)))
4909 		if (ata_sg_setup(qc))
4910 			goto sg_err;
4911 
4912 	/* if device is sleeping, schedule reset and abort the link */
4913 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4914 		link->eh_info.action |= ATA_EH_RESET;
4915 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4916 		ata_link_abort(link);
4917 		return;
4918 	}
4919 
4920 	ap->ops->qc_prep(qc);
4921 
4922 	qc->err_mask |= ap->ops->qc_issue(qc);
4923 	if (unlikely(qc->err_mask))
4924 		goto err;
4925 	return;
4926 
4927 sg_err:
4928 	qc->err_mask |= AC_ERR_SYSTEM;
4929 err:
4930 	ata_qc_complete(qc);
4931 }
4932 
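/*
 * Example (sketch): the submission-side qc lifecycle using the helpers
 * in this file, issuing a no-data command with the host lock held.
 * my_complete_fn() is a hypothetical completion callback; qc->tf comes
 * pre-initialized by ata_qc_new_init() via ata_qc_reinit().
 *
 *	struct ata_queued_cmd *qc;
 *
 *	qc = ata_qc_new_init(dev);	NULL if frozen or no free tag
 *	if (!qc)
 *		return;
 *	qc->tf.command = ATA_CMD_FLUSH_EXT;
 *	qc->tf.flags |= ATA_TFLAG_DEVICE;
 *	qc->tf.protocol = ATA_PROT_NODATA;
 *	qc->complete_fn = my_complete_fn;
 *	ata_qc_issue(qc);
 */
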
4933 /**
4934  *	sata_scr_valid - test whether SCRs are accessible
4935  *	@link: ATA link to test SCR accessibility for
4936  *
4937  *	Test whether SCRs are accessible for @link.
4938  *
4939  *	LOCKING:
4940  *	None.
4941  *
4942  *	RETURNS:
4943  *	1 if SCRs are accessible, 0 otherwise.
4944  */
4945 int sata_scr_valid(struct ata_link *link)
4946 {
4947 	struct ata_port *ap = link->ap;
4948 
4949 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4950 }
4951 
4952 /**
4953  *	sata_scr_read - read SCR register of the specified port
4954  *	@link: ATA link to read SCR for
4955  *	@reg: SCR to read
4956  *	@val: Place to store read value
4957  *
4958  *	Read SCR register @reg of @link into *@val.  This function is
4959  *	guaranteed to succeed if @link is ap->link, the cable type of
4960  *	the port is SATA and the port implements ->scr_read.
4961  *
4962  *	LOCKING:
4963  *	None if @link is ap->link.  Kernel thread context otherwise.
4964  *
4965  *	RETURNS:
4966  *	0 on success, negative errno on failure.
4967  */
4968 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4969 {
4970 	if (ata_is_host_link(link)) {
4971 		if (sata_scr_valid(link))
4972 			return link->ap->ops->scr_read(link, reg, val);
4973 		return -EOPNOTSUPP;
4974 	}
4975 
4976 	return sata_pmp_scr_read(link, reg, val);
4977 }
4978 
4979 /**
4980  *	sata_scr_write - write SCR register of the specified port
4981  *	@link: ATA link to write SCR for
4982  *	@reg: SCR to write
4983  *	@val: value to write
4984  *
4985  *	Write @val to SCR register @reg of @link.  This function is
4986  *	guaranteed to succeed if @link is ap->link, the cable type of
4987  *	the port is SATA and the port implements ->scr_write.
4988  *
4989  *	LOCKING:
4990  *	None if @link is ap->link.  Kernel thread context otherwise.
4991  *
4992  *	RETURNS:
4993  *	0 on success, negative errno on failure.
4994  */
4995 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4996 {
4997 	if (ata_is_host_link(link)) {
4998 		if (sata_scr_valid(link))
4999 			return link->ap->ops->scr_write(link, reg, val);
5000 		return -EOPNOTSUPP;
5001 	}
5002 
5003 	return sata_pmp_scr_write(link, reg, val);
5004 }
5005 
5006 /**
5007  *	sata_scr_write_flush - write SCR register of the specified port and flush
5008  *	@link: ATA link to write SCR for
5009  *	@reg: SCR to write
5010  *	@val: value to write
5011  *
5012  *	This function is identical to sata_scr_write() except that this
5013  *	function performs flush after writing to the register.
5014  *
5015  *	LOCKING:
5016  *	None if @link is ap->link.  Kernel thread context otherwise.
5017  *
5018  *	RETURNS:
5019  *	0 on success, negative errno on failure.
5020  */
5021 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5022 {
5023 	if (ata_is_host_link(link)) {
5024 		int rc;
5025 
5026 		if (sata_scr_valid(link)) {
5027 			rc = link->ap->ops->scr_write(link, reg, val);
5028 			if (rc == 0)
5029 				rc = link->ap->ops->scr_read(link, reg, &val);
5030 			return rc;
5031 		}
5032 		return -EOPNOTSUPP;
5033 	}
5034 
5035 	return sata_pmp_scr_write(link, reg, val);
5036 }
5037 
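/*
 * Example (sketch): the usual read-modify-write pattern over the
 * accessors above, limiting the link to 1.5 Gbps by setting the SPD
 * field (bits 7:4) of SControl.  Error handling is elided;
 * sata_set_spd() is the proper interface for this.
 *
 *	u32 scontrol;
 *
 *	if (sata_scr_read(link, SCR_CONTROL, &scontrol) == 0) {
 *		scontrol = (scontrol & ~0xf0) | 0x10;
 *		sata_scr_write(link, SCR_CONTROL, scontrol);
 *	}
 */
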
5038 /**
5039  *	ata_phys_link_online - test whether the given link is online
5040  *	@link: ATA link to test
5041  *
5042  *	Test whether @link is online.  Note that this function returns
5043  *	false if the online status of @link cannot be obtained, so
5044  *	ata_link_online(link) != !ata_link_offline(link).
5045  *
5046  *	LOCKING:
5047  *	None.
5048  *
5049  *	RETURNS:
5050  *	True if the port online status is available and online.
5051  */
5052 bool ata_phys_link_online(struct ata_link *link)
5053 {
5054 	u32 sstatus;
5055 
5056 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5057 	    (sstatus & 0xf) == 0x3)
5058 		return true;
5059 	return false;
5060 }
5061 
5062 /**
5063  *	ata_phys_link_offline - test whether the given link is offline
5064  *	@link: ATA link to test
5065  *
5066  *	Test whether @link is offline.  Note that this function
5067  *	returns false if the offline status of @link cannot be obtained, so
5068  *	ata_link_online(link) != !ata_link_offline(link).
5069  *
5070  *	LOCKING:
5071  *	None.
5072  *
5073  *	RETURNS:
5074  *	True if the port offline status is available and offline.
5075  */
5076 bool ata_phys_link_offline(struct ata_link *link)
5077 {
5078 	u32 sstatus;
5079 
5080 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5081 	    (sstatus & 0xf) != 0x3)
5082 		return true;
5083 	return false;
5084 }
5085 
5086 /**
5087  *	ata_link_online - test whether the given link is online
5088  *	@link: ATA link to test
5089  *
5090  *	Test whether @link is online.  This is identical to
5091  *	ata_phys_link_online() when there's no slave link.  When
5092  *	there's a slave link, this function should only be called on
5093  *	the master link and will return true if any of M/S links is
5094  *	online.
5095  *
5096  *	LOCKING:
5097  *	None.
5098  *
5099  *	RETURNS:
5100  *	True if the port online status is available and online.
5101  */
5102 bool ata_link_online(struct ata_link *link)
5103 {
5104 	struct ata_link *slave = link->ap->slave_link;
5105 
5106 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5107 
5108 	return ata_phys_link_online(link) ||
5109 		(slave && ata_phys_link_online(slave));
5110 }
5111 
5112 /**
5113  *	ata_link_offline - test whether the given link is offline
5114  *	@link: ATA link to test
5115  *
5116  *	Test whether @link is offline.  This is identical to
5117  *	ata_phys_link_offline() when there's no slave link.  When
5118  *	there's a slave link, this function should only be called on
5119  *	the master link and will return true if both M/S links are
5120  *	offline.
5121  *
5122  *	LOCKING:
5123  *	None.
5124  *
5125  *	RETURNS:
5126  *	True if the port offline status is available and offline.
5127  */
5128 bool ata_link_offline(struct ata_link *link)
5129 {
5130 	struct ata_link *slave = link->ap->slave_link;
5131 
5132 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5133 
5134 	return ata_phys_link_offline(link) &&
5135 		(!slave || ata_phys_link_offline(slave));
5136 }
5137 
5138 #ifdef CONFIG_PM
5139 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5140 			       unsigned int action, unsigned int ehi_flags,
5141 			       int wait)
5142 {
5143 	unsigned long flags;
5144 	int i, rc;
5145 
5146 	for (i = 0; i < host->n_ports; i++) {
5147 		struct ata_port *ap = host->ports[i];
5148 		struct ata_link *link;
5149 
5150 		/* Previous resume operation might still be in
5151 		 * progress.  Wait for PM_PENDING to clear.
5152 		 */
5153 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5154 			ata_port_wait_eh(ap);
5155 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5156 		}
5157 
5158 		/* request PM ops to EH */
5159 		spin_lock_irqsave(ap->lock, flags);
5160 
5161 		ap->pm_mesg = mesg;
5162 		if (wait) {
5163 			rc = 0;
5164 			ap->pm_result = &rc;
5165 		}
5166 
5167 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5168 		__ata_port_for_each_link(link, ap) {
5169 			link->eh_info.action |= action;
5170 			link->eh_info.flags |= ehi_flags;
5171 		}
5172 
5173 		ata_port_schedule_eh(ap);
5174 
5175 		spin_unlock_irqrestore(ap->lock, flags);
5176 
5177 		/* wait and check result */
5178 		if (wait) {
5179 			ata_port_wait_eh(ap);
5180 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5181 			if (rc)
5182 				return rc;
5183 		}
5184 	}
5185 
5186 	return 0;
5187 }
5188 
5189 /**
5190  *	ata_host_suspend - suspend host
5191  *	@host: host to suspend
5192  *	@mesg: PM message
5193  *
5194  *	Suspend @host.  Actual operation is performed by EH.  This
5195  *	function requests EH to perform PM operations and waits for EH
5196  *	to finish.
5197  *
5198  *	LOCKING:
5199  *	Kernel thread context (may sleep).
5200  *
5201  *	RETURNS:
5202  *	0 on success, -errno on failure.
5203  */
5204 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5205 {
5206 	int rc;
5207 
5208 	/*
5209 	 * disable link pm on all ports before requesting
5210 	 * any pm activity
5211 	 */
5212 	ata_lpm_enable(host);
5213 
5214 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5215 	if (rc == 0)
5216 		host->dev->power.power_state = mesg;
5217 	return rc;
5218 }
5219 
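/*
 * Example (sketch): how a PCI LLD's suspend hook typically drives the
 * helper above (cf. ata_pci_device_suspend()).
 *
 *	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *	int rc;
 *
 *	rc = ata_host_suspend(host, mesg);
 *	if (rc)
 *		return rc;
 *	... save controller state and power down the device ...
 */
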
5220 /**
5221  *	ata_host_resume - resume host
5222  *	@host: host to resume
5223  *
5224  *	Resume @host.  Actual operation is performed by EH.  This
5225  *	function requests EH to perform PM operations and returns.
5226  *	Note that all resume operations are performed in parallel.
5227  *
5228  *	LOCKING:
5229  *	Kernel thread context (may sleep).
5230  */
5231 void ata_host_resume(struct ata_host *host)
5232 {
5233 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5234 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5235 	host->dev->power.power_state = PMSG_ON;
5236 
5237 	/* reenable link pm */
5238 	ata_lpm_disable(host);
5239 }
5240 #endif
5241 
5242 /**
5243  *	ata_port_start - Set port up for dma.
5244  *	@ap: Port to initialize
5245  *
5246  *	Called just after data structures for each port are
5247  *	initialized.  Allocates space for PRD table.
5248  *
5249  *	May be used as the port_start() entry in ata_port_operations.
5250  *
5251  *	LOCKING:
5252  *	Inherited from caller.
5253  */
5254 int ata_port_start(struct ata_port *ap)
5255 {
5256 	struct device *dev = ap->dev;
5257 
5258 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5259 				      GFP_KERNEL);
5260 	if (!ap->prd)
5261 		return -ENOMEM;
5262 
5263 	return 0;
5264 }
5265 
5266 /**
5267  *	ata_dev_init - Initialize an ata_device structure
5268  *	@dev: Device structure to initialize
5269  *
5270  *	Initialize @dev in preparation for probing.
5271  *
5272  *	LOCKING:
5273  *	Inherited from caller.
5274  */
5275 void ata_dev_init(struct ata_device *dev)
5276 {
5277 	struct ata_link *link = ata_dev_phys_link(dev);
5278 	struct ata_port *ap = link->ap;
5279 	unsigned long flags;
5280 
5281 	/* SATA spd limit is bound to the attached device, reset together */
5282 	link->sata_spd_limit = link->hw_sata_spd_limit;
5283 	link->sata_spd = 0;
5284 
5285 	/* High bits of dev->flags are used to record warm plug
5286 	 * requests which occur asynchronously.  Synchronize using
5287 	 * host lock.
5288 	 */
5289 	spin_lock_irqsave(ap->lock, flags);
5290 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5291 	dev->horkage = 0;
5292 	spin_unlock_irqrestore(ap->lock, flags);
5293 
5294 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5295 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5296 	dev->pio_mask = UINT_MAX;
5297 	dev->mwdma_mask = UINT_MAX;
5298 	dev->udma_mask = UINT_MAX;
5299 }
5300 
5301 /**
5302  *	ata_link_init - Initialize an ata_link structure
5303  *	@ap: ATA port link is attached to
5304  *	@link: Link structure to initialize
5305  *	@pmp: Port multiplier port number
5306  *
5307  *	Initialize @link.
5308  *
5309  *	LOCKING:
5310  *	Kernel thread context (may sleep)
5311  */
5312 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5313 {
5314 	int i;
5315 
5316 	/* clear everything except for devices */
5317 	memset(link, 0, offsetof(struct ata_link, device[0]));
5318 
5319 	link->ap = ap;
5320 	link->pmp = pmp;
5321 	link->active_tag = ATA_TAG_POISON;
5322 	link->hw_sata_spd_limit = UINT_MAX;
5323 
5324 	/* can't use iterator, ap isn't initialized yet */
5325 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5326 		struct ata_device *dev = &link->device[i];
5327 
5328 		dev->link = link;
5329 		dev->devno = dev - link->device;
5330 		ata_dev_init(dev);
5331 	}
5332 }
5333 
5334 /**
5335  *	sata_link_init_spd - Initialize link->sata_spd_limit
5336  *	@link: Link to configure sata_spd_limit for
5337  *
5338  *	Initialize @link->[hw_]sata_spd_limit to the currently
5339  *	configured value.
5340  *
5341  *	LOCKING:
5342  *	Kernel thread context (may sleep).
5343  *
5344  *	RETURNS:
5345  *	0 on success, -errno on failure.
5346  */
5347 int sata_link_init_spd(struct ata_link *link)
5348 {
5349 	u8 spd;
5350 	int rc;
5351 
5352 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5353 	if (rc)
5354 		return rc;
5355 
5356 	spd = (link->saved_scontrol >> 4) & 0xf;
5357 	if (spd)
5358 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5359 
5360 	ata_force_link_limits(link);
5361 
5362 	link->sata_spd_limit = link->hw_sata_spd_limit;
5363 
5364 	return 0;
5365 }
5366 
5367 /**
5368  *	ata_port_alloc - allocate and initialize basic ATA port resources
5369  *	@host: ATA host this allocated port belongs to
5370  *
5371  *	Allocate and initialize basic ATA port resources.
5372  *
5373  *	RETURNS:
5374  *	Allocated ATA port on success, NULL on failure.
5375  *
5376  *	LOCKING:
5377  *	Inherited from calling layer (may sleep).
5378  */
5379 struct ata_port *ata_port_alloc(struct ata_host *host)
5380 {
5381 	struct ata_port *ap;
5382 
5383 	DPRINTK("ENTER\n");
5384 
5385 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5386 	if (!ap)
5387 		return NULL;
5388 
5389 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5390 	ap->lock = &host->lock;
5391 	ap->flags = ATA_FLAG_DISABLED;
5392 	ap->print_id = -1;
5393 	ap->ctl = ATA_DEVCTL_OBS;
5394 	ap->host = host;
5395 	ap->dev = host->dev;
5396 	ap->last_ctl = 0xFF;
5397 
5398 #if defined(ATA_VERBOSE_DEBUG)
5399 	/* turn on all debugging levels */
5400 	ap->msg_enable = 0x00FF;
5401 #elif defined(ATA_DEBUG)
5402 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5403 #else
5404 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5405 #endif
5406 
5407 #ifdef CONFIG_ATA_SFF
5408 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5409 #else
5410 	INIT_DELAYED_WORK(&ap->port_task, NULL);
5411 #endif
5412 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5413 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5414 	INIT_LIST_HEAD(&ap->eh_done_q);
5415 	init_waitqueue_head(&ap->eh_wait_q);
5416 	init_completion(&ap->park_req_pending);
5417 	init_timer_deferrable(&ap->fastdrain_timer);
5418 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5419 	ap->fastdrain_timer.data = (unsigned long)ap;
5420 
5421 	ap->cbl = ATA_CBL_NONE;
5422 
5423 	ata_link_init(ap, &ap->link, 0);
5424 
5425 #ifdef ATA_IRQ_TRAP
5426 	ap->stats.unhandled_irq = 1;
5427 	ap->stats.idle_irq = 1;
5428 #endif
5429 	return ap;
5430 }
5431 
5432 static void ata_host_release(struct device *gendev, void *res)
5433 {
5434 	struct ata_host *host = dev_get_drvdata(gendev);
5435 	int i;
5436 
5437 	for (i = 0; i < host->n_ports; i++) {
5438 		struct ata_port *ap = host->ports[i];
5439 
5440 		if (!ap)
5441 			continue;
5442 
5443 		if (ap->scsi_host)
5444 			scsi_host_put(ap->scsi_host);
5445 
5446 		kfree(ap->pmp_link);
5447 		kfree(ap->slave_link);
5448 		kfree(ap);
5449 		host->ports[i] = NULL;
5450 	}
5451 
5452 	dev_set_drvdata(gendev, NULL);
5453 }
5454 
5455 /**
5456  *	ata_host_alloc - allocate and init basic ATA host resources
5457  *	@dev: generic device this host is associated with
5458  *	@max_ports: maximum number of ATA ports associated with this host
5459  *
5460  *	Allocate and initialize basic ATA host resources.  LLD calls
5461  *	this function to allocate a host, initializes it fully and
5462  *	attaches it using ata_host_register().
5463  *
5464  *	@max_ports ports are allocated and host->n_ports is
5465  *	initialized to @max_ports.  The caller is allowed to decrease
5466  *	host->n_ports before calling ata_host_register().  The unused
5467  *	ports will be automatically freed on registration.
5468  *
5469  *	RETURNS:
5470  *	Allocated ATA host on success, NULL on failure.
5471  *
5472  *	LOCKING:
5473  *	Inherited from calling layer (may sleep).
5474  */
5475 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5476 {
5477 	struct ata_host *host;
5478 	size_t sz;
5479 	int i;
5480 
5481 	DPRINTK("ENTER\n");
5482 
5483 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5484 		return NULL;
5485 
5486 	/* alloc a container for our list of ATA ports (buses) */
5487 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5489 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5490 	if (!host)
5491 		goto err_out;
5492 
5493 	devres_add(dev, host);
5494 	dev_set_drvdata(dev, host);
5495 
5496 	spin_lock_init(&host->lock);
5497 	host->dev = dev;
5498 	host->n_ports = max_ports;
5499 
5500 	/* allocate ports bound to this host */
5501 	for (i = 0; i < max_ports; i++) {
5502 		struct ata_port *ap;
5503 
5504 		ap = ata_port_alloc(host);
5505 		if (!ap)
5506 			goto err_out;
5507 
5508 		ap->port_no = i;
5509 		host->ports[i] = ap;
5510 	}
5511 
5512 	devres_remove_group(dev, NULL);
5513 	return host;
5514 
5515  err_out:
5516 	devres_release_group(dev, NULL);
5517 	return NULL;
5518 }
5519 
5520 /**
5521  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5522  *	@dev: generic device this host is associated with
5523  *	@ppi: array of ATA port_info to initialize host with
5524  *	@n_ports: number of ATA ports attached to this host
5525  *
5526  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5527  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5528  *	last entry will be used for the remaining ports.
5529  *
5530  *	RETURNS:
5531  *	Allocated ATA host on success, NULL on failure.
5532  *
5533  *	LOCKING:
5534  *	Inherited from calling layer (may sleep).
5535  */
5536 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5537 				      const struct ata_port_info * const * ppi,
5538 				      int n_ports)
5539 {
5540 	const struct ata_port_info *pi;
5541 	struct ata_host *host;
5542 	int i, j;
5543 
5544 	host = ata_host_alloc(dev, n_ports);
5545 	if (!host)
5546 		return NULL;
5547 
5548 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5549 		struct ata_port *ap = host->ports[i];
5550 
5551 		if (ppi[j])
5552 			pi = ppi[j++];
5553 
5554 		ap->pio_mask = pi->pio_mask;
5555 		ap->mwdma_mask = pi->mwdma_mask;
5556 		ap->udma_mask = pi->udma_mask;
5557 		ap->flags |= pi->flags;
5558 		ap->link.flags |= pi->link_flags;
5559 		ap->ops = pi->port_ops;
5560 
5561 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5562 			host->ops = pi->port_ops;
5563 	}
5564 
5565 	return host;
5566 }
5567 
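/*
 * Example (sketch): the common LLD probe pattern around this helper.
 * The port_info contents are illustrative and my_port_ops is
 * hypothetical; a real driver follows up with ata_host_activate() or
 * ata_host_register().
 *
 *	static const struct ata_port_info info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 */
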
5568 /**
5569  *	ata_slave_link_init - initialize slave link
5570  *	@ap: port to initialize slave link for
5571  *
5572  *	Create and initialize slave link for @ap.  This enables slave
5573  *	link handling on the port.
5574  *
5575  *	In libata, a port contains links and a link contains devices.
5576  *	There is a single host link but if a PMP is attached to it,
5577  *	there can be multiple fan-out links.  On SATA, there's usually
5578  *	a single device connected to a link but PATA and SATA
5579  *	controllers emulating TF based interface can have two - master
5580  *	and slave.
5581  *
5582  *	However, there are a few controllers which don't fit into this
5583  *	abstraction too well - SATA controllers which emulate TF
5584  *	interface with both master and slave devices but also have
5585  *	separate SCR register sets for each device.  These controllers
5586  *	need separate links for physical link handling
5587  *	(e.g. onlineness, link speed) but should be treated like a
5588  *	traditional M/S controller for everything else (e.g. command
5589  *	issue, softreset).
5590  *
5591  *	slave_link is libata's way of handling this class of
5592  *	controllers without impacting core layer too much.  For
5593  *	anything other than physical link handling, the default host
5594  *	link is used for both master and slave.  For physical link
5595  *	handling, separate @ap->slave_link is used.  All dirty details
5596  *	are implemented inside libata core layer.  From LLD's POV, the
5597  *	only difference is that prereset, hardreset and postreset are
5598  *	called once more for the slave link, so the reset sequence
5599  *	looks like the following.
5600  *
5601  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5602  *	softreset(M) -> postreset(M) -> postreset(S)
5603  *
5604  *	Note that softreset is called only for the master.  Softreset
5605  *	resets both M/S by definition, so SRST on master should handle
5606  *	both (the standard method will work just fine).
5607  *
5608  *	LOCKING:
5609  *	Should be called before host is registered.
5610  *
5611  *	RETURNS:
5612  *	0 on success, -errno on failure.
5613  */
5614 int ata_slave_link_init(struct ata_port *ap)
5615 {
5616 	struct ata_link *link;
5617 
5618 	WARN_ON(ap->slave_link);
5619 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5620 
5621 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5622 	if (!link)
5623 		return -ENOMEM;
5624 
5625 	ata_link_init(ap, link, 1);
5626 	ap->slave_link = link;
5627 	return 0;
5628 }
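/*
 * Illustrative sketch (not part of the original file): an LLD for a
 * controller with per-device SCR register sets enables slave link
 * handling on each port before the host is registered.  foo_setup_ports
 * is a hypothetical helper.
 */
#if 0
static int foo_setup_ports(struct ata_host *host)
{
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		rc = ata_slave_link_init(host->ports[i]);
		if (rc)
			return rc;
	}
	return 0;
}
#endif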
5629 
5630 static void ata_host_stop(struct device *gendev, void *res)
5631 {
5632 	struct ata_host *host = dev_get_drvdata(gendev);
5633 	int i;
5634 
5635 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5636 
5637 	for (i = 0; i < host->n_ports; i++) {
5638 		struct ata_port *ap = host->ports[i];
5639 
5640 		if (ap->ops->port_stop)
5641 			ap->ops->port_stop(ap);
5642 	}
5643 
5644 	if (host->ops->host_stop)
5645 		host->ops->host_stop(host);
5646 }
5647 
5648 /**
5649  *	ata_finalize_port_ops - finalize ata_port_operations
5650  *	@ops: ata_port_operations to finalize
5651  *
5652  *	An ata_port_operations can inherit from another ops and that
5653  *	ops can again inherit from another.  This can go on as many
5654  *	times as necessary as long as there is no loop in the
5655  *	inheritance chain.
5656  *
5657  *	Ops tables are finalized when the host is started.  NULL or
5658  *	unspecified entries are inherited from the closest ancestor
5659  *	which has the method, and the entry is populated with it.
5660  *	After finalization, the ops table directly points to all the
5661  *	methods and ->inherits is no longer necessary and cleared.
5662  *
5663  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5664  *
5665  *	LOCKING:
5666  *	None.
5667  */
5668 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5669 {
5670 	static DEFINE_SPINLOCK(lock);
5671 	const struct ata_port_operations *cur;
5672 	void **begin = (void **)ops;
5673 	void **end = (void **)&ops->inherits;
5674 	void **pp;
5675 
5676 	if (!ops || !ops->inherits)
5677 		return;
5678 
5679 	spin_lock(&lock);
5680 
5681 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5682 		void **inherit = (void **)cur;
5683 
5684 		for (pp = begin; pp < end; pp++, inherit++)
5685 			if (!*pp)
5686 				*pp = *inherit;
5687 	}
5688 
5689 	for (pp = begin; pp < end; pp++)
5690 		if (IS_ERR(*pp))
5691 			*pp = NULL;
5692 
5693 	ops->inherits = NULL;
5694 
5695 	spin_unlock(&lock);
5696 }
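/*
 * Illustrative sketch (not part of the original file): how an LLD
 * declares an inheriting ops table.  Unset methods are filled in from
 * sata_port_ops at finalization; ATA_OP_NULL forces the inherited
 * ->qc_defer back to NULL.  foo_hardreset is a hypothetical method.
 */
#if 0
static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,
	.qc_defer	= ATA_OP_NULL,
	.hardreset	= foo_hardreset,
};
#endif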
5697 
5698 /**
5699  *	ata_host_start - start and freeze ports of an ATA host
5700  *	@host: ATA host to start ports for
5701  *
5702  *	Start and then freeze ports of @host.  Started status is
5703  *	recorded in host->flags, so this function can be called
5704  *	multiple times.  Ports are guaranteed to get started only
5705  *	once.  If host->ops isn't initialized yet, it's set to the
5706  *	first non-dummy port ops.
5707  *
5708  *	LOCKING:
5709  *	Inherited from calling layer (may sleep).
5710  *
5711  *	RETURNS:
5712  *	0 if all ports are started successfully, -errno otherwise.
5713  */
5714 int ata_host_start(struct ata_host *host)
5715 {
5716 	int have_stop = 0;
5717 	void *start_dr = NULL;
5718 	int i, rc;
5719 
5720 	if (host->flags & ATA_HOST_STARTED)
5721 		return 0;
5722 
5723 	ata_finalize_port_ops(host->ops);
5724 
5725 	for (i = 0; i < host->n_ports; i++) {
5726 		struct ata_port *ap = host->ports[i];
5727 
5728 		ata_finalize_port_ops(ap->ops);
5729 
5730 		if (!host->ops && !ata_port_is_dummy(ap))
5731 			host->ops = ap->ops;
5732 
5733 		if (ap->ops->port_stop)
5734 			have_stop = 1;
5735 	}
5736 
5737 	if (host->ops->host_stop)
5738 		have_stop = 1;
5739 
5740 	if (have_stop) {
5741 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5742 		if (!start_dr)
5743 			return -ENOMEM;
5744 	}
5745 
5746 	for (i = 0; i < host->n_ports; i++) {
5747 		struct ata_port *ap = host->ports[i];
5748 
5749 		if (ap->ops->port_start) {
5750 			rc = ap->ops->port_start(ap);
5751 			if (rc) {
5752 				if (rc != -ENODEV)
5753 					dev_printk(KERN_ERR, host->dev,
5754 						"failed to start port %d "
5755 						"(errno=%d)\n", i, rc);
5756 				goto err_out;
5757 			}
5758 		}
5759 		ata_eh_freeze_port(ap);
5760 	}
5761 
5762 	if (start_dr)
5763 		devres_add(host->dev, start_dr);
5764 	host->flags |= ATA_HOST_STARTED;
5765 	return 0;
5766 
5767  err_out:
5768 	while (--i >= 0) {
5769 		struct ata_port *ap = host->ports[i];
5770 
5771 		if (ap->ops->port_stop)
5772 			ap->ops->port_stop(ap);
5773 	}
5774 	devres_free(start_dr);
5775 	return rc;
5776 }
5777 
5778 /**
5779  *	ata_sas_host_init - Initialize a host struct
5780  *	@host:	host to initialize
5781  *	@dev:	device host is attached to
5782  *	@flags:	host flags
5783  *	@ops:	port_ops
5784  *
5785  *	LOCKING:
5786  *	PCI/etc. bus probe sem.
5787  *
5788  */
5789 /* KILLME - the only user left is ipr */
5790 void ata_host_init(struct ata_host *host, struct device *dev,
5791 		   unsigned long flags, struct ata_port_operations *ops)
5792 {
5793 	spin_lock_init(&host->lock);
5794 	host->dev = dev;
5795 	host->flags = flags;
5796 	host->ops = ops;
5797 }
5798 
5799 /**
5800  *	ata_host_register - register initialized ATA host
5801  *	@host: ATA host to register
5802  *	@sht: template for SCSI host
5803  *
5804  *	Register initialized ATA host.  @host is allocated using
5805  *	ata_host_alloc() and fully initialized by LLD.  This function
5806  *	starts ports, registers @host with ATA and SCSI layers and
5807  *	probes the attached devices.
5808  *
5809  *	LOCKING:
5810  *	Inherited from calling layer (may sleep).
5811  *
5812  *	RETURNS:
5813  *	0 on success, -errno otherwise.
5814  */
5815 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5816 {
5817 	int i, rc;
5818 
5819 	/* host must have been started */
5820 	if (!(host->flags & ATA_HOST_STARTED)) {
5821 		dev_printk(KERN_ERR, host->dev,
5822 			   "BUG: trying to register unstarted host\n");
5823 		WARN_ON(1);
5824 		return -EINVAL;
5825 	}
5826 
5827 	/* Blow away unused ports.  This happens when LLD can't
5828 	 * determine the exact number of ports to allocate at
5829 	 * allocation time.
5830 	 */
5831 	for (i = host->n_ports; host->ports[i]; i++)
5832 		kfree(host->ports[i]);
5833 
5834 	/* give ports names and add SCSI hosts */
5835 	for (i = 0; i < host->n_ports; i++)
5836 		host->ports[i]->print_id = ata_print_id++;
5837 
5838 	rc = ata_scsi_add_hosts(host, sht);
5839 	if (rc)
5840 		return rc;
5841 
5842 	/* associate with ACPI nodes */
5843 	ata_acpi_associate(host);
5844 
5845 	/* set cable, sata_spd_limit and report */
5846 	for (i = 0; i < host->n_ports; i++) {
5847 		struct ata_port *ap = host->ports[i];
5848 		unsigned long xfer_mask;
5849 
5850 		/* set SATA cable type if still unset */
5851 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5852 			ap->cbl = ATA_CBL_SATA;
5853 
5854 		/* init sata_spd_limit to the current value */
5855 		sata_link_init_spd(&ap->link);
5856 		if (ap->slave_link)
5857 			sata_link_init_spd(ap->slave_link);
5858 
5859 		/* print per-port info to dmesg */
5860 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5861 					      ap->udma_mask);
5862 
5863 		if (!ata_port_is_dummy(ap)) {
5864 			ata_port_printk(ap, KERN_INFO,
5865 					"%cATA max %s %s\n",
5866 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5867 					ata_mode_string(xfer_mask),
5868 					ap->link.eh_info.desc);
5869 			ata_ehi_clear_desc(&ap->link.eh_info);
5870 		} else
5871 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5872 	}
5873 
5874 	/* perform each probe synchronously */
5875 	DPRINTK("probe begin\n");
5876 	for (i = 0; i < host->n_ports; i++) {
5877 		struct ata_port *ap = host->ports[i];
5878 
5879 		/* probe */
5880 		if (ap->ops->error_handler) {
5881 			struct ata_eh_info *ehi = &ap->link.eh_info;
5882 			unsigned long flags;
5883 
5884 			ata_port_probe(ap);
5885 
5886 			/* kick EH for boot probing */
5887 			spin_lock_irqsave(ap->lock, flags);
5888 
5889 			ehi->probe_mask |= ATA_ALL_DEVICES;
5890 			ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
5891 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5892 
5893 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5894 			ap->pflags |= ATA_PFLAG_LOADING;
5895 			ata_port_schedule_eh(ap);
5896 
5897 			spin_unlock_irqrestore(ap->lock, flags);
5898 
5899 			/* wait for EH to finish */
5900 			ata_port_wait_eh(ap);
5901 		} else {
5902 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5903 			rc = ata_bus_probe(ap);
5904 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
5905 
5906 			if (rc) {
5907 				/* FIXME: do something useful here?
5908 				 * Current libata behavior will
5909 				 * tear down everything when
5910 				 * the module is removed
5911 				 * or the h/w is unplugged.
5912 				 */
5913 			}
5914 		}
5915 	}
5916 
5917 	/* probes are done, now scan each port's disk(s) */
5918 	DPRINTK("host probe begin\n");
5919 	for (i = 0; i < host->n_ports; i++) {
5920 		struct ata_port *ap = host->ports[i];
5921 
5922 		ata_scsi_scan_host(ap, 1);
5923 	}
5924 
5925 	return 0;
5926 }
5927 
5928 /**
5929  *	ata_host_activate - start host, request IRQ and register it
5930  *	@host: target ATA host
5931  *	@irq: IRQ to request
5932  *	@irq_handler: irq_handler used when requesting IRQ
5933  *	@irq_flags: irq_flags used when requesting IRQ
5934  *	@sht: scsi_host_template to use when registering the host
5935  *
5936  *	After allocating an ATA host and initializing it, most libata
5937  *	LLDs perform three steps to activate the host - start host,
5938  *	request IRQ and register it.  This helper takes necessasry
5939  *	arguments and performs the three steps in one go.
5940  *
5941  *	An invalid IRQ (i.e. zero) skips the IRQ registration and expects
5942  *	the host to have set polling mode on the ports.  In this case,
5943  *	@irq_handler should be NULL.
5944  *
5945  *	LOCKING:
5946  *	Inherited from calling layer (may sleep).
5947  *
5948  *	RETURNS:
5949  *	0 on success, -errno otherwise.
5950  */
5951 int ata_host_activate(struct ata_host *host, int irq,
5952 		      irq_handler_t irq_handler, unsigned long irq_flags,
5953 		      struct scsi_host_template *sht)
5954 {
5955 	int i, rc;
5956 
5957 	rc = ata_host_start(host);
5958 	if (rc)
5959 		return rc;
5960 
5961 	/* Special case for polling mode */
5962 	if (!irq) {
5963 		WARN_ON(irq_handler);
5964 		return ata_host_register(host, sht);
5965 	}
5966 
5967 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5968 			      dev_driver_string(host->dev), host);
5969 	if (rc)
5970 		return rc;
5971 
5972 	for (i = 0; i < host->n_ports; i++)
5973 		ata_port_desc(host->ports[i], "irq %d", irq);
5974 
5975 	rc = ata_host_register(host, sht);
5976 	/* if failed, just free the IRQ and leave ports alone */
5977 	if (rc)
5978 		devm_free_irq(host->dev, irq, host);
5979 
5980 	return rc;
5981 }
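/*
 * Illustrative sketch (not part of the original file): the typical tail
 * of a PCI LLD's ->probe() using ata_host_activate().  foo_interrupt
 * and foo_sht are hypothetical; the host is assumed to have been
 * allocated and initialized earlier in the probe path.
 */
#if 0
static int foo_probe_tail(struct pci_dev *pdev, struct ata_host *host)
{
	/* starts ports, requests the IRQ and registers the host */
	return ata_host_activate(host, pdev->irq, foo_interrupt,
				 IRQF_SHARED, &foo_sht);
}
#endif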
5982 
5983 /**
5984  *	ata_port_detach - Detach ATA port in preparation for device removal
5985  *	@ap: ATA port to be detached
5986  *
5987  *	Detach all ATA devices and the associated SCSI devices of @ap;
5988  *	then, remove the associated SCSI host.  @ap is guaranteed to
5989  *	be quiescent on return from this function.
5990  *
5991  *	LOCKING:
5992  *	Kernel thread context (may sleep).
5993  */
5994 static void ata_port_detach(struct ata_port *ap)
5995 {
5996 	unsigned long flags;
5997 	struct ata_link *link;
5998 	struct ata_device *dev;
5999 
6000 	if (!ap->ops->error_handler)
6001 		goto skip_eh;
6002 
6003 	/* tell EH we're leaving & flush EH */
6004 	spin_lock_irqsave(ap->lock, flags);
6005 	ap->pflags |= ATA_PFLAG_UNLOADING;
6006 	spin_unlock_irqrestore(ap->lock, flags);
6007 
6008 	ata_port_wait_eh(ap);
6009 
6010 	/* EH is now guaranteed to see UNLOADING - EH context belongs
6011 	 * to us.  Restore SControl and disable all existing devices.
6012 	 */
6013 	__ata_port_for_each_link(link, ap) {
6014 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
6015 		ata_link_for_each_dev(dev, link)
6016 			ata_dev_disable(dev);
6017 	}
6018 
6019 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
6020 	 * will be skipped and retries will be terminated with bad
6021 	 * target.
6022 	 */
6023 	spin_lock_irqsave(ap->lock, flags);
6024 	ata_port_freeze(ap);	/* won't be thawed */
6025 	spin_unlock_irqrestore(ap->lock, flags);
6026 
6027 	ata_port_wait_eh(ap);
6028 	cancel_rearming_delayed_work(&ap->hotplug_task);
6029 
6030  skip_eh:
6031 	/* remove the associated SCSI host */
6032 	scsi_remove_host(ap->scsi_host);
6033 }
6034 
6035 /**
6036  *	ata_host_detach - Detach all ports of an ATA host
6037  *	@host: Host to detach
6038  *
6039  *	Detach all ports of @host.
6040  *
6041  *	LOCKING:
6042  *	Kernel thread context (may sleep).
6043  */
6044 void ata_host_detach(struct ata_host *host)
6045 {
6046 	int i;
6047 
6048 	for (i = 0; i < host->n_ports; i++)
6049 		ata_port_detach(host->ports[i]);
6050 
6051 	/* the host is dead now, dissociate ACPI */
6052 	ata_acpi_dissociate(host);
6053 }
6054 
6055 #ifdef CONFIG_PCI
6056 
6057 /**
6058  *	ata_pci_remove_one - PCI layer callback for device removal
6059  *	@pdev: PCI device that was removed
6060  *
6061  *	PCI layer indicates to libata via this hook that hot-unplug or
6062  *	module unload event has occurred.  Detach all ports.  Resource
6063  *	release is handled via devres.
6064  *
6065  *	LOCKING:
6066  *	Inherited from PCI layer (may sleep).
6067  */
6068 void ata_pci_remove_one(struct pci_dev *pdev)
6069 {
6070 	struct device *dev = &pdev->dev;
6071 	struct ata_host *host = dev_get_drvdata(dev);
6072 
6073 	ata_host_detach(host);
6074 }
6075 
6076 /* move to PCI subsystem */
6077 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6078 {
6079 	unsigned long tmp = 0;
6080 
6081 	switch (bits->width) {
6082 	case 1: {
6083 		u8 tmp8 = 0;
6084 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6085 		tmp = tmp8;
6086 		break;
6087 	}
6088 	case 2: {
6089 		u16 tmp16 = 0;
6090 		pci_read_config_word(pdev, bits->reg, &tmp16);
6091 		tmp = tmp16;
6092 		break;
6093 	}
6094 	case 4: {
6095 		u32 tmp32 = 0;
6096 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6097 		tmp = tmp32;
6098 		break;
6099 	}
6100 
6101 	default:
6102 		return -EINVAL;
6103 	}
6104 
6105 	tmp &= bits->mask;
6106 
6107 	return (tmp == bits->val) ? 1 : 0;
6108 }
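/*
 * Illustrative sketch (not part of the original file): testing a
 * hypothetical per-port enable bit in PCI config space.  The register
 * offset, mask and value are made up for the example.
 */
#if 0
static int foo_port_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits foo_enable_bits = {
		.reg	= 0x41,		/* hypothetical config register */
		.width	= 1,		/* byte-wide access */
		.mask	= 0x80,
		.val	= 0x80,
	};

	/* 1 if enabled, 0 if not, -EINVAL on bad width */
	return pci_test_config_bits(pdev, &foo_enable_bits);
}
#endif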
6109 
6110 #ifdef CONFIG_PM
6111 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6112 {
6113 	pci_save_state(pdev);
6114 	pci_disable_device(pdev);
6115 
6116 	if (mesg.event & PM_EVENT_SLEEP)
6117 		pci_set_power_state(pdev, PCI_D3hot);
6118 }
6119 
6120 int ata_pci_device_do_resume(struct pci_dev *pdev)
6121 {
6122 	int rc;
6123 
6124 	pci_set_power_state(pdev, PCI_D0);
6125 	pci_restore_state(pdev);
6126 
6127 	rc = pcim_enable_device(pdev);
6128 	if (rc) {
6129 		dev_printk(KERN_ERR, &pdev->dev,
6130 			   "failed to enable device after resume (%d)\n", rc);
6131 		return rc;
6132 	}
6133 
6134 	pci_set_master(pdev);
6135 	return 0;
6136 }
6137 
6138 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6139 {
6140 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6141 	int rc = 0;
6142 
6143 	rc = ata_host_suspend(host, mesg);
6144 	if (rc)
6145 		return rc;
6146 
6147 	ata_pci_device_do_suspend(pdev, mesg);
6148 
6149 	return 0;
6150 }
6151 
6152 int ata_pci_device_resume(struct pci_dev *pdev)
6153 {
6154 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6155 	int rc;
6156 
6157 	rc = ata_pci_device_do_resume(pdev);
6158 	if (rc == 0)
6159 		ata_host_resume(host);
6160 	return rc;
6161 }
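/*
 * Illustrative sketch (not part of the original file): how a PCI LLD
 * typically wires these helpers into its pci_driver.  foo_* names and
 * the ID table are hypothetical.
 */
#if 0
static struct pci_driver foo_pci_driver = {
	.name		= "foo_ata",
	.id_table	= foo_pci_ids,
	.probe		= foo_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
#endif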
6162 #endif /* CONFIG_PM */
6163 
6164 #endif /* CONFIG_PCI */
6165 
6166 static int __init ata_parse_force_one(char **cur,
6167 				      struct ata_force_ent *force_ent,
6168 				      const char **reason)
6169 {
6170 	/* FIXME: Currently, there's no way to tag init const data and
6171 	 * using __initdata causes build failure on some versions of
6172 	 * gcc.  Once __initdataconst is implemented, add const to the
6173 	 * following structure.
6174 	 */
6175 	static struct ata_force_param force_tbl[] __initdata = {
6176 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6177 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6178 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6179 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6180 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6181 		{ "sata",	.cbl		= ATA_CBL_SATA },
6182 		{ "1.5Gbps",	.spd_limit	= 1 },
6183 		{ "3.0Gbps",	.spd_limit	= 2 },
6184 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6185 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6186 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6187 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6188 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6189 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6190 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6191 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6192 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6193 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6194 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6195 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6196 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6197 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6198 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6199 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6200 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6201 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6202 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6203 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6204 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6205 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6206 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6207 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6208 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6209 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6210 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6211 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6212 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6213 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6214 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6215 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6216 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6217 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6218 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6219 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6220 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6221 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6222 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6223 	};
6224 	char *start = *cur, *p = *cur;
6225 	char *id, *val, *endp;
6226 	const struct ata_force_param *match_fp = NULL;
6227 	int nr_matches = 0, i;
6228 
6229 	/* find where this param ends and update *cur */
6230 	while (*p != '\0' && *p != ',')
6231 		p++;
6232 
6233 	if (*p == '\0')
6234 		*cur = p;
6235 	else
6236 		*cur = p + 1;
6237 
6238 	*p = '\0';
6239 
6240 	/* parse */
6241 	p = strchr(start, ':');
6242 	if (!p) {
6243 		val = strstrip(start);
6244 		goto parse_val;
6245 	}
6246 	*p = '\0';
6247 
6248 	id = strstrip(start);
6249 	val = strstrip(p + 1);
6250 
6251 	/* parse id */
6252 	p = strchr(id, '.');
6253 	if (p) {
6254 		*p++ = '\0';
6255 		force_ent->device = simple_strtoul(p, &endp, 10);
6256 		if (p == endp || *endp != '\0') {
6257 			*reason = "invalid device";
6258 			return -EINVAL;
6259 		}
6260 	}
6261 
6262 	force_ent->port = simple_strtoul(id, &endp, 10);
6263 	if (id == endp || *endp != '\0') {
6264 		*reason = "invalid port/link";
6265 		return -EINVAL;
6266 	}
6267 
6268  parse_val:
6269 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6270 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6271 		const struct ata_force_param *fp = &force_tbl[i];
6272 
6273 		if (strncasecmp(val, fp->name, strlen(val)))
6274 			continue;
6275 
6276 		nr_matches++;
6277 		match_fp = fp;
6278 
6279 		if (strcasecmp(val, fp->name) == 0) {
6280 			nr_matches = 1;
6281 			break;
6282 		}
6283 	}
6284 
6285 	if (!nr_matches) {
6286 		*reason = "unknown value";
6287 		return -EINVAL;
6288 	}
6289 	if (nr_matches > 1) {
6290 		*reason = "ambigious value";
6291 		return -EINVAL;
6292 	}
6293 
6294 	force_ent->param = *match_fp;
6295 
6296 	return 0;
6297 }
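/*
 * Illustrative note (not part of the original file): each comma
 * separated entry parsed above has the form [ID:]VAL, where ID is
 * PORT[.DEVICE] matching the ID libata prints on the console.  A few
 * hypothetical command lines, using values from force_tbl:
 *
 *	libata.force=3.0Gbps		limit all links to 3.0Gbps
 *	libata.force=2:noncq		turn off NCQ on port 2
 *	libata.force=1.00:udma/33	cap device 0 on port 1 to UDMA/33
 */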
6298 
6299 static void __init ata_parse_force_param(void)
6300 {
6301 	int idx = 0, size = 1;
6302 	int last_port = -1, last_device = -1;
6303 	char *p, *cur, *next;
6304 
6305 	/* calculate maximum number of params and allocate force_tbl */
6306 	for (p = ata_force_param_buf; *p; p++)
6307 		if (*p == ',')
6308 			size++;
6309 
6310 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6311 	if (!ata_force_tbl) {
6312 		printk(KERN_WARNING "ata: failed to allocate force table, "
6313 		       "libata.force ignored\n");
6314 		return;
6315 	}
6316 
6317 	/* parse and populate the table */
6318 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6319 		const char *reason = "";
6320 		struct ata_force_ent te = { .port = -1, .device = -1 };
6321 
6322 		next = cur;
6323 		if (ata_parse_force_one(&next, &te, &reason)) {
6324 			printk(KERN_WARNING "ata: failed to parse force "
6325 			       "parameter \"%s\" (%s)\n",
6326 			       cur, reason);
6327 			continue;
6328 		}
6329 
6330 		if (te.port == -1) {
6331 			te.port = last_port;
6332 			te.device = last_device;
6333 		}
6334 
6335 		ata_force_tbl[idx++] = te;
6336 
6337 		last_port = te.port;
6338 		last_device = te.device;
6339 	}
6340 
6341 	ata_force_tbl_size = idx;
6342 }
6343 
6344 static int __init ata_init(void)
6345 {
6346 	ata_parse_force_param();
6347 
6348 	ata_wq = create_workqueue("ata");
6349 	if (!ata_wq)
6350 		goto free_force_tbl;
6351 
6352 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6353 	if (!ata_aux_wq)
6354 		goto free_wq;
6355 
6356 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6357 	return 0;
6358 
6359 free_wq:
6360 	destroy_workqueue(ata_wq);
6361 free_force_tbl:
6362 	kfree(ata_force_tbl);
6363 	return -ENOMEM;
6364 }
6365 
6366 static void __exit ata_exit(void)
6367 {
6368 	kfree(ata_force_tbl);
6369 	destroy_workqueue(ata_wq);
6370 	destroy_workqueue(ata_aux_wq);
6371 }
6372 
6373 subsys_initcall(ata_init);
6374 module_exit(ata_exit);
6375 
6376 static unsigned long ratelimit_time;
6377 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6378 
6379 int ata_ratelimit(void)
6380 {
6381 	int rc;
6382 	unsigned long flags;
6383 
6384 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6385 
6386 	if (time_after(jiffies, ratelimit_time)) {
6387 		rc = 1;
6388 		ratelimit_time = jiffies + (HZ/5);
6389 	} else
6390 		rc = 0;
6391 
6392 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6393 
6394 	return rc;
6395 }
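/*
 * Illustrative sketch (not part of the original file): throttling a
 * noisy message from an interrupt path to roughly five per second
 * (the HZ/5 window above).  ap and status come from the caller.
 */
#if 0
if (ata_ratelimit())
	ata_port_printk(ap, KERN_WARNING,
			"spurious interrupt (status 0x%x)\n", status);
#endif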
6396 
6397 /**
6398  *	ata_wait_register - wait until register value changes
6399  *	@reg: IO-mapped register
6400  *	@mask: Mask to apply to read register value
6401  *	@val: Wait condition
6402  *	@interval: polling interval in milliseconds
6403  *	@timeout: timeout in milliseconds
6404  *
6405  *	Waiting for some bits of register to change is a common
6406  *	operation for ATA controllers.  This function reads 32bit LE
6407  *	IO-mapped register @reg and tests for the following condition.
6408  *
6409  *	(*@reg & mask) != val
6410  *
6411  *	If the condition is met, it returns; otherwise, the process is
6412  *	repeated after @interval_msec until timeout.
6413  *
6414  *	LOCKING:
6415  *	Kernel thread context (may sleep)
6416  *
6417  *	RETURNS:
6418  *	The final register value.
6419  */
6420 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6421 		      unsigned long interval, unsigned long timeout)
6422 {
6423 	unsigned long deadline;
6424 	u32 tmp;
6425 
6426 	tmp = ioread32(reg);
6427 
6428 	/* Calculate timeout _after_ the first read to make sure
6429 	 * preceding writes reach the controller before starting to
6430 	 * eat away the timeout.
6431 	 */
6432 	deadline = ata_deadline(jiffies, timeout);
6433 
6434 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6435 		msleep(interval);
6436 		tmp = ioread32(reg);
6437 	}
6438 
6439 	return tmp;
6440 }
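/*
 * Illustrative sketch (not part of the original file): waiting up to
 * one second for a hypothetical BUSY bit to clear, polling every 10ms.
 * FOO_STATUS, FOO_BUSY and mmio are placeholders.
 */
#if 0
u32 status = ata_wait_register(mmio + FOO_STATUS, FOO_BUSY, FOO_BUSY,
			       10, 1000);
if (status & FOO_BUSY)
	return -EBUSY;	/* timed out, bit never cleared */
#endif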
6441 
6442 /*
6443  * Dummy port_ops
6444  */
6445 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6446 {
6447 	return AC_ERR_SYSTEM;
6448 }
6449 
6450 static void ata_dummy_error_handler(struct ata_port *ap)
6451 {
6452 	/* truly dummy */
6453 }
6454 
6455 struct ata_port_operations ata_dummy_port_ops = {
6456 	.qc_prep		= ata_noop_qc_prep,
6457 	.qc_issue		= ata_dummy_qc_issue,
6458 	.error_handler		= ata_dummy_error_handler,
6459 };
6460 
6461 const struct ata_port_info ata_dummy_port_info = {
6462 	.port_ops		= &ata_dummy_port_ops,
6463 };
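/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * controller whose second channel is unusable passes the dummy info so
 * the port is still allocated but never driven.
 */
#if 0
const struct ata_port_info *ppi[] = {
	&foo_port_info,
	&ata_dummy_port_info,	/* port 1 is dead on this chip */
	NULL
};
#endif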
6464 
6465 /*
6466  * libata is essentially a library of internal helper functions for
6467  * low-level ATA host controller drivers.  As such, the API/ABI is
6468  * likely to change as new drivers are added and updated.
6469  * Do not depend on ABI/API stability.
6470  */
6471 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6472 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6473 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6474 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6475 EXPORT_SYMBOL_GPL(sata_port_ops);
6476 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6477 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6478 EXPORT_SYMBOL_GPL(__ata_port_next_link);
6479 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6480 EXPORT_SYMBOL_GPL(ata_host_init);
6481 EXPORT_SYMBOL_GPL(ata_host_alloc);
6482 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6483 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6484 EXPORT_SYMBOL_GPL(ata_host_start);
6485 EXPORT_SYMBOL_GPL(ata_host_register);
6486 EXPORT_SYMBOL_GPL(ata_host_activate);
6487 EXPORT_SYMBOL_GPL(ata_host_detach);
6488 EXPORT_SYMBOL_GPL(ata_sg_init);
6489 EXPORT_SYMBOL_GPL(ata_qc_complete);
6490 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6491 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6492 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6493 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6494 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6495 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6496 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6497 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6498 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6499 EXPORT_SYMBOL_GPL(ata_mode_string);
6500 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6501 EXPORT_SYMBOL_GPL(ata_port_start);
6502 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6503 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6504 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6505 EXPORT_SYMBOL_GPL(ata_port_probe);
6506 EXPORT_SYMBOL_GPL(ata_dev_disable);
6507 EXPORT_SYMBOL_GPL(sata_set_spd);
6508 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6509 EXPORT_SYMBOL_GPL(sata_link_debounce);
6510 EXPORT_SYMBOL_GPL(sata_link_resume);
6511 EXPORT_SYMBOL_GPL(ata_std_prereset);
6512 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6513 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6514 EXPORT_SYMBOL_GPL(ata_std_postreset);
6515 EXPORT_SYMBOL_GPL(ata_dev_classify);
6516 EXPORT_SYMBOL_GPL(ata_dev_pair);
6517 EXPORT_SYMBOL_GPL(ata_port_disable);
6518 EXPORT_SYMBOL_GPL(ata_ratelimit);
6519 EXPORT_SYMBOL_GPL(ata_wait_register);
6520 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6521 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6522 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6523 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6524 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6525 EXPORT_SYMBOL_GPL(sata_scr_valid);
6526 EXPORT_SYMBOL_GPL(sata_scr_read);
6527 EXPORT_SYMBOL_GPL(sata_scr_write);
6528 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6529 EXPORT_SYMBOL_GPL(ata_link_online);
6530 EXPORT_SYMBOL_GPL(ata_link_offline);
6531 #ifdef CONFIG_PM
6532 EXPORT_SYMBOL_GPL(ata_host_suspend);
6533 EXPORT_SYMBOL_GPL(ata_host_resume);
6534 #endif /* CONFIG_PM */
6535 EXPORT_SYMBOL_GPL(ata_id_string);
6536 EXPORT_SYMBOL_GPL(ata_id_c_string);
6537 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6538 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6539 
6540 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6541 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6542 EXPORT_SYMBOL_GPL(ata_timing_compute);
6543 EXPORT_SYMBOL_GPL(ata_timing_merge);
6544 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6545 
6546 #ifdef CONFIG_PCI
6547 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6548 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6549 #ifdef CONFIG_PM
6550 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6551 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6552 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6553 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6554 #endif /* CONFIG_PM */
6555 #endif /* CONFIG_PCI */
6556 
6557 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6558 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6559 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6560 EXPORT_SYMBOL_GPL(ata_port_desc);
6561 #ifdef CONFIG_PCI
6562 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6563 #endif /* CONFIG_PCI */
6564 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6565 EXPORT_SYMBOL_GPL(ata_link_abort);
6566 EXPORT_SYMBOL_GPL(ata_port_abort);
6567 EXPORT_SYMBOL_GPL(ata_port_freeze);
6568 EXPORT_SYMBOL_GPL(sata_async_notification);
6569 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6570 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6571 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6572 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6573 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6574 EXPORT_SYMBOL_GPL(ata_do_eh);
6575 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6576 
6577 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6578 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6579 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6580 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6581 EXPORT_SYMBOL_GPL(ata_cable_sata);
6582