xref: /linux/drivers/ata/libata-core.c (revision 7265706c8fd57722f622f336ec110cb35f83e739)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <scsi/scsi.h>
60 #include <scsi/scsi_cmnd.h>
61 #include <scsi/scsi_host.h>
62 #include <linux/libata.h>
63 #include <asm/byteorder.h>
64 #include <linux/cdrom.h>
65 
66 #include "libata.h"
67 
68 
69 /* debounce timing parameters in msecs { interval, duration, timeout } */
70 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
71 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
72 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
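/*
 * Illustrative sketch (not part of the driver): a prereset hook would
 * typically hand one of the tables above straight to sata_link_resume()
 * or sata_link_debounce().  The three values are the SStatus sampling
 * interval, how long the PHY status must stay stable, and the overall
 * timeout, all in msecs.
 */
static inline int ata_example_debounce_resume(struct ata_link *link,
					      unsigned long deadline)
{
	return sata_link_resume(link, sata_deb_timing_normal, deadline);
}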
73 
74 const struct ata_port_operations ata_base_port_ops = {
75 	.prereset		= ata_std_prereset,
76 	.postreset		= ata_std_postreset,
77 	.error_handler		= ata_std_error_handler,
78 };
79 
80 const struct ata_port_operations sata_port_ops = {
81 	.inherits		= &ata_base_port_ops,
82 
83 	.qc_defer		= ata_std_qc_defer,
84 	.hardreset		= sata_std_hardreset,
85 };
86 
87 static unsigned int ata_dev_init_params(struct ata_device *dev,
88 					u16 heads, u16 sectors);
89 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
90 static unsigned int ata_dev_set_feature(struct ata_device *dev,
91 					u8 enable, u8 feature);
92 static void ata_dev_xfermask(struct ata_device *dev);
93 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
94 
95 unsigned int ata_print_id = 1;
96 static struct workqueue_struct *ata_wq;
97 
98 struct workqueue_struct *ata_aux_wq;
99 
100 struct ata_force_param {
101 	const char	*name;
102 	unsigned int	cbl;
103 	int		spd_limit;
104 	unsigned long	xfer_mask;
105 	unsigned int	horkage_on;
106 	unsigned int	horkage_off;
107 };
108 
109 struct ata_force_ent {
110 	int			port;
111 	int			device;
112 	struct ata_force_param	param;
113 };
114 
115 static struct ata_force_ent *ata_force_tbl;
116 static int ata_force_tbl_size;
117 
118 static char ata_force_param_buf[PAGE_SIZE] __initdata;
119 /* param_buf is thrown away after initialization, disallow read */
120 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
121 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
122 
123 static int atapi_enabled = 1;
124 module_param(atapi_enabled, int, 0444);
125 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
126 
127 static int atapi_dmadir = 0;
128 module_param(atapi_dmadir, int, 0444);
129 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
130 
131 int atapi_passthru16 = 1;
132 module_param(atapi_passthru16, int, 0444);
133 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
134 
135 int libata_fua = 0;
136 module_param_named(fua, libata_fua, int, 0444);
137 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
138 
139 static int ata_ignore_hpa;
140 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
141 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
142 
143 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
144 module_param_named(dma, libata_dma_mask, int, 0444);
145 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
146 
147 static int ata_probe_timeout;
148 module_param(ata_probe_timeout, int, 0444);
149 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
150 
151 int libata_noacpi = 0;
152 module_param_named(noacpi, libata_noacpi, int, 0444);
153 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
154 
155 int libata_allow_tpm = 0;
156 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
157 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
158 
159 MODULE_AUTHOR("Jeff Garzik");
160 MODULE_DESCRIPTION("Library module for ATA devices");
161 MODULE_LICENSE("GPL");
162 MODULE_VERSION(DRV_VERSION);
163 
164 
165 /**
166  *	ata_force_cbl - force cable type according to libata.force
167  *	@ap: ATA port of interest
168  *
169  *	Force cable type according to libata.force and whine about it.
170  *	The last entry which has matching port number is used, so it
171  *	can be specified as part of device force parameters.  For
172  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
173  *	same effect.
174  *
175  *	LOCKING:
176  *	EH context.
177  */
178 void ata_force_cbl(struct ata_port *ap)
179 {
180 	int i;
181 
182 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
183 		const struct ata_force_ent *fe = &ata_force_tbl[i];
184 
185 		if (fe->port != -1 && fe->port != ap->print_id)
186 			continue;
187 
188 		if (fe->param.cbl == ATA_CBL_NONE)
189 			continue;
190 
191 		ap->cbl = fe->param.cbl;
192 		ata_port_printk(ap, KERN_NOTICE,
193 				"FORCE: cable set to %s\n", fe->param.name);
194 		return;
195 	}
196 }
197 
198 /**
199  *	ata_force_spd_limit - force SATA spd limit according to libata.force
200  *	@link: ATA link of interest
201  *
202  *	Force SATA spd limit according to libata.force and whine about
203  *	it.  When only the port part is specified (e.g. 1:), the limit
204  *	applies to all links connected to both the host link and all
205  *	fan-out ports connected via PMP.  If the device part is
206  *	specified as 0 (e.g. 1.00:), it specifies the first fan-out
207  *	link not the host link.  Device number 15 always points to the
208  *	host link whether PMP is attached or not.
209  *
210  *	LOCKING:
211  *	EH context.
212  */
213 static void ata_force_spd_limit(struct ata_link *link)
214 {
215 	int linkno, i;
216 
217 	if (ata_is_host_link(link))
218 		linkno = 15;
219 	else
220 		linkno = link->pmp;
221 
222 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
223 		const struct ata_force_ent *fe = &ata_force_tbl[i];
224 
225 		if (fe->port != -1 && fe->port != link->ap->print_id)
226 			continue;
227 
228 		if (fe->device != -1 && fe->device != linkno)
229 			continue;
230 
231 		if (!fe->param.spd_limit)
232 			continue;
233 
234 		link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
235 		ata_link_printk(link, KERN_NOTICE,
236 			"FORCE: PHY spd limit set to %s\n", fe->param.name);
237 		return;
238 	}
239 }
240 
241 /**
242  *	ata_force_xfermask - force xfermask according to libata.force
243  *	@dev: ATA device of interest
244  *
245  *	Force xfer_mask according to libata.force and whine about it.
246  *	For consistency with link selection, device number 15 selects
247  *	the first device connected to the host link.
248  *
249  *	LOCKING:
250  *	EH context.
251  */
252 static void ata_force_xfermask(struct ata_device *dev)
253 {
254 	int devno = dev->link->pmp + dev->devno;
255 	int alt_devno = devno;
256 	int i;
257 
258 	/* allow n.15 for the first device attached to host port */
259 	if (ata_is_host_link(dev->link) && devno == 0)
260 		alt_devno = 15;
261 
262 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
263 		const struct ata_force_ent *fe = &ata_force_tbl[i];
264 		unsigned long pio_mask, mwdma_mask, udma_mask;
265 
266 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
267 			continue;
268 
269 		if (fe->device != -1 && fe->device != devno &&
270 		    fe->device != alt_devno)
271 			continue;
272 
273 		if (!fe->param.xfer_mask)
274 			continue;
275 
276 		ata_unpack_xfermask(fe->param.xfer_mask,
277 				    &pio_mask, &mwdma_mask, &udma_mask);
278 		if (udma_mask)
279 			dev->udma_mask = udma_mask;
280 		else if (mwdma_mask) {
281 			dev->udma_mask = 0;
282 			dev->mwdma_mask = mwdma_mask;
283 		} else {
284 			dev->udma_mask = 0;
285 			dev->mwdma_mask = 0;
286 			dev->pio_mask = pio_mask;
287 		}
288 
289 		ata_dev_printk(dev, KERN_NOTICE,
290 			"FORCE: xfer_mask set to %s\n", fe->param.name);
291 		return;
292 	}
293 }
294 
295 /**
296  *	ata_force_horkage - force horkage according to libata.force
297  *	@dev: ATA device of interest
298  *
299  *	Force horkage according to libata.force and whine about it.
300  *	For consistency with link selection, device number 15 selects
301  *	the first device connected to the host link.
302  *
303  *	LOCKING:
304  *	EH context.
305  */
306 static void ata_force_horkage(struct ata_device *dev)
307 {
308 	int devno = dev->link->pmp + dev->devno;
309 	int alt_devno = devno;
310 	int i;
311 
312 	/* allow n.15 for the first device attached to host port */
313 	if (ata_is_host_link(dev->link) && devno == 0)
314 		alt_devno = 15;
315 
316 	for (i = 0; i < ata_force_tbl_size; i++) {
317 		const struct ata_force_ent *fe = &ata_force_tbl[i];
318 
319 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
320 			continue;
321 
322 		if (fe->device != -1 && fe->device != devno &&
323 		    fe->device != alt_devno)
324 			continue;
325 
326 		if (!(~dev->horkage & fe->param.horkage_on) &&
327 		    !(dev->horkage & fe->param.horkage_off))
328 			continue;
329 
330 		dev->horkage |= fe->param.horkage_on;
331 		dev->horkage &= ~fe->param.horkage_off;
332 
333 		ata_dev_printk(dev, KERN_NOTICE,
334 			"FORCE: horkage modified (%s)\n", fe->param.name);
335 	}
336 }
337 
338 /**
339  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
340  *	@opcode: SCSI opcode
341  *
342  *	Determine ATAPI command type from @opcode.
343  *
344  *	LOCKING:
345  *	None.
346  *
347  *	RETURNS:
348  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
349  */
350 int atapi_cmd_type(u8 opcode)
351 {
352 	switch (opcode) {
353 	case GPCMD_READ_10:
354 	case GPCMD_READ_12:
355 		return ATAPI_READ;
356 
357 	case GPCMD_WRITE_10:
358 	case GPCMD_WRITE_12:
359 	case GPCMD_WRITE_AND_VERIFY_10:
360 		return ATAPI_WRITE;
361 
362 	case GPCMD_READ_CD:
363 	case GPCMD_READ_CD_MSF:
364 		return ATAPI_READ_CD;
365 
366 	case ATA_16:
367 	case ATA_12:
368 		if (atapi_passthru16)
369 			return ATAPI_PASS_THRU;
370 		/* fall thru */
371 	default:
372 		return ATAPI_MISC;
373 	}
374 }
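/*
 * Illustrative sketch (not used by the driver): a caller that only cares
 * about data-in ATAPI commands could key off the classification above.
 */
static inline int atapi_opcode_is_read(u8 opcode)
{
	int type = atapi_cmd_type(opcode);

	return type == ATAPI_READ || type == ATAPI_READ_CD;
}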
375 
376 /**
377  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
378  *	@tf: Taskfile to convert
379  *	@pmp: Port multiplier port
380  *	@is_cmd: This FIS is for command
381  *	@fis: Buffer into which data will be output
382  *
383  *	Converts a standard ATA taskfile to a Serial ATA
384  *	FIS structure (Register - Host to Device).
385  *
386  *	LOCKING:
387  *	Inherited from caller.
388  */
389 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
390 {
391 	fis[0] = 0x27;			/* Register - Host to Device FIS */
392 	fis[1] = pmp & 0xf;		/* Port multiplier number */
393 	if (is_cmd)
394 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
395 
396 	fis[2] = tf->command;
397 	fis[3] = tf->feature;
398 
399 	fis[4] = tf->lbal;
400 	fis[5] = tf->lbam;
401 	fis[6] = tf->lbah;
402 	fis[7] = tf->device;
403 
404 	fis[8] = tf->hob_lbal;
405 	fis[9] = tf->hob_lbam;
406 	fis[10] = tf->hob_lbah;
407 	fis[11] = tf->hob_feature;
408 
409 	fis[12] = tf->nsect;
410 	fis[13] = tf->hob_nsect;
411 	fis[14] = 0;
412 	fis[15] = tf->ctl;
413 
414 	fis[16] = 0;
415 	fis[17] = 0;
416 	fis[18] = 0;
417 	fis[19] = 0;
418 }
419 
420 /**
421  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
422  *	@fis: Buffer from which data will be input
423  *	@tf: Taskfile to output
424  *
425  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
426  *
427  *	LOCKING:
428  *	Inherited from caller.
429  */
430 
431 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
432 {
433 	tf->command	= fis[2];	/* status */
434 	tf->feature	= fis[3];	/* error */
435 
436 	tf->lbal	= fis[4];
437 	tf->lbam	= fis[5];
438 	tf->lbah	= fis[6];
439 	tf->device	= fis[7];
440 
441 	tf->hob_lbal	= fis[8];
442 	tf->hob_lbam	= fis[9];
443 	tf->hob_lbah	= fis[10];
444 
445 	tf->nsect	= fis[12];
446 	tf->hob_nsect	= fis[13];
447 }
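/*
 * Illustrative sketch (not part of the driver): the two converters above
 * are inverses for the fields they carry.  The PMP port and command-FIS
 * flag below are example values only.
 */
static inline void ata_example_fis_roundtrip(const struct ata_taskfile *tf)
{
	u8 fis[20];
	struct ata_taskfile out;

	ata_tf_to_fis(tf, 0 /* PMP port 0 */, 1 /* command FIS */, fis);
	ata_tf_from_fis(fis, &out);
	/* out now mirrors the command, LBA and count fields of @tf */
}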
448 
449 static const u8 ata_rw_cmds[] = {
450 	/* pio multi */
451 	ATA_CMD_READ_MULTI,
452 	ATA_CMD_WRITE_MULTI,
453 	ATA_CMD_READ_MULTI_EXT,
454 	ATA_CMD_WRITE_MULTI_EXT,
455 	0,
456 	0,
457 	0,
458 	ATA_CMD_WRITE_MULTI_FUA_EXT,
459 	/* pio */
460 	ATA_CMD_PIO_READ,
461 	ATA_CMD_PIO_WRITE,
462 	ATA_CMD_PIO_READ_EXT,
463 	ATA_CMD_PIO_WRITE_EXT,
464 	0,
465 	0,
466 	0,
467 	0,
468 	/* dma */
469 	ATA_CMD_READ,
470 	ATA_CMD_WRITE,
471 	ATA_CMD_READ_EXT,
472 	ATA_CMD_WRITE_EXT,
473 	0,
474 	0,
475 	0,
476 	ATA_CMD_WRITE_FUA_EXT
477 };
478 
479 /**
480  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
481  *	@tf: command to examine and configure
482  *	@dev: device tf belongs to
483  *
484  *	Examine the device configuration and tf->flags to calculate
485  *	the proper read/write commands and protocol to use.
486  *
487  *	LOCKING:
488  *	caller.
489  */
490 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
491 {
492 	u8 cmd;
493 
494 	int index, fua, lba48, write;
495 
496 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
497 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
498 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
499 
500 	if (dev->flags & ATA_DFLAG_PIO) {
501 		tf->protocol = ATA_PROT_PIO;
502 		index = dev->multi_count ? 0 : 8;
503 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
504 		/* Unable to use DMA due to host limitation */
505 		tf->protocol = ATA_PROT_PIO;
506 		index = dev->multi_count ? 0 : 8;
507 	} else {
508 		tf->protocol = ATA_PROT_DMA;
509 		index = 16;
510 	}
511 
512 	cmd = ata_rw_cmds[index + fua + lba48 + write];
513 	if (cmd) {
514 		tf->command = cmd;
515 		return 0;
516 	}
517 	return -1;
518 }
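/*
 * Worked example (illustrative only): for an LBA48 FUA write on a
 * DMA-capable device, ata_rwcmd_protocol() indexes ata_rw_cmds[] at
 * 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT; a PIO-restricted multi-sector LBA48 read uses
 * 0 (pio multi) + 2 (lba48) = 2, i.e. ATA_CMD_READ_MULTI_EXT.
 */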
519 
520 /**
521  *	ata_tf_read_block - Read block address from ATA taskfile
522  *	@tf: ATA taskfile of interest
523  *	@dev: ATA device @tf belongs to
524  *
525  *	LOCKING:
526  *	None.
527  *
528  *	Read block address from @tf.  This function can handle all
529  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
530  *	flags select the address format to use.
531  *
532  *	RETURNS:
533  *	Block address read from @tf.
534  */
535 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
536 {
537 	u64 block = 0;
538 
539 	if (tf->flags & ATA_TFLAG_LBA) {
540 		if (tf->flags & ATA_TFLAG_LBA48) {
541 			block |= (u64)tf->hob_lbah << 40;
542 			block |= (u64)tf->hob_lbam << 32;
543 			block |= tf->hob_lbal << 24;
544 		} else
545 			block |= (tf->device & 0xf) << 24;
546 
547 		block |= tf->lbah << 16;
548 		block |= tf->lbam << 8;
549 		block |= tf->lbal;
550 	} else {
551 		u32 cyl, head, sect;
552 
553 		cyl = tf->lbam | (tf->lbah << 8);
554 		head = tf->device & 0xf;
555 		sect = tf->lbal;
556 
557 		block = (cyl * dev->heads + head) * dev->sectors + sect;
558 	}
559 
560 	return block;
561 }
562 
563 /**
564  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
565  *	@tf: Target ATA taskfile
566  *	@dev: ATA device @tf belongs to
567  *	@block: Block address
568  *	@n_block: Number of blocks
569  *	@tf_flags: RW/FUA etc...
570  *	@tag: tag
571  *
572  *	LOCKING:
573  *	None.
574  *
575  *	Build ATA taskfile @tf for read/write request described by
576  *	@block, @n_block, @tf_flags and @tag on @dev.
577  *
578  *	RETURNS:
579  *
580  *	0 on success, -ERANGE if the request is too large for @dev,
581  *	-EINVAL if the request is invalid.
582  */
583 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
584 		    u64 block, u32 n_block, unsigned int tf_flags,
585 		    unsigned int tag)
586 {
587 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
588 	tf->flags |= tf_flags;
589 
590 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
591 		/* yay, NCQ */
592 		if (!lba_48_ok(block, n_block))
593 			return -ERANGE;
594 
595 		tf->protocol = ATA_PROT_NCQ;
596 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
597 
598 		if (tf->flags & ATA_TFLAG_WRITE)
599 			tf->command = ATA_CMD_FPDMA_WRITE;
600 		else
601 			tf->command = ATA_CMD_FPDMA_READ;
602 
603 		tf->nsect = tag << 3;
604 		tf->hob_feature = (n_block >> 8) & 0xff;
605 		tf->feature = n_block & 0xff;
606 
607 		tf->hob_lbah = (block >> 40) & 0xff;
608 		tf->hob_lbam = (block >> 32) & 0xff;
609 		tf->hob_lbal = (block >> 24) & 0xff;
610 		tf->lbah = (block >> 16) & 0xff;
611 		tf->lbam = (block >> 8) & 0xff;
612 		tf->lbal = block & 0xff;
613 
614 		tf->device = 1 << 6;
615 		if (tf->flags & ATA_TFLAG_FUA)
616 			tf->device |= 1 << 7;
617 	} else if (dev->flags & ATA_DFLAG_LBA) {
618 		tf->flags |= ATA_TFLAG_LBA;
619 
620 		if (lba_28_ok(block, n_block)) {
621 			/* use LBA28 */
622 			tf->device |= (block >> 24) & 0xf;
623 		} else if (lba_48_ok(block, n_block)) {
624 			if (!(dev->flags & ATA_DFLAG_LBA48))
625 				return -ERANGE;
626 
627 			/* use LBA48 */
628 			tf->flags |= ATA_TFLAG_LBA48;
629 
630 			tf->hob_nsect = (n_block >> 8) & 0xff;
631 
632 			tf->hob_lbah = (block >> 40) & 0xff;
633 			tf->hob_lbam = (block >> 32) & 0xff;
634 			tf->hob_lbal = (block >> 24) & 0xff;
635 		} else
636 			/* request too large even for LBA48 */
637 			return -ERANGE;
638 
639 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
640 			return -EINVAL;
641 
642 		tf->nsect = n_block & 0xff;
643 
644 		tf->lbah = (block >> 16) & 0xff;
645 		tf->lbam = (block >> 8) & 0xff;
646 		tf->lbal = block & 0xff;
647 
648 		tf->device |= ATA_LBA;
649 	} else {
650 		/* CHS */
651 		u32 sect, head, cyl, track;
652 
653 		/* The request -may- be too large for CHS addressing. */
654 		if (!lba_28_ok(block, n_block))
655 			return -ERANGE;
656 
657 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
658 			return -EINVAL;
659 
660 		/* Convert LBA to CHS */
661 		track = (u32)block / dev->sectors;
662 		cyl   = track / dev->heads;
663 		head  = track % dev->heads;
664 		sect  = (u32)block % dev->sectors + 1;
665 
666 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
667 			(u32)block, track, cyl, head, sect);
668 
669 		/* Check whether the converted CHS can fit.
670 		   Cylinder: 0-65535
671 		   Head: 0-15
672 		   Sector: 1-255 */
673 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
674 			return -ERANGE;
675 
676 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
677 		tf->lbal = sect;
678 		tf->lbam = cyl;
679 		tf->lbah = cyl >> 8;
680 		tf->device |= head;
681 	}
682 
683 	return 0;
684 }
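/*
 * Illustrative sketch (not part of the driver): building a plain 8-sector
 * read at LBA 4096.  Passing ATA_TAG_INTERNAL keeps the example off the
 * NCQ path; the block address and count are arbitrary example values.
 */
static inline int ata_example_build_read(struct ata_device *dev,
					 struct ata_taskfile *tf)
{
	ata_tf_init(dev, tf);
	return ata_build_rw_tf(tf, dev, 4096ULL, 8, 0, ATA_TAG_INTERNAL);
}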
685 
686 /**
687  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
688  *	@pio_mask: pio_mask
689  *	@mwdma_mask: mwdma_mask
690  *	@udma_mask: udma_mask
691  *
692  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
693  *	unsigned long xfer_mask.
694  *
695  *	LOCKING:
696  *	None.
697  *
698  *	RETURNS:
699  *	Packed xfer_mask.
700  */
701 unsigned long ata_pack_xfermask(unsigned long pio_mask,
702 				unsigned long mwdma_mask,
703 				unsigned long udma_mask)
704 {
705 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
706 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
707 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
708 }
709 
710 /**
711  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
712  *	@xfer_mask: xfer_mask to unpack
713  *	@pio_mask: resulting pio_mask
714  *	@mwdma_mask: resulting mwdma_mask
715  *	@udma_mask: resulting udma_mask
716  *
717  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
718  *	Any NULL destination masks will be ignored.
719  */
720 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
721 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
722 {
723 	if (pio_mask)
724 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
725 	if (mwdma_mask)
726 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
727 	if (udma_mask)
728 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
729 }
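/*
 * Illustrative sketch (not used by the driver): packing the three
 * per-type masks into one xfer_mask and splitting it back out.  The
 * mode bits below are example values only.
 */
static inline unsigned long ata_example_xfermask_roundtrip(void)
{
	unsigned long pio, mwdma, udma;
	unsigned long xfer_mask = ata_pack_xfermask(0x1f /* PIO0-4 */,
						    0x07 /* MWDMA0-2 */,
						    0x7f /* UDMA0-6 */);

	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio, mwdma and udma now hold the original masks again */
	return xfer_mask;
}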
730 
731 static const struct ata_xfer_ent {
732 	int shift, bits;
733 	u8 base;
734 } ata_xfer_tbl[] = {
735 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
736 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
737 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
738 	{ -1, },
739 };
740 
741 /**
742  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
743  *	@xfer_mask: xfer_mask of interest
744  *
745  *	Return matching XFER_* value for @xfer_mask.  Only the highest
746  *	bit of @xfer_mask is considered.
747  *
748  *	LOCKING:
749  *	None.
750  *
751  *	RETURNS:
752  *	Matching XFER_* value, 0xff if no match found.
753  */
754 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
755 {
756 	int highbit = fls(xfer_mask) - 1;
757 	const struct ata_xfer_ent *ent;
758 
759 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
760 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
761 			return ent->base + highbit - ent->shift;
762 	return 0xff;
763 }
764 
765 /**
766  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
767  *	@xfer_mode: XFER_* of interest
768  *
769  *	Return matching xfer_mask for @xfer_mode.
770  *
771  *	LOCKING:
772  *	None.
773  *
774  *	RETURNS:
775  *	Matching xfer_mask, 0 if no match found.
776  */
777 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
778 {
779 	const struct ata_xfer_ent *ent;
780 
781 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
782 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
783 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
784 				& ~((1 << ent->shift) - 1);
785 	return 0;
786 }
787 
788 /**
789  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
790  *	@xfer_mode: XFER_* of interest
791  *
792  *	Return matching xfer_shift for @xfer_mode.
793  *
794  *	LOCKING:
795  *	None.
796  *
797  *	RETURNS:
798  *	Matching xfer_shift, -1 if no match found.
799  */
800 int ata_xfer_mode2shift(unsigned long xfer_mode)
801 {
802 	const struct ata_xfer_ent *ent;
803 
804 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
805 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
806 			return ent->shift;
807 	return -1;
808 }
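/*
 * Illustrative sketch (not used by the driver): the three helpers above
 * translate between XFER_* mode numbers and xfer_mask bit positions,
 * e.g. the UDMA0-5 mask maps back to XFER_UDMA_5 and XFER_MW_DMA_2 lives
 * at the MWDMA shift.
 */
static inline int ata_example_xfer_translate(void)
{
	unsigned long udma5_mask = ata_xfer_mode2mask(XFER_UDMA_5);

	return ata_xfer_mask2mode(udma5_mask) == XFER_UDMA_5 &&
	       ata_xfer_mode2shift(XFER_MW_DMA_2) == ATA_SHIFT_MWDMA;
}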
809 
810 /**
811  *	ata_mode_string - convert xfer_mask to string
812  *	@xfer_mask: mask of bits supported; only highest bit counts.
813  *
814  *	Determine string which represents the highest speed
815  *	(highest bit in @xfer_mask).
816  *
817  *	LOCKING:
818  *	None.
819  *
820  *	RETURNS:
821  *	Constant C string representing highest speed listed in
822  *	@xfer_mask, or the constant C string "<n/a>".
823  */
824 const char *ata_mode_string(unsigned long xfer_mask)
825 {
826 	static const char * const xfer_mode_str[] = {
827 		"PIO0",
828 		"PIO1",
829 		"PIO2",
830 		"PIO3",
831 		"PIO4",
832 		"PIO5",
833 		"PIO6",
834 		"MWDMA0",
835 		"MWDMA1",
836 		"MWDMA2",
837 		"MWDMA3",
838 		"MWDMA4",
839 		"UDMA/16",
840 		"UDMA/25",
841 		"UDMA/33",
842 		"UDMA/44",
843 		"UDMA/66",
844 		"UDMA/100",
845 		"UDMA/133",
846 		"UDMA7",
847 	};
848 	int highbit;
849 
850 	highbit = fls(xfer_mask) - 1;
851 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
852 		return xfer_mode_str[highbit];
853 	return "<n/a>";
854 }
855 
856 static const char *sata_spd_string(unsigned int spd)
857 {
858 	static const char * const spd_str[] = {
859 		"1.5 Gbps",
860 		"3.0 Gbps",
861 	};
862 
863 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
864 		return "<unknown>";
865 	return spd_str[spd - 1];
866 }
867 
868 void ata_dev_disable(struct ata_device *dev)
869 {
870 	if (ata_dev_enabled(dev)) {
871 		if (ata_msg_drv(dev->link->ap))
872 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
873 		ata_acpi_on_disable(dev);
874 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
875 					     ATA_DNXFER_QUIET);
876 		dev->class++;
877 	}
878 }
879 
880 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
881 {
882 	struct ata_link *link = dev->link;
883 	struct ata_port *ap = link->ap;
884 	u32 scontrol;
885 	unsigned int err_mask;
886 	int rc;
887 
888 	/*
889 	 * disallow DIPM for drivers which haven't set
890 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
891 	 * phy ready will be set in the interrupt status on
892 	 * state changes, which will cause some drivers to
893 	 * think there are errors - additionally drivers will
894 	 * need to disable hot plug.
895 	 */
896 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
897 		ap->pm_policy = NOT_AVAILABLE;
898 		return -EINVAL;
899 	}
900 
901 	/*
902 	 * For DIPM, we will only enable it for the
903 	 * min_power setting.
904 	 *
905 	 * Why?  Because disks are too stupid to know that
906 	 * if the host rejects a request to go to SLUMBER
907 	 * they should retry at PARTIAL; instead they
908 	 * just give up.  So, for medium_power to
909 	 * work at all, we need to only allow HIPM.
910 	 */
911 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
912 	if (rc)
913 		return rc;
914 
915 	switch (policy) {
916 	case MIN_POWER:
917 		/* no restrictions on IPM transitions */
918 		scontrol &= ~(0x3 << 8);
919 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
920 		if (rc)
921 			return rc;
922 
923 		/* enable DIPM */
924 		if (dev->flags & ATA_DFLAG_DIPM)
925 			err_mask = ata_dev_set_feature(dev,
926 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
927 		break;
928 	case MEDIUM_POWER:
929 		/* allow IPM to PARTIAL */
930 		scontrol &= ~(0x1 << 8);
931 		scontrol |= (0x2 << 8);
932 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
933 		if (rc)
934 			return rc;
935 
936 		/*
937 		 * we don't have to disable DIPM since IPM flags
938 		 * disallow transitions to SLUMBER, which effectively
939 		 * disable DIPM if it does not support PARTIAL
940 		 */
941 		break;
942 	case NOT_AVAILABLE:
943 	case MAX_PERFORMANCE:
944 		/* disable all IPM transitions */
945 		scontrol |= (0x3 << 8);
946 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
947 		if (rc)
948 			return rc;
949 
950 		/*
951 		 * we don't have to disable DIPM since IPM flags
952 		 * disallow all transitions which effectively
953 		 * disable DIPM anyway.
954 		 */
955 		break;
956 	}
957 
958 	/* FIXME: handle SET FEATURES failure */
959 	(void) err_mask;
960 
961 	return 0;
962 }
963 
964 /**
965  *	ata_dev_enable_pm - enable SATA interface power management
966  *	@dev:  device to enable power management
967  *	@policy: the link power management policy
968  *
969  *	Enable SATA Interface power management.  This will enable
970  *	Device Interface Power Management (DIPM) for min_power
971  * 	policy, and then call driver specific callbacks for
972  *	enabling Host Initiated Power management.
973  *
974  *	Locking: Caller.
975  *	Returns: Nothing; on failure the port's pm_policy falls back to MAX_PERFORMANCE.
976  */
977 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
978 {
979 	int rc = 0;
980 	struct ata_port *ap = dev->link->ap;
981 
982 	/* set HIPM first, then DIPM */
983 	if (ap->ops->enable_pm)
984 		rc = ap->ops->enable_pm(ap, policy);
985 	if (rc)
986 		goto enable_pm_out;
987 	rc = ata_dev_set_dipm(dev, policy);
988 
989 enable_pm_out:
990 	if (rc)
991 		ap->pm_policy = MAX_PERFORMANCE;
992 	else
993 		ap->pm_policy = policy;
994 	return /* rc */;	/* hopefully we can use 'rc' eventually */
995 }
996 
997 #ifdef CONFIG_PM
998 /**
999  *	ata_dev_disable_pm - disable SATA interface power management
1000  *	@dev: device to disable power management
1001  *
1002  *	Disable SATA Interface power management.  This will disable
1003  *	Device Interface Power Management (DIPM) without changing
1004  * 	policy,  call driver specific callbacks for disabling Host
1005  * 	policy, and call driver-specific callbacks for disabling Host
1006  *
1007  *	Locking: Caller.
1008  *	Returns: void
1009  */
1010 static void ata_dev_disable_pm(struct ata_device *dev)
1011 {
1012 	struct ata_port *ap = dev->link->ap;
1013 
1014 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1015 	if (ap->ops->disable_pm)
1016 		ap->ops->disable_pm(ap);
1017 }
1018 #endif	/* CONFIG_PM */
1019 
1020 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1021 {
1022 	ap->pm_policy = policy;
1023 	ap->link.eh_info.action |= ATA_EH_LPM;
1024 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1025 	ata_port_schedule_eh(ap);
1026 }
1027 
1028 #ifdef CONFIG_PM
1029 static void ata_lpm_enable(struct ata_host *host)
1030 {
1031 	struct ata_link *link;
1032 	struct ata_port *ap;
1033 	struct ata_device *dev;
1034 	int i;
1035 
1036 	for (i = 0; i < host->n_ports; i++) {
1037 		ap = host->ports[i];
1038 		ata_port_for_each_link(link, ap) {
1039 			ata_link_for_each_dev(dev, link)
1040 				ata_dev_disable_pm(dev);
1041 		}
1042 	}
1043 }
1044 
1045 static void ata_lpm_disable(struct ata_host *host)
1046 {
1047 	int i;
1048 
1049 	for (i = 0; i < host->n_ports; i++) {
1050 		struct ata_port *ap = host->ports[i];
1051 		ata_lpm_schedule(ap, ap->pm_policy);
1052 	}
1053 }
1054 #endif	/* CONFIG_PM */
1055 
1056 /**
1057  *	ata_dev_classify - determine device type based on ATA-spec signature
1058  *	@tf: ATA taskfile register set for device to be identified
1059  *
1060  *	Determine from taskfile register contents whether a device is
1061  *	ATA or ATAPI, as per "Signature and persistence" section
1062  *	of ATA/PI spec (volume 1, sect 5.14).
1063  *
1064  *	LOCKING:
1065  *	None.
1066  *
1067  *	RETURNS:
1068  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1069  *	%ATA_DEV_UNKNOWN in the event of failure.
1070  */
1071 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1072 {
1073 	/* Apple's open source Darwin code hints that some devices only
1074 	 * put a proper signature into the LBA mid/high registers,
1075 	 * so we only check those.  It's sufficient for uniqueness.
1076 	 *
1077 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1078 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1079 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1080 	 * spec has never mentioned using different signatures
1081 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1082 	 * Multiplier specification began to use 0x69/0x96 to identify
1083 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1084 	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1085 	 * 0x69/0x96 shortly and described them as reserved for
1086 	 * SerialATA.
1087 	 *
1088 	 * We follow the current spec and consider that 0x69/0x96
1089 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1090 	 */
1091 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1092 		DPRINTK("found ATA device by sig\n");
1093 		return ATA_DEV_ATA;
1094 	}
1095 
1096 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1097 		DPRINTK("found ATAPI device by sig\n");
1098 		return ATA_DEV_ATAPI;
1099 	}
1100 
1101 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1102 		DPRINTK("found PMP device by sig\n");
1103 		return ATA_DEV_PMP;
1104 	}
1105 
1106 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1107 		printk(KERN_INFO "ata: SEMB device ignored\n");
1108 		return ATA_DEV_SEMB_UNSUP; /* not yet */
1109 	}
1110 
1111 	DPRINTK("unknown device\n");
1112 	return ATA_DEV_UNKNOWN;
1113 }
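/*
 * Illustrative sketch (not part of the driver): a reset path typically
 * fills a taskfile from the D2H signature FIS and then classifies the
 * attached device with the helper above.
 */
static inline unsigned int ata_example_classify_sig(const u8 *sig_fis)
{
	struct ata_taskfile tf;

	ata_tf_from_fis(sig_fis, &tf);
	return ata_dev_classify(&tf);	/* ATA_DEV_ATA, ATA_DEV_ATAPI, ... */
}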
1114 
1115 /**
1116  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1117  *	@id: IDENTIFY DEVICE results we will examine
1118  *	@s: string into which data is output
1119  *	@ofs: offset into identify device page
1120  *	@len: length of string to return. must be an even number.
1121  *
1122  *	The strings in the IDENTIFY DEVICE page are broken up into
1123  *	16-bit chunks.  Run through the string, and output each
1124  *	8-bit chunk linearly, regardless of platform.
1125  *
1126  *	LOCKING:
1127  *	caller.
1128  */
1129 
1130 void ata_id_string(const u16 *id, unsigned char *s,
1131 		   unsigned int ofs, unsigned int len)
1132 {
1133 	unsigned int c;
1134 
1135 	BUG_ON(len & 1);
1136 
1137 	while (len > 0) {
1138 		c = id[ofs] >> 8;
1139 		*s = c;
1140 		s++;
1141 
1142 		c = id[ofs] & 0xff;
1143 		*s = c;
1144 		s++;
1145 
1146 		ofs++;
1147 		len -= 2;
1148 	}
1149 }
1150 
1151 /**
1152  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1153  *	@id: IDENTIFY DEVICE results we will examine
1154  *	@s: string into which data is output
1155  *	@ofs: offset into identify device page
1156  *	@len: length of string to return. must be an odd number.
1157  *
1158  *	This function is identical to ata_id_string except that it
1159  *	trims trailing spaces and terminates the resulting string with
1160  *	null.  @len must be actual maximum length (even number) + 1.
1161  *
1162  *	LOCKING:
1163  *	caller.
1164  */
1165 void ata_id_c_string(const u16 *id, unsigned char *s,
1166 		     unsigned int ofs, unsigned int len)
1167 {
1168 	unsigned char *p;
1169 
1170 	ata_id_string(id, s, ofs, len - 1);
1171 
1172 	p = s + strnlen(s, len - 1);
1173 	while (p > s && p[-1] == ' ')
1174 		p--;
1175 	*p = '\0';
1176 }
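/*
 * Illustrative sketch (not used by the driver): pulling the model and
 * firmware revision strings out of IDENTIFY data with the helper above.
 * Note the +1 so @len stays one more than the even field length.
 */
static inline void ata_example_id_strings(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];
	unsigned char fwrev[ATA_ID_FW_REV_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
}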
1177 
1178 static u64 ata_id_n_sectors(const u16 *id)
1179 {
1180 	if (ata_id_has_lba(id)) {
1181 		if (ata_id_has_lba48(id))
1182 			return ata_id_u64(id, 100);
1183 		else
1184 			return ata_id_u32(id, 60);
1185 	} else {
1186 		if (ata_id_current_chs_valid(id))
1187 			return ata_id_u32(id, 57);
1188 		else
1189 			return id[1] * id[3] * id[6];
1190 	}
1191 }
1192 
1193 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1194 {
1195 	u64 sectors = 0;
1196 
1197 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1198 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1199 	sectors |= (tf->hob_lbal & 0xff) << 24;
1200 	sectors |= (tf->lbah & 0xff) << 16;
1201 	sectors |= (tf->lbam & 0xff) << 8;
1202 	sectors |= (tf->lbal & 0xff);
1203 
1204 	return sectors;
1205 }
1206 
1207 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1208 {
1209 	u64 sectors = 0;
1210 
1211 	sectors |= (tf->device & 0x0f) << 24;
1212 	sectors |= (tf->lbah & 0xff) << 16;
1213 	sectors |= (tf->lbam & 0xff) << 8;
1214 	sectors |= (tf->lbal & 0xff);
1215 
1216 	return sectors;
1217 }
1218 
1219 /**
1220  *	ata_read_native_max_address - Read native max address
1221  *	@dev: target device
1222  *	@max_sectors: out parameter for the result native max address
1223  *
1224  *	Perform an LBA48 or LBA28 native size query upon the device in
1225  *	question.
1226  *
1227  *	RETURNS:
1228  *	0 on success, -EACCES if command is aborted by the drive.
1229  *	-EIO on other errors.
1230  */
1231 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1232 {
1233 	unsigned int err_mask;
1234 	struct ata_taskfile tf;
1235 	int lba48 = ata_id_has_lba48(dev->id);
1236 
1237 	ata_tf_init(dev, &tf);
1238 
1239 	/* always clear all address registers */
1240 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1241 
1242 	if (lba48) {
1243 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1244 		tf.flags |= ATA_TFLAG_LBA48;
1245 	} else
1246 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1247 
1248 	tf.protocol |= ATA_PROT_NODATA;
1249 	tf.device |= ATA_LBA;
1250 
1251 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1252 	if (err_mask) {
1253 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1254 			       "max address (err_mask=0x%x)\n", err_mask);
1255 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1256 			return -EACCES;
1257 		return -EIO;
1258 	}
1259 
1260 	if (lba48)
1261 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1262 	else
1263 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1264 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1265 		(*max_sectors)--;
1266 	return 0;
1267 }
1268 
1269 /**
1270  *	ata_set_max_sectors - Set max sectors
1271  *	@dev: target device
1272  *	@new_sectors: new max sectors value to set for the device
1273  *
1274  *	Set max sectors of @dev to @new_sectors.
1275  *
1276  *	RETURNS:
1277  *	0 on success, -EACCES if command is aborted or denied (due to
1278  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1279  *	errors.
1280  */
1281 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1282 {
1283 	unsigned int err_mask;
1284 	struct ata_taskfile tf;
1285 	int lba48 = ata_id_has_lba48(dev->id);
1286 
1287 	new_sectors--;
1288 
1289 	ata_tf_init(dev, &tf);
1290 
1291 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1292 
1293 	if (lba48) {
1294 		tf.command = ATA_CMD_SET_MAX_EXT;
1295 		tf.flags |= ATA_TFLAG_LBA48;
1296 
1297 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1298 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1299 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1300 	} else {
1301 		tf.command = ATA_CMD_SET_MAX;
1302 
1303 		tf.device |= (new_sectors >> 24) & 0xf;
1304 	}
1305 
1306 	tf.protocol |= ATA_PROT_NODATA;
1307 	tf.device |= ATA_LBA;
1308 
1309 	tf.lbal = (new_sectors >> 0) & 0xff;
1310 	tf.lbam = (new_sectors >> 8) & 0xff;
1311 	tf.lbah = (new_sectors >> 16) & 0xff;
1312 
1313 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1314 	if (err_mask) {
1315 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1316 			       "max address (err_mask=0x%x)\n", err_mask);
1317 		if (err_mask == AC_ERR_DEV &&
1318 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1319 			return -EACCES;
1320 		return -EIO;
1321 	}
1322 
1323 	return 0;
1324 }
1325 
1326 /**
1327  *	ata_hpa_resize		-	Resize a device with an HPA set
1328  *	@dev: Device to resize
1329  *
1330  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1331  *	it if required to the full size of the media. The caller must check
1332  *	the drive has the HPA feature set enabled.
1333  *
1334  *	RETURNS:
1335  *	0 on success, -errno on failure.
1336  */
1337 static int ata_hpa_resize(struct ata_device *dev)
1338 {
1339 	struct ata_eh_context *ehc = &dev->link->eh_context;
1340 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1341 	u64 sectors = ata_id_n_sectors(dev->id);
1342 	u64 native_sectors;
1343 	int rc;
1344 
1345 	/* do we need to do it? */
1346 	if (dev->class != ATA_DEV_ATA ||
1347 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1348 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1349 		return 0;
1350 
1351 	/* read native max address */
1352 	rc = ata_read_native_max_address(dev, &native_sectors);
1353 	if (rc) {
1354 		/* If device aborted the command or HPA isn't going to
1355 		 * be unlocked, skip HPA resizing.
1356 		 */
1357 		if (rc == -EACCES || !ata_ignore_hpa) {
1358 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1359 				       "broken, skipping HPA handling\n");
1360 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1361 
1362 			/* we can continue if device aborted the command */
1363 			if (rc == -EACCES)
1364 				rc = 0;
1365 		}
1366 
1367 		return rc;
1368 	}
1369 
1370 	/* nothing to do? */
1371 	if (native_sectors <= sectors || !ata_ignore_hpa) {
1372 		if (!print_info || native_sectors == sectors)
1373 			return 0;
1374 
1375 		if (native_sectors > sectors)
1376 			ata_dev_printk(dev, KERN_INFO,
1377 				"HPA detected: current %llu, native %llu\n",
1378 				(unsigned long long)sectors,
1379 				(unsigned long long)native_sectors);
1380 		else if (native_sectors < sectors)
1381 			ata_dev_printk(dev, KERN_WARNING,
1382 				"native sectors (%llu) is smaller than "
1383 				"sectors (%llu)\n",
1384 				(unsigned long long)native_sectors,
1385 				(unsigned long long)sectors);
1386 		return 0;
1387 	}
1388 
1389 	/* let's unlock HPA */
1390 	rc = ata_set_max_sectors(dev, native_sectors);
1391 	if (rc == -EACCES) {
1392 		/* if device aborted the command, skip HPA resizing */
1393 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1394 			       "(%llu -> %llu), skipping HPA handling\n",
1395 			       (unsigned long long)sectors,
1396 			       (unsigned long long)native_sectors);
1397 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1398 		return 0;
1399 	} else if (rc)
1400 		return rc;
1401 
1402 	/* re-read IDENTIFY data */
1403 	rc = ata_dev_reread_id(dev, 0);
1404 	if (rc) {
1405 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1406 			       "data after HPA resizing\n");
1407 		return rc;
1408 	}
1409 
1410 	if (print_info) {
1411 		u64 new_sectors = ata_id_n_sectors(dev->id);
1412 		ata_dev_printk(dev, KERN_INFO,
1413 			"HPA unlocked: %llu -> %llu, native %llu\n",
1414 			(unsigned long long)sectors,
1415 			(unsigned long long)new_sectors,
1416 			(unsigned long long)native_sectors);
1417 	}
1418 
1419 	return 0;
1420 }
1421 
1422 /**
1423  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1424  *	@id: IDENTIFY DEVICE page to dump
1425  *
1426  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1427  *	page.
1428  *
1429  *	LOCKING:
1430  *	caller.
1431  */
1432 
1433 static inline void ata_dump_id(const u16 *id)
1434 {
1435 	DPRINTK("49==0x%04x  "
1436 		"53==0x%04x  "
1437 		"63==0x%04x  "
1438 		"64==0x%04x  "
1439 		"75==0x%04x  \n",
1440 		id[49],
1441 		id[53],
1442 		id[63],
1443 		id[64],
1444 		id[75]);
1445 	DPRINTK("80==0x%04x  "
1446 		"81==0x%04x  "
1447 		"82==0x%04x  "
1448 		"83==0x%04x  "
1449 		"84==0x%04x  \n",
1450 		id[80],
1451 		id[81],
1452 		id[82],
1453 		id[83],
1454 		id[84]);
1455 	DPRINTK("88==0x%04x  "
1456 		"93==0x%04x\n",
1457 		id[88],
1458 		id[93]);
1459 }
1460 
1461 /**
1462  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1463  *	@id: IDENTIFY data to compute xfer mask from
1464  *
1465  *	Compute the xfermask for this device. This is not as trivial
1466  *	as it seems if we must consider early devices correctly.
1467  *
1468  *	FIXME: pre IDE drive timing (do we care ?).
1469  *
1470  *	LOCKING:
1471  *	None.
1472  *
1473  *	RETURNS:
1474  *	Computed xfermask
1475  */
1476 unsigned long ata_id_xfermask(const u16 *id)
1477 {
1478 	unsigned long pio_mask, mwdma_mask, udma_mask;
1479 
1480 	/* Usual case. Word 53 indicates word 64 is valid */
1481 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1482 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1483 		pio_mask <<= 3;
1484 		pio_mask |= 0x7;
1485 	} else {
1486 		/* If word 64 isn't valid then Word 51 high byte holds
1487 		 * the PIO timing number for the maximum. Turn it into
1488 		 * a mask.
1489 		 */
1490 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1491 		if (mode < 5)	/* Valid PIO range */
1492 			pio_mask = (2 << mode) - 1;
1493 		else
1494 			pio_mask = 1;
1495 
1496 		/* But wait.. there's more. Design your standards by
1497 		 * committee and you too can get a free iordy field to
1498 		 * process.  However it's the speeds, not the modes, that
1499 		 * are supported... Note drivers using the timing API
1500 		 * will get this right anyway
1501 		 */
1502 	}
1503 
1504 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1505 
1506 	if (ata_id_is_cfa(id)) {
1507 		/*
1508 		 *	Process compact flash extended modes
1509 		 */
1510 		int pio = id[163] & 0x7;
1511 		int dma = (id[163] >> 3) & 7;
1512 
1513 		if (pio)
1514 			pio_mask |= (1 << 5);
1515 		if (pio > 1)
1516 			pio_mask |= (1 << 6);
1517 		if (dma)
1518 			mwdma_mask |= (1 << 3);
1519 		if (dma > 1)
1520 			mwdma_mask |= (1 << 4);
1521 	}
1522 
1523 	udma_mask = 0;
1524 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1525 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1526 
1527 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1528 }
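/*
 * Illustrative sketch (not used by the driver): reporting the fastest
 * transfer mode a device claims in its IDENTIFY data by combining the
 * helper above with ata_mode_string().
 */
static inline const char *ata_example_max_mode(const u16 *id)
{
	return ata_mode_string(ata_id_xfermask(id));	/* e.g. "UDMA/133" */
}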
1529 
1530 /**
1531  *	ata_pio_queue_task - Queue port_task
1532  *	@ap: The ata_port to queue port_task for
1533  *	@fn: workqueue function to be scheduled
1534  *	@data: data for @fn to use
1535  *	@delay: delay time in msecs for workqueue function
1536  *
1537  *	Schedule @fn(@data) for execution after @delay msecs using
1538  *	port_task.  There is one port_task per port and it's the
1539  *	user's (low level driver's) responsibility to make sure that only
1540  *	one task is active at any given time.
1541  *
1542  *	libata core layer takes care of synchronization between
1543  *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
1544  *	synchronization.
1545  *
1546  *	LOCKING:
1547  *	Inherited from caller.
1548  */
1549 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1550 {
1551 	ap->port_task_data = data;
1552 
1553 	/* may fail if ata_port_flush_task() in progress */
1554 	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1555 }
1556 
1557 /**
1558  *	ata_port_flush_task - Flush port_task
1559  *	@ap: The ata_port to flush port_task for
1560  *
1561  *	After this function completes, port_task is guaranteed not to
1562  *	be running or scheduled.
1563  *
1564  *	LOCKING:
1565  *	Kernel thread context (may sleep)
1566  */
1567 void ata_port_flush_task(struct ata_port *ap)
1568 {
1569 	DPRINTK("ENTER\n");
1570 
1571 	cancel_rearming_delayed_work(&ap->port_task);
1572 
1573 	if (ata_msg_ctl(ap))
1574 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1575 }
1576 
1577 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1578 {
1579 	struct completion *waiting = qc->private_data;
1580 
1581 	complete(waiting);
1582 }
1583 
1584 /**
1585  *	ata_exec_internal_sg - execute libata internal command
1586  *	@dev: Device to which the command is sent
1587  *	@tf: Taskfile registers for the command and the result
1588  *	@cdb: CDB for packet command
1589  *	@dma_dir: Data transfer direction of the command
1590  *	@sgl: sg list for the data buffer of the command
1591  *	@n_elem: Number of sg entries
1592  *	@timeout: Timeout in msecs (0 for default)
1593  *
1594  *	Executes libata internal command with timeout.  @tf contains
1595  *	command on entry and result on return.  Timeout and error
1596  *	conditions are reported via return value.  No recovery action
1597  *	is taken after a command times out.  It's the caller's duty to
1598  *	clean up after a timeout.
1599  *
1600  *	LOCKING:
1601  *	None.  Should be called with kernel context, might sleep.
1602  *
1603  *	RETURNS:
1604  *	Zero on success, AC_ERR_* mask on failure
1605  */
1606 unsigned ata_exec_internal_sg(struct ata_device *dev,
1607 			      struct ata_taskfile *tf, const u8 *cdb,
1608 			      int dma_dir, struct scatterlist *sgl,
1609 			      unsigned int n_elem, unsigned long timeout)
1610 {
1611 	struct ata_link *link = dev->link;
1612 	struct ata_port *ap = link->ap;
1613 	u8 command = tf->command;
1614 	int auto_timeout = 0;
1615 	struct ata_queued_cmd *qc;
1616 	unsigned int tag, preempted_tag;
1617 	u32 preempted_sactive, preempted_qc_active;
1618 	int preempted_nr_active_links;
1619 	DECLARE_COMPLETION_ONSTACK(wait);
1620 	unsigned long flags;
1621 	unsigned int err_mask;
1622 	int rc;
1623 
1624 	spin_lock_irqsave(ap->lock, flags);
1625 
1626 	/* no internal command while frozen */
1627 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1628 		spin_unlock_irqrestore(ap->lock, flags);
1629 		return AC_ERR_SYSTEM;
1630 	}
1631 
1632 	/* initialize internal qc */
1633 
1634 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1635 	 * drivers choke if any other tag is given.  This breaks
1636 	 * ata_tag_internal() test for those drivers.  Don't use new
1637 	 * EH stuff without converting to it.
1638 	 */
1639 	if (ap->ops->error_handler)
1640 		tag = ATA_TAG_INTERNAL;
1641 	else
1642 		tag = 0;
1643 
1644 	if (test_and_set_bit(tag, &ap->qc_allocated))
1645 		BUG();
1646 	qc = __ata_qc_from_tag(ap, tag);
1647 
1648 	qc->tag = tag;
1649 	qc->scsicmd = NULL;
1650 	qc->ap = ap;
1651 	qc->dev = dev;
1652 	ata_qc_reinit(qc);
1653 
1654 	preempted_tag = link->active_tag;
1655 	preempted_sactive = link->sactive;
1656 	preempted_qc_active = ap->qc_active;
1657 	preempted_nr_active_links = ap->nr_active_links;
1658 	link->active_tag = ATA_TAG_POISON;
1659 	link->sactive = 0;
1660 	ap->qc_active = 0;
1661 	ap->nr_active_links = 0;
1662 
1663 	/* prepare & issue qc */
1664 	qc->tf = *tf;
1665 	if (cdb)
1666 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1667 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1668 	qc->dma_dir = dma_dir;
1669 	if (dma_dir != DMA_NONE) {
1670 		unsigned int i, buflen = 0;
1671 		struct scatterlist *sg;
1672 
1673 		for_each_sg(sgl, sg, n_elem, i)
1674 			buflen += sg->length;
1675 
1676 		ata_sg_init(qc, sgl, n_elem);
1677 		qc->nbytes = buflen;
1678 	}
1679 
1680 	qc->private_data = &wait;
1681 	qc->complete_fn = ata_qc_complete_internal;
1682 
1683 	ata_qc_issue(qc);
1684 
1685 	spin_unlock_irqrestore(ap->lock, flags);
1686 
1687 	if (!timeout) {
1688 		if (ata_probe_timeout)
1689 			timeout = ata_probe_timeout * 1000;
1690 		else {
1691 			timeout = ata_internal_cmd_timeout(dev, command);
1692 			auto_timeout = 1;
1693 		}
1694 	}
1695 
1696 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1697 
1698 	ata_port_flush_task(ap);
1699 
1700 	if (!rc) {
1701 		spin_lock_irqsave(ap->lock, flags);
1702 
1703 		/* We're racing with irq here.  If we lose, the
1704 		 * following test prevents us from completing the qc
1705 		 * twice.  If we win, the port is frozen and will be
1706 		 * cleaned up by ->post_internal_cmd().
1707 		 */
1708 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1709 			qc->err_mask |= AC_ERR_TIMEOUT;
1710 
1711 			if (ap->ops->error_handler)
1712 				ata_port_freeze(ap);
1713 			else
1714 				ata_qc_complete(qc);
1715 
1716 			if (ata_msg_warn(ap))
1717 				ata_dev_printk(dev, KERN_WARNING,
1718 					"qc timeout (cmd 0x%x)\n", command);
1719 		}
1720 
1721 		spin_unlock_irqrestore(ap->lock, flags);
1722 	}
1723 
1724 	/* do post_internal_cmd */
1725 	if (ap->ops->post_internal_cmd)
1726 		ap->ops->post_internal_cmd(qc);
1727 
1728 	/* perform minimal error analysis */
1729 	if (qc->flags & ATA_QCFLAG_FAILED) {
1730 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1731 			qc->err_mask |= AC_ERR_DEV;
1732 
1733 		if (!qc->err_mask)
1734 			qc->err_mask |= AC_ERR_OTHER;
1735 
1736 		if (qc->err_mask & ~AC_ERR_OTHER)
1737 			qc->err_mask &= ~AC_ERR_OTHER;
1738 	}
1739 
1740 	/* finish up */
1741 	spin_lock_irqsave(ap->lock, flags);
1742 
1743 	*tf = qc->result_tf;
1744 	err_mask = qc->err_mask;
1745 
1746 	ata_qc_free(qc);
1747 	link->active_tag = preempted_tag;
1748 	link->sactive = preempted_sactive;
1749 	ap->qc_active = preempted_qc_active;
1750 	ap->nr_active_links = preempted_nr_active_links;
1751 
1752 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1753 	 * Until those drivers are fixed, we detect the condition
1754 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1755 	 * port.
1756 	 *
1757 	 * Note that this doesn't change any behavior as internal
1758 	 * command failure results in disabling the device in the
1759 	 * higher layer for LLDDs without new reset/EH callbacks.
1760 	 *
1761 	 * Kill the following code as soon as those drivers are fixed.
1762 	 */
1763 	if (ap->flags & ATA_FLAG_DISABLED) {
1764 		err_mask |= AC_ERR_SYSTEM;
1765 		ata_port_probe(ap);
1766 	}
1767 
1768 	spin_unlock_irqrestore(ap->lock, flags);
1769 
1770 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1771 		ata_internal_cmd_timed_out(dev, command);
1772 
1773 	return err_mask;
1774 }
1775 
1776 /**
1777  *	ata_exec_internal - execute libata internal command
1778  *	@dev: Device to which the command is sent
1779  *	@tf: Taskfile registers for the command and the result
1780  *	@cdb: CDB for packet command
1781  *	@dma_dir: Data transfer direction of the command
1782  *	@buf: Data buffer of the command
1783  *	@buflen: Length of data buffer
1784  *	@timeout: Timeout in msecs (0 for default)
1785  *
1786  *	Wrapper around ata_exec_internal_sg() which takes simple
1787  *	buffer instead of sg list.
1788  *
1789  *	LOCKING:
1790  *	None.  Should be called with kernel context, might sleep.
1791  *
1792  *	RETURNS:
1793  *	Zero on success, AC_ERR_* mask on failure
1794  */
1795 unsigned ata_exec_internal(struct ata_device *dev,
1796 			   struct ata_taskfile *tf, const u8 *cdb,
1797 			   int dma_dir, void *buf, unsigned int buflen,
1798 			   unsigned long timeout)
1799 {
1800 	struct scatterlist *psg = NULL, sg;
1801 	unsigned int n_elem = 0;
1802 
1803 	if (dma_dir != DMA_NONE) {
1804 		WARN_ON(!buf);
1805 		sg_init_one(&sg, buf, buflen);
1806 		psg = &sg;
1807 		n_elem++;
1808 	}
1809 
1810 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1811 				    timeout);
1812 }
1813 
1814 /**
1815  *	ata_do_simple_cmd - execute simple internal command
1816  *	@dev: Device to which the command is sent
1817  *	@cmd: Opcode to execute
1818  *
1819  *	Execute a 'simple' command, that only consists of the opcode
1820  *	'cmd' itself, without filling any other registers
1821  *
1822  *	LOCKING:
1823  *	Kernel thread context (may sleep).
1824  *
1825  *	RETURNS:
1826  *	Zero on success, AC_ERR_* mask on failure
1827  */
1828 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1829 {
1830 	struct ata_taskfile tf;
1831 
1832 	ata_tf_init(dev, &tf);
1833 
1834 	tf.command = cmd;
1835 	tf.flags |= ATA_TFLAG_DEVICE;
1836 	tf.protocol = ATA_PROT_NODATA;
1837 
1838 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1839 }
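/*
 * Illustrative sketch (not part of the driver): flushing a drive's write
 * cache is the kind of register-only command this helper is meant for.
 */
static inline unsigned int ata_example_flush_cache(struct ata_device *dev)
{
	return ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
}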
1840 
1841 /**
1842  *	ata_pio_need_iordy	-	check if iordy needed
1843  *	@adev: ATA device
1844  *
1845  *	Check if the current speed of the device requires IORDY. Used
1846  *	by various controllers for chip configuration.
1847  */
1848 
1849 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1850 {
1851 	/* Controller doesn't support IORDY.  Probably a pointless check
1852 	   as the caller should know this. */
1853 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1854 		return 0;
1855 	/* PIO3 and higher it is mandatory */
1856 	if (adev->pio_mode > XFER_PIO_2)
1857 		return 1;
1858 	/* We turn it on when possible */
1859 	if (ata_id_has_iordy(adev->id))
1860 		return 1;
1861 	return 0;
1862 }
1863 
1864 /**
1865  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1866  *	@adev: ATA device
1867  *
1868  *	Compute the highest mode possible if we are not using iordy. Return
1869  *	-1 if no iordy mode is available.
1870  */
1871 
1872 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1873 {
1874 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1875 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1876 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1877 		/* Is the speed faster than the drive allows non IORDY ? */
1878 		if (pio) {
1879 			/* This is cycle times not frequency - watch the logic! */
1880 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1881 				return 3 << ATA_SHIFT_PIO;
1882 			return 7 << ATA_SHIFT_PIO;
1883 		}
1884 	}
1885 	return 3 << ATA_SHIFT_PIO;
1886 }
1887 
1888 /**
1889  *	ata_do_dev_read_id		-	default ID read method
1890  *	@dev: device
1891  *	@tf: proposed taskfile
1892  *	@id: data buffer
1893  *
1894  *	Issue the identify taskfile and hand back the buffer containing
1895  *	identify data. For some RAID controllers and for pre ATA devices
1896  *	this function is wrapped or replaced by the driver.
1897  */
1898 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1899 					struct ata_taskfile *tf, u16 *id)
1900 {
1901 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1902 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1903 }
1904 
1905 /**
1906  *	ata_dev_read_id - Read ID data from the specified device
1907  *	@dev: target device
1908  *	@p_class: pointer to class of the target device (may be changed)
1909  *	@flags: ATA_READID_* flags
1910  *	@id: buffer to read IDENTIFY data into
1911  *
1912  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1913  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1914  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1915  *	for pre-ATA4 drives.
1916  *
1917  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1918  *	now we abort if we hit that case.
1919  *
1920  *	LOCKING:
1921  *	Kernel thread context (may sleep)
1922  *
1923  *	RETURNS:
1924  *	0 on success, -errno otherwise.
1925  */
1926 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1927 		    unsigned int flags, u16 *id)
1928 {
1929 	struct ata_port *ap = dev->link->ap;
1930 	unsigned int class = *p_class;
1931 	struct ata_taskfile tf;
1932 	unsigned int err_mask = 0;
1933 	const char *reason;
1934 	int may_fallback = 1, tried_spinup = 0;
1935 	int rc;
1936 
1937 	if (ata_msg_ctl(ap))
1938 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1939 
1940 retry:
1941 	ata_tf_init(dev, &tf);
1942 
1943 	switch (class) {
1944 	case ATA_DEV_ATA:
1945 		tf.command = ATA_CMD_ID_ATA;
1946 		break;
1947 	case ATA_DEV_ATAPI:
1948 		tf.command = ATA_CMD_ID_ATAPI;
1949 		break;
1950 	default:
1951 		rc = -ENODEV;
1952 		reason = "unsupported class";
1953 		goto err_out;
1954 	}
1955 
1956 	tf.protocol = ATA_PROT_PIO;
1957 
1958 	/* Some devices choke if TF registers contain garbage.  Make
1959 	 * sure those are properly initialized.
1960 	 */
1961 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1962 
1963 	/* Device presence detection is unreliable on some
1964 	 * controllers.  Always poll IDENTIFY if available.
1965 	 */
1966 	tf.flags |= ATA_TFLAG_POLLING;
1967 
1968 	if (ap->ops->read_id)
1969 		err_mask = ap->ops->read_id(dev, &tf, id);
1970 	else
1971 		err_mask = ata_do_dev_read_id(dev, &tf, id);
1972 
1973 	if (err_mask) {
1974 		if (err_mask & AC_ERR_NODEV_HINT) {
1975 			ata_dev_printk(dev, KERN_DEBUG,
1976 				       "NODEV after polling detection\n");
1977 			return -ENOENT;
1978 		}
1979 
1980 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1981 			/* Device or controller might have reported
1982 			 * the wrong device class.  Give a shot at the
1983 			 * other IDENTIFY if the current one is
1984 			 * aborted by the device.
1985 			 */
1986 			if (may_fallback) {
1987 				may_fallback = 0;
1988 
1989 				if (class == ATA_DEV_ATA)
1990 					class = ATA_DEV_ATAPI;
1991 				else
1992 					class = ATA_DEV_ATA;
1993 				goto retry;
1994 			}
1995 
1996 			/* Control reaches here iff the device aborted
1997 			 * both flavors of IDENTIFYs which happens
1998 			 * sometimes with phantom devices.
1999 			 */
2000 			ata_dev_printk(dev, KERN_DEBUG,
2001 				       "both IDENTIFYs aborted, assuming NODEV\n");
2002 			return -ENOENT;
2003 		}
2004 
2005 		rc = -EIO;
2006 		reason = "I/O error";
2007 		goto err_out;
2008 	}
2009 
2010 	/* Falling back doesn't make sense if ID data was read
2011 	 * successfully at least once.
2012 	 */
2013 	may_fallback = 0;
2014 
2015 	swap_buf_le16(id, ATA_ID_WORDS);
2016 
2017 	/* sanity check */
2018 	rc = -EINVAL;
2019 	reason = "device reports invalid type";
2020 
2021 	if (class == ATA_DEV_ATA) {
2022 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2023 			goto err_out;
2024 	} else {
2025 		if (ata_id_is_ata(id))
2026 			goto err_out;
2027 	}
2028 
2029 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2030 		tried_spinup = 1;
2031 		/*
2032 		 * Drive powered-up in standby mode, and requires a specific
2033 		 * SET_FEATURES spin-up subcommand before it will accept
2034 		 * anything other than the original IDENTIFY command.
2035 		 */
2036 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2037 		if (err_mask && id[2] != 0x738c) {
2038 			rc = -EIO;
2039 			reason = "SPINUP failed";
2040 			goto err_out;
2041 		}
2042 		/*
2043 		 * If the drive initially returned incomplete IDENTIFY info,
2044 		 * we now must reissue the IDENTIFY command.
2045 		 */
2046 		if (id[2] == 0x37c8)
2047 			goto retry;
2048 	}
2049 
2050 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2051 		/*
2052 		 * The exact sequence expected by certain pre-ATA4 drives is:
2053 		 * SRST RESET
2054 		 * IDENTIFY (optional in early ATA)
2055 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2056 		 * anything else..
2057 		 * Some drives were very specific about that exact sequence.
2058 		 *
2059 		 * Note that ATA4 says lba is mandatory so the second check
2060 		 * should never trigger.
2061 		 */
2062 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2063 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2064 			if (err_mask) {
2065 				rc = -EIO;
2066 				reason = "INIT_DEV_PARAMS failed";
2067 				goto err_out;
2068 			}
2069 
2070 			/* current CHS translation info (id[53-58]) might be
2071 			 * changed. reread the identify device info.
2072 			 */
2073 			flags &= ~ATA_READID_POSTRESET;
2074 			goto retry;
2075 		}
2076 	}
2077 
2078 	*p_class = class;
2079 
2080 	return 0;
2081 
2082  err_out:
2083 	if (ata_msg_warn(ap))
2084 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2085 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2086 	return rc;
2087 }
2088 
2089 static inline u8 ata_dev_knobble(struct ata_device *dev)
2090 {
2091 	struct ata_port *ap = dev->link->ap;
2092 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2093 }
2094 
2095 static void ata_dev_config_ncq(struct ata_device *dev,
2096 			       char *desc, size_t desc_sz)
2097 {
2098 	struct ata_port *ap = dev->link->ap;
2099 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2100 
2101 	if (!ata_id_has_ncq(dev->id)) {
2102 		desc[0] = '\0';
2103 		return;
2104 	}
2105 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2106 		snprintf(desc, desc_sz, "NCQ (not used)");
2107 		return;
2108 	}
2109 	if (ap->flags & ATA_FLAG_NCQ) {
2110 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2111 		dev->flags |= ATA_DFLAG_NCQ;
2112 	}
2113 
2114 	if (hdepth >= ddepth)
2115 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2116 	else
2117 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2118 }
2119 
2120 /**
2121  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2122  *	@dev: Target device to configure
2123  *
2124  *	Configure @dev according to @dev->id.  Generic and low-level
2125  *	driver specific fixups are also applied.
2126  *
2127  *	LOCKING:
2128  *	Kernel thread context (may sleep)
2129  *
2130  *	RETURNS:
2131  *	0 on success, -errno otherwise
2132  */
2133 int ata_dev_configure(struct ata_device *dev)
2134 {
2135 	struct ata_port *ap = dev->link->ap;
2136 	struct ata_eh_context *ehc = &dev->link->eh_context;
2137 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2138 	const u16 *id = dev->id;
2139 	unsigned long xfer_mask;
2140 	char revbuf[7];		/* XYZ-99\0 */
2141 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2142 	char modelbuf[ATA_ID_PROD_LEN+1];
2143 	int rc;
2144 
2145 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2146 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2147 			       __func__);
2148 		return 0;
2149 	}
2150 
2151 	if (ata_msg_probe(ap))
2152 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2153 
2154 	/* set horkage */
2155 	dev->horkage |= ata_dev_blacklisted(dev);
2156 	ata_force_horkage(dev);
2157 
2158 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2159 		ata_dev_printk(dev, KERN_INFO,
2160 			       "unsupported device, disabling\n");
2161 		ata_dev_disable(dev);
2162 		return 0;
2163 	}
2164 
2165 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2166 	    dev->class == ATA_DEV_ATAPI) {
2167 		ata_dev_printk(dev, KERN_WARNING,
2168 			"WARNING: ATAPI is %s, device ignored.\n",
2169 			atapi_enabled ? "not supported with this driver"
2170 				      : "disabled");
2171 		ata_dev_disable(dev);
2172 		return 0;
2173 	}
2174 
2175 	/* let ACPI work its magic */
2176 	rc = ata_acpi_on_devcfg(dev);
2177 	if (rc)
2178 		return rc;
2179 
2180 	/* massage HPA, do it early as it might change IDENTIFY data */
2181 	rc = ata_hpa_resize(dev);
2182 	if (rc)
2183 		return rc;
2184 
2185 	/* print device capabilities */
2186 	if (ata_msg_probe(ap))
2187 		ata_dev_printk(dev, KERN_DEBUG,
2188 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2189 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2190 			       __func__,
2191 			       id[49], id[82], id[83], id[84],
2192 			       id[85], id[86], id[87], id[88]);
2193 
2194 	/* initialize to-be-configured parameters */
2195 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2196 	dev->max_sectors = 0;
2197 	dev->cdb_len = 0;
2198 	dev->n_sectors = 0;
2199 	dev->cylinders = 0;
2200 	dev->heads = 0;
2201 	dev->sectors = 0;
2202 
2203 	/*
2204 	 * common ATA, ATAPI feature tests
2205 	 */
2206 
2207 	/* find max transfer mode; for printk only */
2208 	xfer_mask = ata_id_xfermask(id);
2209 
2210 	if (ata_msg_probe(ap))
2211 		ata_dump_id(id);
2212 
2213 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2214 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2215 			sizeof(fwrevbuf));
2216 
2217 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2218 			sizeof(modelbuf));
2219 
2220 	/* ATA-specific feature tests */
2221 	if (dev->class == ATA_DEV_ATA) {
2222 		if (ata_id_is_cfa(id)) {
2223 			if (id[162] & 1) /* CPRM may make this media unusable */
2224 				ata_dev_printk(dev, KERN_WARNING,
2225 					       "supports DRM functions and may "
2226 					       "not be fully accessible.\n");
2227 			snprintf(revbuf, 7, "CFA");
2228 		} else {
2229 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2230 			/* Warn the user if the device has TPM extensions */
2231 			if (ata_id_has_tpm(id))
2232 				ata_dev_printk(dev, KERN_WARNING,
2233 					       "supports DRM functions and may "
2234 					       "not be fully accessible.\n");
2235 		}
2236 
2237 		dev->n_sectors = ata_id_n_sectors(id);
2238 
2239 		if (dev->id[59] & 0x100)
2240 			dev->multi_count = dev->id[59] & 0xff;
2241 
2242 		if (ata_id_has_lba(id)) {
2243 			const char *lba_desc;
2244 			char ncq_desc[20];
2245 
2246 			lba_desc = "LBA";
2247 			dev->flags |= ATA_DFLAG_LBA;
2248 			if (ata_id_has_lba48(id)) {
2249 				dev->flags |= ATA_DFLAG_LBA48;
2250 				lba_desc = "LBA48";
2251 
2252 				if (dev->n_sectors >= (1UL << 28) &&
2253 				    ata_id_has_flush_ext(id))
2254 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2255 			}
2256 
2257 			/* config NCQ */
2258 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2259 
2260 			/* print device info to dmesg */
2261 			if (ata_msg_drv(ap) && print_info) {
2262 				ata_dev_printk(dev, KERN_INFO,
2263 					"%s: %s, %s, max %s\n",
2264 					revbuf, modelbuf, fwrevbuf,
2265 					ata_mode_string(xfer_mask));
2266 				ata_dev_printk(dev, KERN_INFO,
2267 					"%Lu sectors, multi %u: %s %s\n",
2268 					(unsigned long long)dev->n_sectors,
2269 					dev->multi_count, lba_desc, ncq_desc);
2270 			}
2271 		} else {
2272 			/* CHS */
2273 
2274 			/* Default translation */
2275 			dev->cylinders	= id[1];
2276 			dev->heads	= id[3];
2277 			dev->sectors	= id[6];
2278 
2279 			if (ata_id_current_chs_valid(id)) {
2280 				/* Current CHS translation is valid. */
2281 				dev->cylinders = id[54];
2282 				dev->heads     = id[55];
2283 				dev->sectors   = id[56];
2284 			}
2285 
2286 			/* print device info to dmesg */
2287 			if (ata_msg_drv(ap) && print_info) {
2288 				ata_dev_printk(dev, KERN_INFO,
2289 					"%s: %s, %s, max %s\n",
2290 					revbuf,	modelbuf, fwrevbuf,
2291 					ata_mode_string(xfer_mask));
2292 				ata_dev_printk(dev, KERN_INFO,
2293 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2294 					(unsigned long long)dev->n_sectors,
2295 					dev->multi_count, dev->cylinders,
2296 					dev->heads, dev->sectors);
2297 			}
2298 		}
2299 
2300 		dev->cdb_len = 16;
2301 	}
2302 
2303 	/* ATAPI-specific feature tests */
2304 	else if (dev->class == ATA_DEV_ATAPI) {
2305 		const char *cdb_intr_string = "";
2306 		const char *atapi_an_string = "";
2307 		const char *dma_dir_string = "";
2308 		u32 sntf;
2309 
2310 		rc = atapi_cdb_len(id);
2311 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2312 			if (ata_msg_warn(ap))
2313 				ata_dev_printk(dev, KERN_WARNING,
2314 					       "unsupported CDB len\n");
2315 			rc = -EINVAL;
2316 			goto err_out_nosup;
2317 		}
2318 		dev->cdb_len = (unsigned int) rc;
2319 
2320 		/* Enable ATAPI AN if both the host and device have
2321 		 * the support.  If PMP is attached, SNTF is required
2322 		 * to enable ATAPI AN to discern between PHY status
2323 		 * changed notifications and ATAPI ANs.
2324 		 */
2325 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2326 		    (!sata_pmp_attached(ap) ||
2327 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2328 			unsigned int err_mask;
2329 
2330 			/* issue SET feature command to turn this on */
2331 			err_mask = ata_dev_set_feature(dev,
2332 					SETFEATURES_SATA_ENABLE, SATA_AN);
2333 			if (err_mask)
2334 				ata_dev_printk(dev, KERN_ERR,
2335 					"failed to enable ATAPI AN "
2336 					"(err_mask=0x%x)\n", err_mask);
2337 			else {
2338 				dev->flags |= ATA_DFLAG_AN;
2339 				atapi_an_string = ", ATAPI AN";
2340 			}
2341 		}
2342 
2343 		if (ata_id_cdb_intr(dev->id)) {
2344 			dev->flags |= ATA_DFLAG_CDB_INTR;
2345 			cdb_intr_string = ", CDB intr";
2346 		}
2347 
2348 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2349 			dev->flags |= ATA_DFLAG_DMADIR;
2350 			dma_dir_string = ", DMADIR";
2351 		}
2352 
2353 		/* print device info to dmesg */
2354 		if (ata_msg_drv(ap) && print_info)
2355 			ata_dev_printk(dev, KERN_INFO,
2356 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2357 				       modelbuf, fwrevbuf,
2358 				       ata_mode_string(xfer_mask),
2359 				       cdb_intr_string, atapi_an_string,
2360 				       dma_dir_string);
2361 	}
2362 
2363 	/* determine max_sectors */
2364 	dev->max_sectors = ATA_MAX_SECTORS;
2365 	if (dev->flags & ATA_DFLAG_LBA48)
2366 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2367 
2368 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2369 		if (ata_id_has_hipm(dev->id))
2370 			dev->flags |= ATA_DFLAG_HIPM;
2371 		if (ata_id_has_dipm(dev->id))
2372 			dev->flags |= ATA_DFLAG_DIPM;
2373 	}
2374 
2375 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2376 	   200 sectors */
2377 	if (ata_dev_knobble(dev)) {
2378 		if (ata_msg_drv(ap) && print_info)
2379 			ata_dev_printk(dev, KERN_INFO,
2380 				       "applying bridge limits\n");
2381 		dev->udma_mask &= ATA_UDMA5;
2382 		dev->max_sectors = ATA_MAX_SECTORS;
2383 	}
2384 
2385 	if ((dev->class == ATA_DEV_ATAPI) &&
2386 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2387 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2388 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2389 	}
2390 
2391 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2392 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2393 					 dev->max_sectors);
2394 
2395 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2396 		dev->horkage |= ATA_HORKAGE_IPM;
2397 
2398 		/* reset link pm_policy for this port to no pm */
2399 		ap->pm_policy = MAX_PERFORMANCE;
2400 	}
2401 
2402 	if (ap->ops->dev_config)
2403 		ap->ops->dev_config(dev);
2404 
2405 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2406 		/* Let the user know. We don't want to disallow opens for
2407 		   rescue purposes, or in case the vendor is just a blithering
2408 		   idiot. Do this after the dev_config call as some controllers
2409 		   with buggy firmware may want to avoid reporting false device
2410 		   bugs */
2411 
2412 		if (print_info) {
2413 			ata_dev_printk(dev, KERN_WARNING,
2414 "Drive reports diagnostics failure. This may indicate a drive\n");
2415 			ata_dev_printk(dev, KERN_WARNING,
2416 "fault or invalid emulation. Contact drive vendor for information.\n");
2417 		}
2418 	}
2419 
2420 	return 0;
2421 
2422 err_out_nosup:
2423 	if (ata_msg_probe(ap))
2424 		ata_dev_printk(dev, KERN_DEBUG,
2425 			       "%s: EXIT, err\n", __func__);
2426 	return rc;
2427 }
2428 
2429 /**
2430  *	ata_cable_40wire	-	return 40 wire cable type
2431  *	@ap: port
2432  *
2433  *	Helper method for drivers which want to hardwire 40 wire cable
2434  *	detection.
2435  */
2436 
2437 int ata_cable_40wire(struct ata_port *ap)
2438 {
2439 	return ATA_CBL_PATA40;
2440 }
2441 
2442 /**
2443  *	ata_cable_80wire	-	return 80 wire cable type
2444  *	@ap: port
2445  *
2446  *	Helper method for drivers which want to hardwire 80 wire cable
2447  *	detection.
2448  */
2449 
2450 int ata_cable_80wire(struct ata_port *ap)
2451 {
2452 	return ATA_CBL_PATA80;
2453 }
2454 
2455 /**
2456  *	ata_cable_unknown	-	return unknown PATA cable.
2457  *	@ap: port
2458  *
2459  *	Helper method for drivers which have no PATA cable detection.
2460  */
2461 
2462 int ata_cable_unknown(struct ata_port *ap)
2463 {
2464 	return ATA_CBL_PATA_UNK;
2465 }
2466 
2467 /**
2468  *	ata_cable_ignore	-	return ignored PATA cable.
2469  *	@ap: port
2470  *
2471  *	Helper method for drivers which don't use cable type to limit
2472  *	transfer mode.
2473  */
2474 int ata_cable_ignore(struct ata_port *ap)
2475 {
2476 	return ATA_CBL_PATA_IGN;
2477 }
2478 
2479 /**
2480  *	ata_cable_sata	-	return SATA cable type
2481  *	@ap: port
2482  *
2483  *	Helper method for drivers which have SATA cables
2484  */
2485 
2486 int ata_cable_sata(struct ata_port *ap)
2487 {
2488 	return ATA_CBL_SATA;
2489 }
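
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * drivers usually hardwire one of the ata_cable_*() helpers (or their own
 * detection routine) into ->cable_detect of their port operations; the
 * probe code below then picks it up once PDIAG- has been released.  The
 * ops structure, the ->set_piomode hook and the assumption of a
 * conventional BMDMA base (ata_bmdma_port_ops) are illustrative only.
 *
 *	static struct ata_port_operations example_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= example_set_piomode,
 *	};
 */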
2490 
2491 /**
2492  *	ata_bus_probe - Reset and probe ATA bus
2493  *	@ap: Bus to probe
2494  *
2495  *	Master ATA bus probing function.  Initiates a hardware-dependent
2496  *	bus reset, then attempts to identify any devices found on
2497  *	the bus.
2498  *
2499  *	LOCKING:
2500  *	PCI/etc. bus probe sem.
2501  *
2502  *	RETURNS:
2503  *	Zero on success, negative errno otherwise.
2504  */
2505 
2506 int ata_bus_probe(struct ata_port *ap)
2507 {
2508 	unsigned int classes[ATA_MAX_DEVICES];
2509 	int tries[ATA_MAX_DEVICES];
2510 	int rc;
2511 	struct ata_device *dev;
2512 
2513 	ata_port_probe(ap);
2514 
2515 	ata_link_for_each_dev(dev, &ap->link)
2516 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2517 
2518  retry:
2519 	ata_link_for_each_dev(dev, &ap->link) {
2520 		/* If we issue an SRST then an ATA drive (not ATAPI)
2521 		 * may change configuration and be in PIO0 timing. If
2522 		 * we do a hard reset (or are coming from power on)
2523 		 * this is true for ATA or ATAPI. Until we've set a
2524 		 * suitable controller mode we should not touch the
2525 		 * bus as we may be talking too fast.
2526 		 */
2527 		dev->pio_mode = XFER_PIO_0;
2528 
2529 		/* If the controller has a pio mode setup function
2530 		 * then use it to set the chipset to rights. Don't
2531 		 * touch the DMA setup as that will be dealt with when
2532 		 * configuring devices.
2533 		 */
2534 		if (ap->ops->set_piomode)
2535 			ap->ops->set_piomode(ap, dev);
2536 	}
2537 
2538 	/* reset and determine device classes */
2539 	ap->ops->phy_reset(ap);
2540 
2541 	ata_link_for_each_dev(dev, &ap->link) {
2542 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2543 		    dev->class != ATA_DEV_UNKNOWN)
2544 			classes[dev->devno] = dev->class;
2545 		else
2546 			classes[dev->devno] = ATA_DEV_NONE;
2547 
2548 		dev->class = ATA_DEV_UNKNOWN;
2549 	}
2550 
2551 	ata_port_probe(ap);
2552 
2553 	/* read IDENTIFY page and configure devices. We have to do the identify
2554 	   specific sequence bass-ackwards so that PDIAG- is released by
2555 	   the slave device */
2556 
2557 	ata_link_for_each_dev_reverse(dev, &ap->link) {
2558 		if (tries[dev->devno])
2559 			dev->class = classes[dev->devno];
2560 
2561 		if (!ata_dev_enabled(dev))
2562 			continue;
2563 
2564 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2565 				     dev->id);
2566 		if (rc)
2567 			goto fail;
2568 	}
2569 
2570 	/* Now ask for the cable type as PDIAG- should have been released */
2571 	if (ap->ops->cable_detect)
2572 		ap->cbl = ap->ops->cable_detect(ap);
2573 
2574 	/* We may have SATA bridge glue hiding here irrespective of the
2575 	   reported cable types and sensed types */
2576 	ata_link_for_each_dev(dev, &ap->link) {
2577 		if (!ata_dev_enabled(dev))
2578 			continue;
2579 		/* SATA drives indicate we have a bridge. We don't know which
2580 		   end of the link the bridge is on, which is a problem */
2581 		if (ata_id_is_sata(dev->id))
2582 			ap->cbl = ATA_CBL_SATA;
2583 	}
2584 
2585 	/* After the identify sequence we can now set up the devices. We do
2586 	   this in the normal order so that the user doesn't get confused */
2587 
2588 	ata_link_for_each_dev(dev, &ap->link) {
2589 		if (!ata_dev_enabled(dev))
2590 			continue;
2591 
2592 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2593 		rc = ata_dev_configure(dev);
2594 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2595 		if (rc)
2596 			goto fail;
2597 	}
2598 
2599 	/* configure transfer mode */
2600 	rc = ata_set_mode(&ap->link, &dev);
2601 	if (rc)
2602 		goto fail;
2603 
2604 	ata_link_for_each_dev(dev, &ap->link)
2605 		if (ata_dev_enabled(dev))
2606 			return 0;
2607 
2608 	/* no device present, disable port */
2609 	ata_port_disable(ap);
2610 	return -ENODEV;
2611 
2612  fail:
2613 	tries[dev->devno]--;
2614 
2615 	switch (rc) {
2616 	case -EINVAL:
2617 		/* eeek, something went very wrong, give up */
2618 		tries[dev->devno] = 0;
2619 		break;
2620 
2621 	case -ENODEV:
2622 		/* give it just one more chance */
2623 		tries[dev->devno] = min(tries[dev->devno], 1);
2624 	case -EIO:
2625 		if (tries[dev->devno] == 1) {
2626 			/* This is the last chance, better to slow
2627 			 * down than lose it.
2628 			 */
2629 			sata_down_spd_limit(&ap->link);
2630 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2631 		}
2632 	}
2633 
2634 	if (!tries[dev->devno])
2635 		ata_dev_disable(dev);
2636 
2637 	goto retry;
2638 }
2639 
2640 /**
2641  *	ata_port_probe - Mark port as enabled
2642  *	@ap: Port for which we indicate enablement
2643  *
2644  *	Modify @ap data structure such that the system
2645  *	thinks that the entire port is enabled.
2646  *
2647  *	LOCKING: host lock, or some other form of
2648  *	serialization.
2649  */
2650 
2651 void ata_port_probe(struct ata_port *ap)
2652 {
2653 	ap->flags &= ~ATA_FLAG_DISABLED;
2654 }
2655 
2656 /**
2657  *	sata_print_link_status - Print SATA link status
2658  *	@link: SATA link to printk link status about
2659  *
2660  *	This function prints link speed and status of a SATA link.
2661  *
2662  *	LOCKING:
2663  *	None.
2664  */
2665 static void sata_print_link_status(struct ata_link *link)
2666 {
2667 	u32 sstatus, scontrol, tmp;
2668 
2669 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2670 		return;
2671 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2672 
2673 	if (ata_link_online(link)) {
2674 		tmp = (sstatus >> 4) & 0xf;
2675 		ata_link_printk(link, KERN_INFO,
2676 				"SATA link up %s (SStatus %X SControl %X)\n",
2677 				sata_spd_string(tmp), sstatus, scontrol);
2678 	} else {
2679 		ata_link_printk(link, KERN_INFO,
2680 				"SATA link down (SStatus %X SControl %X)\n",
2681 				sstatus, scontrol);
2682 	}
2683 }
2684 
2685 /**
2686  *	ata_dev_pair		-	return other device on cable
2687  *	@adev: device
2688  *
2689  *	Obtain the other device on the same cable, or NULL if none
2690  *	is present.
2691  */
2692 
2693 struct ata_device *ata_dev_pair(struct ata_device *adev)
2694 {
2695 	struct ata_link *link = adev->link;
2696 	struct ata_device *pair = &link->device[1 - adev->devno];
2697 	if (!ata_dev_enabled(pair))
2698 		return NULL;
2699 	return pair;
2700 }
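
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * PATA drivers whose master and slave timings live in one shared register
 * can use ata_dev_pair() to reprogram the companion device as well.  The
 * example_program_timings() helper is hypothetical.
 *
 *	static void example_retune(struct ata_device *adev)
 *	{
 *		struct ata_device *pair = ata_dev_pair(adev);
 *
 *		example_program_timings(adev);
 *		if (pair)
 *			example_program_timings(pair);
 *	}
 */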
2701 
2702 /**
2703  *	ata_port_disable - Disable port.
2704  *	@ap: Port to be disabled.
2705  *
2706  *	Modify @ap data structure such that the system
2707  *	thinks that the entire port is disabled, and should
2708  *	never attempt to probe or communicate with devices
2709  *	on this port.
2710  *
2711  *	LOCKING: host lock, or some other form of
2712  *	serialization.
2713  */
2714 
2715 void ata_port_disable(struct ata_port *ap)
2716 {
2717 	ap->link.device[0].class = ATA_DEV_NONE;
2718 	ap->link.device[1].class = ATA_DEV_NONE;
2719 	ap->flags |= ATA_FLAG_DISABLED;
2720 }
2721 
2722 /**
2723  *	sata_down_spd_limit - adjust SATA spd limit downward
2724  *	@link: Link to adjust SATA spd limit for
2725  *
2726  *	Adjust SATA spd limit of @link downward.  Note that this
2727  *	function only adjusts the limit.  The change must be applied
2728  *	using sata_set_spd().
2729  *
2730  *	LOCKING:
2731  *	Inherited from caller.
2732  *
2733  *	RETURNS:
2734  *	0 on success, negative errno on failure
2735  */
2736 int sata_down_spd_limit(struct ata_link *link)
2737 {
2738 	u32 sstatus, spd, mask;
2739 	int rc, highbit;
2740 
2741 	if (!sata_scr_valid(link))
2742 		return -EOPNOTSUPP;
2743 
2744 	/* If SCR can be read, use it to determine the current SPD.
2745 	 * If not, use cached value in link->sata_spd.
2746 	 */
2747 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2748 	if (rc == 0)
2749 		spd = (sstatus >> 4) & 0xf;
2750 	else
2751 		spd = link->sata_spd;
2752 
2753 	mask = link->sata_spd_limit;
2754 	if (mask <= 1)
2755 		return -EINVAL;
2756 
2757 	/* unconditionally mask off the highest bit */
2758 	highbit = fls(mask) - 1;
2759 	mask &= ~(1 << highbit);
2760 
2761 	/* Mask off all speeds higher than or equal to the current
2762 	 * one.  Force 1.5Gbps if current SPD is not available.
2763 	 */
2764 	if (spd > 1)
2765 		mask &= (1 << (spd - 1)) - 1;
2766 	else
2767 		mask &= 1;
2768 
2769 	/* were we already at the bottom? */
2770 	if (!mask)
2771 		return -EINVAL;
2772 
2773 	link->sata_spd_limit = mask;
2774 
2775 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2776 			sata_spd_string(fls(mask)));
2777 
2778 	return 0;
2779 }
2780 
2781 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2782 {
2783 	struct ata_link *host_link = &link->ap->link;
2784 	u32 limit, target, spd;
2785 
2786 	limit = link->sata_spd_limit;
2787 
2788 	/* Don't configure downstream link faster than upstream link.
2789 	 * It doesn't speed up anything and some PMPs choke on such
2790 	 * configuration.
2791 	 */
2792 	if (!ata_is_host_link(link) && host_link->sata_spd)
2793 		limit &= (1 << host_link->sata_spd) - 1;
2794 
2795 	if (limit == UINT_MAX)
2796 		target = 0;
2797 	else
2798 		target = fls(limit);
2799 
2800 	spd = (*scontrol >> 4) & 0xf;
2801 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2802 
2803 	return spd != target;
2804 }
2805 
2806 /**
2807  *	sata_set_spd_needed - is SATA spd configuration needed
2808  *	@link: Link in question
2809  *
2810  *	Test whether the spd limit in SControl matches
2811  *	@link->sata_spd_limit.  This function is used to determine
2812  *	whether hardreset is necessary to apply SATA spd
2813  *	configuration.
2814  *
2815  *	LOCKING:
2816  *	Inherited from caller.
2817  *
2818  *	RETURNS:
2819  *	1 if SATA spd configuration is needed, 0 otherwise.
2820  */
2821 static int sata_set_spd_needed(struct ata_link *link)
2822 {
2823 	u32 scontrol;
2824 
2825 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2826 		return 1;
2827 
2828 	return __sata_set_spd_needed(link, &scontrol);
2829 }
2830 
2831 /**
2832  *	sata_set_spd - set SATA spd according to spd limit
2833  *	@link: Link to set SATA spd for
2834  *
2835  *	Set SATA spd of @link according to sata_spd_limit.
2836  *
2837  *	LOCKING:
2838  *	Inherited from caller.
2839  *
2840  *	RETURNS:
2841  *	0 if spd doesn't need to be changed, 1 if spd has been
2842  *	changed.  Negative errno if SCR registers are inaccessible.
2843  */
2844 int sata_set_spd(struct ata_link *link)
2845 {
2846 	u32 scontrol;
2847 	int rc;
2848 
2849 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2850 		return rc;
2851 
2852 	if (!__sata_set_spd_needed(link, &scontrol))
2853 		return 0;
2854 
2855 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2856 		return rc;
2857 
2858 	return 1;
2859 }
2860 
2861 /*
2862  * This mode timing computation functionality is ported over from
2863  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2864  */
2865 /*
2866  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2867  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2868  * for UDMA6, which is currently supported only by Maxtor drives.
2869  *
2870  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2871  */
2872 
2873 static const struct ata_timing ata_timing[] = {
2874 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2875 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2876 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2877 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2878 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2879 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2880 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2881 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2882 
2883 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2884 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2885 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2886 
2887 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2888 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2889 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2890 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2891 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2892 
2893 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2894 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2895 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2896 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2897 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2898 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2899 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2900 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2901 
2902 	{ 0xFF }
2903 };
2904 
2905 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2906 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2907 
2908 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2909 {
2910 	q->setup   = EZ(t->setup   * 1000,  T);
2911 	q->act8b   = EZ(t->act8b   * 1000,  T);
2912 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2913 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2914 	q->active  = EZ(t->active  * 1000,  T);
2915 	q->recover = EZ(t->recover * 1000,  T);
2916 	q->cycle   = EZ(t->cycle   * 1000,  T);
2917 	q->udma    = EZ(t->udma    * 1000, UT);
2918 }
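
/*
 * Worked example (editor's note, not part of the original file): the *1000
 * scaling above converts the nanosecond table entries to picoseconds, so T
 * and UT are expected in picoseconds per clock.  For a 33 MHz command clock
 * a driver would pass roughly T = 1000000000 / 33333 ~= 30000.  Quantizing
 * the 120 ns PIO4 cycle then gives EZ(120 * 1000, 30000) =
 * ((120000 - 1) / 30000) + 1 = 4 clocks; ENOUGH() always rounds up, so the
 * programmed timing can never be faster than the table value.
 */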
2919 
2920 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2921 		      struct ata_timing *m, unsigned int what)
2922 {
2923 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2924 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2925 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2926 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2927 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2928 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2929 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2930 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2931 }
2932 
2933 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2934 {
2935 	const struct ata_timing *t = ata_timing;
2936 
2937 	while (xfer_mode > t->mode)
2938 		t++;
2939 
2940 	if (xfer_mode == t->mode)
2941 		return t;
2942 	return NULL;
2943 }
2944 
2945 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2946 		       struct ata_timing *t, int T, int UT)
2947 {
2948 	const struct ata_timing *s;
2949 	struct ata_timing p;
2950 
2951 	/*
2952 	 * Find the mode.
2953 	 */
2954 
2955 	if (!(s = ata_timing_find_mode(speed)))
2956 		return -EINVAL;
2957 
2958 	memcpy(t, s, sizeof(*s));
2959 
2960 	/*
2961 	 * If the drive is an EIDE drive, it can tell us it needs extended
2962 	 * PIO/MW_DMA cycle timing.
2963 	 */
2964 
2965 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2966 		memset(&p, 0, sizeof(p));
2967 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2968 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2969 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2970 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2971 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2972 		}
2973 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2974 	}
2975 
2976 	/*
2977 	 * Convert the timing to bus clock counts.
2978 	 */
2979 
2980 	ata_timing_quantize(t, t, T, UT);
2981 
2982 	/*
2983 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2984 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2985 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2986 	 */
2987 
2988 	if (speed > XFER_PIO_6) {
2989 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2990 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2991 	}
2992 
2993 	/*
2994 	 * Lengthen active & recovery time so that cycle time is correct.
2995 	 */
2996 
2997 	if (t->act8b + t->rec8b < t->cyc8b) {
2998 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2999 		t->rec8b = t->cyc8b - t->act8b;
3000 	}
3001 
3002 	if (t->active + t->recover < t->cycle) {
3003 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3004 		t->recover = t->cycle - t->active;
3005 	}
3006 
3007 	/* In a few cases quantisation may produce enough errors to
3008 	   leave t->cycle too low for the sum of active and recovery;
3009 	   if so we must correct this */
3010 	if (t->active + t->recover > t->cycle)
3011 		t->cycle = t->active + t->recover;
3012 
3013 	return 0;
3014 }
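
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a PATA controller driver typically calls ata_timing_compute() from its
 * ->set_piomode()/->set_dmamode() hooks and then writes the resulting
 * clock counts into its own registers.  The 33 MHz clock, the UT choice
 * and example_load_timing() are illustrative assumptions only.
 *
 *	static void example_set_dmamode(struct ata_port *ap,
 *					struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *		int T = 1000000000 / 33333;	// command clock period in ps
 *		int UT = T / 2;			// UDMA strobe period (assumed)
 *
 *		if (ata_timing_compute(adev, adev->dma_mode, &t, T, UT))
 *			return;
 *		example_load_timing(ap, adev, &t);	// hypothetical hook
 *	}
 */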
3015 
3016 /**
3017  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3018  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3019  *	@cycle: cycle duration in ns
3020  *
3021  *	Return matching xfer mode for @cycle.  The returned mode is of
3022  *	the transfer type specified by @xfer_shift.  If @cycle is too
3023  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3024  *	than the fastest known mode, the fastest mode is returned.
3025  *
3026  *	LOCKING:
3027  *	None.
3028  *
3029  *	RETURNS:
3030  *	Matching xfer_mode, 0xff if no match found.
3031  */
3032 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3033 {
3034 	u8 base_mode = 0xff, last_mode = 0xff;
3035 	const struct ata_xfer_ent *ent;
3036 	const struct ata_timing *t;
3037 
3038 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3039 		if (ent->shift == xfer_shift)
3040 			base_mode = ent->base;
3041 
3042 	for (t = ata_timing_find_mode(base_mode);
3043 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3044 		unsigned short this_cycle;
3045 
3046 		switch (xfer_shift) {
3047 		case ATA_SHIFT_PIO:
3048 		case ATA_SHIFT_MWDMA:
3049 			this_cycle = t->cycle;
3050 			break;
3051 		case ATA_SHIFT_UDMA:
3052 			this_cycle = t->udma;
3053 			break;
3054 		default:
3055 			return 0xff;
3056 		}
3057 
3058 		if (cycle > this_cycle)
3059 			break;
3060 
3061 		last_mode = t->mode;
3062 	}
3063 
3064 	return last_mode;
3065 }
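
/*
 * Example (editor's note, not part of the original file): given the timing
 * table above, a 120 ns strobe period maps back to XFER_UDMA_0 (UDMA/16):
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 120);
 *
 * A faster (smaller) cycle walks further down the table, while anything
 * slower than UDMA0 yields 0xff.
 */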
3066 
3067 /**
3068  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3069  *	@dev: Device to adjust xfer masks
3070  *	@sel: ATA_DNXFER_* selector
3071  *
3072  *	Adjust xfer masks of @dev downward.  Note that this function
3073  *	does not apply the change.  Invoking ata_set_mode() afterwards
3074  *	will apply the limit.
3075  *
3076  *	LOCKING:
3077  *	Inherited from caller.
3078  *
3079  *	RETURNS:
3080  *	0 on success, negative errno on failure
3081  */
3082 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3083 {
3084 	char buf[32];
3085 	unsigned long orig_mask, xfer_mask;
3086 	unsigned long pio_mask, mwdma_mask, udma_mask;
3087 	int quiet, highbit;
3088 
3089 	quiet = !!(sel & ATA_DNXFER_QUIET);
3090 	sel &= ~ATA_DNXFER_QUIET;
3091 
3092 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3093 						  dev->mwdma_mask,
3094 						  dev->udma_mask);
3095 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3096 
3097 	switch (sel) {
3098 	case ATA_DNXFER_PIO:
3099 		highbit = fls(pio_mask) - 1;
3100 		pio_mask &= ~(1 << highbit);
3101 		break;
3102 
3103 	case ATA_DNXFER_DMA:
3104 		if (udma_mask) {
3105 			highbit = fls(udma_mask) - 1;
3106 			udma_mask &= ~(1 << highbit);
3107 			if (!udma_mask)
3108 				return -ENOENT;
3109 		} else if (mwdma_mask) {
3110 			highbit = fls(mwdma_mask) - 1;
3111 			mwdma_mask &= ~(1 << highbit);
3112 			if (!mwdma_mask)
3113 				return -ENOENT;
3114 		}
3115 		break;
3116 
3117 	case ATA_DNXFER_40C:
3118 		udma_mask &= ATA_UDMA_MASK_40C;
3119 		break;
3120 
3121 	case ATA_DNXFER_FORCE_PIO0:
3122 		pio_mask &= 1;
3123 	case ATA_DNXFER_FORCE_PIO:
3124 		mwdma_mask = 0;
3125 		udma_mask = 0;
3126 		break;
3127 
3128 	default:
3129 		BUG();
3130 	}
3131 
3132 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3133 
3134 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3135 		return -ENOENT;
3136 
3137 	if (!quiet) {
3138 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3139 			snprintf(buf, sizeof(buf), "%s:%s",
3140 				 ata_mode_string(xfer_mask),
3141 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3142 		else
3143 			snprintf(buf, sizeof(buf), "%s",
3144 				 ata_mode_string(xfer_mask));
3145 
3146 		ata_dev_printk(dev, KERN_WARNING,
3147 			       "limiting speed to %s\n", buf);
3148 	}
3149 
3150 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3151 			    &dev->udma_mask);
3152 
3153 	return 0;
3154 }
3155 
3156 static int ata_dev_set_mode(struct ata_device *dev)
3157 {
3158 	struct ata_eh_context *ehc = &dev->link->eh_context;
3159 	const char *dev_err_whine = "";
3160 	int ign_dev_err = 0;
3161 	unsigned int err_mask;
3162 	int rc;
3163 
3164 	dev->flags &= ~ATA_DFLAG_PIO;
3165 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3166 		dev->flags |= ATA_DFLAG_PIO;
3167 
3168 	err_mask = ata_dev_set_xfermode(dev);
3169 
3170 	if (err_mask & ~AC_ERR_DEV)
3171 		goto fail;
3172 
3173 	/* revalidate */
3174 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3175 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3176 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3177 	if (rc)
3178 		return rc;
3179 
3180 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3181 		/* Old CFA may refuse this command, which is just fine */
3182 		if (ata_id_is_cfa(dev->id))
3183 			ign_dev_err = 1;
3184 		/* Catch several broken garbage emulations plus some pre
3185 		   ATA devices */
3186 		if (ata_id_major_version(dev->id) == 0 &&
3187 					dev->pio_mode <= XFER_PIO_2)
3188 			ign_dev_err = 1;
3189 		/* Some very old devices and some bad newer ones fail
3190 		   any kind of SET_XFERMODE request but support PIO0-2
3191 		   timings and no IORDY */
3192 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3193 			ign_dev_err = 1;
3194 	}
3195 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3196 	   Don't fail an MWDMA0 set if the device indicates it is in MWDMA0 */
3197 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3198 	    dev->dma_mode == XFER_MW_DMA_0 &&
3199 	    (dev->id[63] >> 8) & 1)
3200 		ign_dev_err = 1;
3201 
3202 	/* if the device is actually configured correctly, ignore dev err */
3203 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3204 		ign_dev_err = 1;
3205 
3206 	if (err_mask & AC_ERR_DEV) {
3207 		if (!ign_dev_err)
3208 			goto fail;
3209 		else
3210 			dev_err_whine = " (device error ignored)";
3211 	}
3212 
3213 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3214 		dev->xfer_shift, (int)dev->xfer_mode);
3215 
3216 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3217 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3218 		       dev_err_whine);
3219 
3220 	return 0;
3221 
3222  fail:
3223 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3224 		       "(err_mask=0x%x)\n", err_mask);
3225 	return -EIO;
3226 }
3227 
3228 /**
3229  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3230  *	@link: link on which timings will be programmed
3231  *	@r_failed_dev: out parameter for failed device
3232  *
3233  *	Standard implementation of the function used to tune and set
3234  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3235  *	ata_dev_set_mode() fails, pointer to the failing device is
3236  *	returned in @r_failed_dev.
3237  *
3238  *	LOCKING:
3239  *	PCI/etc. bus probe sem.
3240  *
3241  *	RETURNS:
3242  *	0 on success, negative errno otherwise
3243  */
3244 
3245 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3246 {
3247 	struct ata_port *ap = link->ap;
3248 	struct ata_device *dev;
3249 	int rc = 0, used_dma = 0, found = 0;
3250 
3251 	/* step 1: calculate xfer_mask */
3252 	ata_link_for_each_dev(dev, link) {
3253 		unsigned long pio_mask, dma_mask;
3254 		unsigned int mode_mask;
3255 
3256 		if (!ata_dev_enabled(dev))
3257 			continue;
3258 
3259 		mode_mask = ATA_DMA_MASK_ATA;
3260 		if (dev->class == ATA_DEV_ATAPI)
3261 			mode_mask = ATA_DMA_MASK_ATAPI;
3262 		else if (ata_id_is_cfa(dev->id))
3263 			mode_mask = ATA_DMA_MASK_CFA;
3264 
3265 		ata_dev_xfermask(dev);
3266 		ata_force_xfermask(dev);
3267 
3268 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3269 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3270 
3271 		if (libata_dma_mask & mode_mask)
3272 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3273 		else
3274 			dma_mask = 0;
3275 
3276 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3277 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3278 
3279 		found = 1;
3280 		if (dev->dma_mode != 0xff)
3281 			used_dma = 1;
3282 	}
3283 	if (!found)
3284 		goto out;
3285 
3286 	/* step 2: always set host PIO timings */
3287 	ata_link_for_each_dev(dev, link) {
3288 		if (!ata_dev_enabled(dev))
3289 			continue;
3290 
3291 		if (dev->pio_mode == 0xff) {
3292 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3293 			rc = -EINVAL;
3294 			goto out;
3295 		}
3296 
3297 		dev->xfer_mode = dev->pio_mode;
3298 		dev->xfer_shift = ATA_SHIFT_PIO;
3299 		if (ap->ops->set_piomode)
3300 			ap->ops->set_piomode(ap, dev);
3301 	}
3302 
3303 	/* step 3: set host DMA timings */
3304 	ata_link_for_each_dev(dev, link) {
3305 		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3306 			continue;
3307 
3308 		dev->xfer_mode = dev->dma_mode;
3309 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3310 		if (ap->ops->set_dmamode)
3311 			ap->ops->set_dmamode(ap, dev);
3312 	}
3313 
3314 	/* step 4: update devices' xfer mode */
3315 	ata_link_for_each_dev(dev, link) {
3316 		/* don't update suspended devices' xfer mode */
3317 		if (!ata_dev_enabled(dev))
3318 			continue;
3319 
3320 		rc = ata_dev_set_mode(dev);
3321 		if (rc)
3322 			goto out;
3323 	}
3324 
3325 	/* Record simplex status. If we selected DMA then the other
3326 	 * host channels are not permitted to do so.
3327 	 */
3328 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3329 		ap->host->simplex_claimed = ap;
3330 
3331  out:
3332 	if (rc)
3333 		*r_failed_dev = dev;
3334 	return rc;
3335 }
3336 
3337 /**
3338  *	ata_wait_ready - wait for link to become ready
3339  *	@link: link to be waited on
3340  *	@deadline: deadline jiffies for the operation
3341  *	@check_ready: callback to check link readiness
3342  *
3343  *	Wait for @link to become ready.  @check_ready should return
3344  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3345  *	link doesn't seem to be occupied, other errno for other error
3346  *	conditions.
3347  *
3348  *	Transient -ENODEV conditions are allowed for
3349  *	ATA_TMOUT_FF_WAIT.
3350  *
3351  *	LOCKING:
3352  *	EH context.
3353  *
3354  *	RETURNS:
3355  *	0 if @link is ready before @deadline; otherwise, -errno.
3356  */
3357 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3358 		   int (*check_ready)(struct ata_link *link))
3359 {
3360 	unsigned long start = jiffies;
3361 	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3362 	int warned = 0;
3363 
3364 	if (time_after(nodev_deadline, deadline))
3365 		nodev_deadline = deadline;
3366 
3367 	while (1) {
3368 		unsigned long now = jiffies;
3369 		int ready, tmp;
3370 
3371 		ready = tmp = check_ready(link);
3372 		if (ready > 0)
3373 			return 0;
3374 
3375 		/* -ENODEV could be transient.  Ignore -ENODEV if link
3376 		 * is online.  Also, some SATA devices take a long
3377 		 * time to clear 0xff after reset.  For example,
3378 		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3379 		 * GoVault needs even more than that.  Wait for
3380 		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3381 		 *
3382 		 * Note that some PATA controllers (pata_ali) explode
3383 		 * if status register is read more than once when
3384 		 * there's no device attached.
3385 		 */
3386 		if (ready == -ENODEV) {
3387 			if (ata_link_online(link))
3388 				ready = 0;
3389 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3390 				 !ata_link_offline(link) &&
3391 				 time_before(now, nodev_deadline))
3392 				ready = 0;
3393 		}
3394 
3395 		if (ready)
3396 			return ready;
3397 		if (time_after(now, deadline))
3398 			return -EBUSY;
3399 
3400 		if (!warned && time_after(now, start + 5 * HZ) &&
3401 		    (deadline - now > 3 * HZ)) {
3402 			ata_link_printk(link, KERN_WARNING,
3403 				"link is slow to respond, please be patient "
3404 				"(ready=%d)\n", tmp);
3405 			warned = 1;
3406 		}
3407 
3408 		msleep(50);
3409 	}
3410 }
3411 
3412 /**
3413  *	ata_wait_after_reset - wait for link to become ready after reset
3414  *	@link: link to be waited on
3415  *	@deadline: deadline jiffies for the operation
3416  *	@check_ready: callback to check link readiness
3417  *
3418  *	Wait for @link to become ready after reset.
3419  *
3420  *	LOCKING:
3421  *	EH context.
3422  *
3423  *	RETURNS:
3424  *	0 if @link is ready before @deadline; otherwise, -errno.
3425  */
3426 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3427 				int (*check_ready)(struct ata_link *link))
3428 {
3429 	msleep(ATA_WAIT_AFTER_RESET);
3430 
3431 	return ata_wait_ready(link, deadline, check_ready);
3432 }
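
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver-supplied ->softreset() normally ends with a wait of this kind.
 * The readiness callback below is hypothetical; it only needs to return a
 * positive value when ready, 0 when busy and -ENODEV when nothing is
 * there (0xff status), matching the ata_wait_ready() contract above.
 *
 *	static int example_check_ready(struct ata_link *link)
 *	{
 *		u8 status = example_read_status(link->ap);	// hypothetical
 *
 *		if (status == 0xff)
 *			return -ENODEV;
 *		return !(status & ATA_BUSY);
 *	}
 *
 *	...
 *	rc = ata_wait_after_reset(link, deadline, example_check_ready);
 */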
3433 
3434 /**
3435  *	sata_link_debounce - debounce SATA phy status
3436  *	@link: ATA link to debounce SATA phy status for
3437  *	@params: timing parameters { interval, duration, timeout } in msec
3438  *	@deadline: deadline jiffies for the operation
3439  *
3440  *	Make sure SStatus of @link reaches a stable state, determined by
3441  *	holding the same value where DET is not 1 for @duration polled
3442  *	every @interval, before @timeout.  Timeout constrains the
3443  *	beginning of the stable state.  Because DET gets stuck at 1 on
3444  *	some controllers after hot unplugging, this function waits
3445  *	until timeout and then returns 0 if DET is stable at 1.
3446  *
3447  *	@timeout is further limited by @deadline.  The sooner of the
3448  *	two is used.
3449  *
3450  *	LOCKING:
3451  *	Kernel thread context (may sleep)
3452  *
3453  *	RETURNS:
3454  *	0 on success, -errno on failure.
3455  */
3456 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3457 		       unsigned long deadline)
3458 {
3459 	unsigned long interval = params[0];
3460 	unsigned long duration = params[1];
3461 	unsigned long last_jiffies, t;
3462 	u32 last, cur;
3463 	int rc;
3464 
3465 	t = ata_deadline(jiffies, params[2]);
3466 	if (time_before(t, deadline))
3467 		deadline = t;
3468 
3469 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3470 		return rc;
3471 	cur &= 0xf;
3472 
3473 	last = cur;
3474 	last_jiffies = jiffies;
3475 
3476 	while (1) {
3477 		msleep(interval);
3478 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3479 			return rc;
3480 		cur &= 0xf;
3481 
3482 		/* DET stable? */
3483 		if (cur == last) {
3484 			if (cur == 1 && time_before(jiffies, deadline))
3485 				continue;
3486 			if (time_after(jiffies,
3487 				       ata_deadline(last_jiffies, duration)))
3488 				return 0;
3489 			continue;
3490 		}
3491 
3492 		/* unstable, start over */
3493 		last = cur;
3494 		last_jiffies = jiffies;
3495 
3496 		/* Check deadline.  If debouncing failed, return
3497 		 * -EPIPE to tell upper layer to lower link speed.
3498 		 */
3499 		if (time_after(jiffies, deadline))
3500 			return -EPIPE;
3501 	}
3502 }
3503 
3504 /**
3505  *	sata_link_resume - resume SATA link
3506  *	@link: ATA link to resume SATA
3507  *	@params: timing parameters { interval, duration, timeout } in msec
3508  *	@deadline: deadline jiffies for the operation
3509  *
3510  *	Resume SATA phy @link and debounce it.
3511  *
3512  *	LOCKING:
3513  *	Kernel thread context (may sleep)
3514  *
3515  *	RETURNS:
3516  *	0 on success, -errno on failure.
3517  */
3518 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3519 		     unsigned long deadline)
3520 {
3521 	u32 scontrol, serror;
3522 	int rc;
3523 
3524 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3525 		return rc;
3526 
3527 	scontrol = (scontrol & 0x0f0) | 0x300;
3528 
3529 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3530 		return rc;
3531 
3532 	/* Some PHYs react badly if SStatus is pounded immediately
3533 	 * after resuming.  Delay 200ms before debouncing.
3534 	 */
3535 	msleep(200);
3536 
3537 	if ((rc = sata_link_debounce(link, params, deadline)))
3538 		return rc;
3539 
3540 	/* clear SError, some PHYs require this even for SRST to work */
3541 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3542 		rc = sata_scr_write(link, SCR_ERROR, serror);
3543 
3544 	return rc != -EINVAL ? rc : 0;
3545 }
3546 
3547 /**
3548  *	ata_std_prereset - prepare for reset
3549  *	@link: ATA link to be reset
3550  *	@deadline: deadline jiffies for the operation
3551  *
3552  *	@link is about to be reset.  Initialize it.  Failure from
3553  *	prereset makes libata abort the whole reset sequence and give up
3554  *	that port, so prereset should be best-effort.  It does its
3555  *	best to prepare for the reset sequence but if things go wrong, it
3556  *	should just whine, not fail.
3557  *
3558  *	LOCKING:
3559  *	Kernel thread context (may sleep)
3560  *
3561  *	RETURNS:
3562  *	0 on success, -errno otherwise.
3563  */
3564 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3565 {
3566 	struct ata_port *ap = link->ap;
3567 	struct ata_eh_context *ehc = &link->eh_context;
3568 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3569 	int rc;
3570 
3571 	/* if we're about to do hardreset, nothing more to do */
3572 	if (ehc->i.action & ATA_EH_HARDRESET)
3573 		return 0;
3574 
3575 	/* if SATA, resume link */
3576 	if (ap->flags & ATA_FLAG_SATA) {
3577 		rc = sata_link_resume(link, timing, deadline);
3578 		/* whine about phy resume failure but proceed */
3579 		if (rc && rc != -EOPNOTSUPP)
3580 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3581 					"link for reset (errno=%d)\n", rc);
3582 	}
3583 
3584 	/* no point in trying softreset on offline link */
3585 	if (ata_link_offline(link))
3586 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3587 
3588 	return 0;
3589 }
3590 
3591 /**
3592  *	sata_link_hardreset - reset link via SATA phy reset
3593  *	@link: link to reset
3594  *	@timing: timing parameters { interval, duration, timeout } in msec
3595  *	@deadline: deadline jiffies for the operation
3596  *	@online: optional out parameter indicating link onlineness
3597  *	@check_ready: optional callback to check link readiness
3598  *
3599  *	SATA phy-reset @link using DET bits of SControl register.
3600  *	After hardreset, link readiness is waited upon using
3601  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3602  *	allowed to not specify @check_ready and wait on their own after this
3603  *	function returns.  Device classification is LLD's
3604  *	responsibility.
3605  *
3606  *	*@online is set to one iff reset succeeded and @link is online
3607  *	after reset.
3608  *
3609  *	LOCKING:
3610  *	Kernel thread context (may sleep)
3611  *
3612  *	RETURNS:
3613  *	0 on success, -errno otherwise.
3614  */
3615 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3616 			unsigned long deadline,
3617 			bool *online, int (*check_ready)(struct ata_link *))
3618 {
3619 	u32 scontrol;
3620 	int rc;
3621 
3622 	DPRINTK("ENTER\n");
3623 
3624 	if (online)
3625 		*online = false;
3626 
3627 	if (sata_set_spd_needed(link)) {
3628 		/* SATA spec says nothing about how to reconfigure
3629 		 * spd.  To be on the safe side, turn off phy during
3630 		 * reconfiguration.  This works for at least ICH7 AHCI
3631 		 * and Sil3124.
3632 		 */
3633 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3634 			goto out;
3635 
3636 		scontrol = (scontrol & 0x0f0) | 0x304;
3637 
3638 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3639 			goto out;
3640 
3641 		sata_set_spd(link);
3642 	}
3643 
3644 	/* issue phy wake/reset */
3645 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3646 		goto out;
3647 
3648 	scontrol = (scontrol & 0x0f0) | 0x301;
3649 
3650 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3651 		goto out;
3652 
3653 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3654 	 * 10.4.2 says at least 1 ms.
3655 	 */
3656 	msleep(1);
3657 
3658 	/* bring link back */
3659 	rc = sata_link_resume(link, timing, deadline);
3660 	if (rc)
3661 		goto out;
3662 	/* if link is offline nothing more to do */
3663 	if (ata_link_offline(link))
3664 		goto out;
3665 
3666 	/* Link is online.  From this point, -ENODEV too is an error. */
3667 	if (online)
3668 		*online = true;
3669 
3670 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3671 		/* If PMP is supported, we have to do follow-up SRST.
3672 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3673 		 * the first port is empty.  Wait only for
3674 		 * ATA_TMOUT_PMP_SRST_WAIT.
3675 		 */
3676 		if (check_ready) {
3677 			unsigned long pmp_deadline;
3678 
3679 			pmp_deadline = ata_deadline(jiffies,
3680 						    ATA_TMOUT_PMP_SRST_WAIT);
3681 			if (time_after(pmp_deadline, deadline))
3682 				pmp_deadline = deadline;
3683 			ata_wait_ready(link, pmp_deadline, check_ready);
3684 		}
3685 		rc = -EAGAIN;
3686 		goto out;
3687 	}
3688 
3689 	rc = 0;
3690 	if (check_ready)
3691 		rc = ata_wait_ready(link, deadline, check_ready);
3692  out:
3693 	if (rc && rc != -EAGAIN) {
3694 		/* online is set iff link is online && reset succeeded */
3695 		if (online)
3696 			*online = false;
3697 		ata_link_printk(link, KERN_ERR,
3698 				"COMRESET failed (errno=%d)\n", rc);
3699 	}
3700 	DPRINTK("EXIT, rc=%d\n", rc);
3701 	return rc;
3702 }
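
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * an LLD that can observe readiness typically wraps this helper from its
 * own ->hardreset() and classifies the device itself afterwards; compare
 * sata_std_hardreset() below, which skips both steps.  The classification
 * helper and the example_check_ready() callback are hypothetical.
 *
 *	static int example_hardreset(struct ata_link *link, unsigned int *class,
 *				     unsigned long deadline)
 *	{
 *		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *		int rc;
 *
 *		rc = sata_link_hardreset(link, timing, deadline, &online,
 *					 example_check_ready);
 *		if (online)
 *			*class = example_classify(link);	// hypothetical
 *		return rc;
 *	}
 */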
3703 
3704 /**
3705  *	sata_std_hardreset - COMRESET w/o waiting or classification
3706  *	@link: link to reset
3707  *	@class: resulting class of attached device
3708  *	@deadline: deadline jiffies for the operation
3709  *
3710  *	Standard SATA COMRESET w/o waiting or classification.
3711  *
3712  *	LOCKING:
3713  *	Kernel thread context (may sleep)
3714  *
3715  *	RETURNS:
3716  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3717  */
3718 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3719 		       unsigned long deadline)
3720 {
3721 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3722 	bool online;
3723 	int rc;
3724 
3725 	/* do hardreset */
3726 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3727 	return online ? -EAGAIN : rc;
3728 }
3729 
3730 /**
3731  *	ata_std_postreset - standard postreset callback
3732  *	@link: the target ata_link
3733  *	@classes: classes of attached devices
3734  *
3735  *	This function is invoked after a successful reset.  Note that
3736  *	the device might have been reset more than once using
3737  *	different reset methods before postreset is invoked.
3738  *
3739  *	LOCKING:
3740  *	Kernel thread context (may sleep)
3741  */
3742 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3743 {
3744 	u32 serror;
3745 
3746 	DPRINTK("ENTER\n");
3747 
3748 	/* reset complete, clear SError */
3749 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3750 		sata_scr_write(link, SCR_ERROR, serror);
3751 
3752 	/* print link status */
3753 	sata_print_link_status(link);
3754 
3755 	DPRINTK("EXIT\n");
3756 }
3757 
3758 /**
3759  *	ata_dev_same_device - Determine whether new ID matches configured device
3760  *	@dev: device to compare against
3761  *	@new_class: class of the new device
3762  *	@new_id: IDENTIFY page of the new device
3763  *
3764  *	Compare @new_class and @new_id against @dev and determine
3765  *	whether @dev is the device indicated by @new_class and
3766  *	@new_id.
3767  *
3768  *	LOCKING:
3769  *	None.
3770  *
3771  *	RETURNS:
3772  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3773  */
3774 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3775 			       const u16 *new_id)
3776 {
3777 	const u16 *old_id = dev->id;
3778 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3779 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3780 
3781 	if (dev->class != new_class) {
3782 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3783 			       dev->class, new_class);
3784 		return 0;
3785 	}
3786 
3787 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3788 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3789 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3790 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3791 
3792 	if (strcmp(model[0], model[1])) {
3793 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3794 			       "'%s' != '%s'\n", model[0], model[1]);
3795 		return 0;
3796 	}
3797 
3798 	if (strcmp(serial[0], serial[1])) {
3799 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3800 			       "'%s' != '%s'\n", serial[0], serial[1]);
3801 		return 0;
3802 	}
3803 
3804 	return 1;
3805 }
3806 
3807 /**
3808  *	ata_dev_reread_id - Re-read IDENTIFY data
3809  *	@dev: target ATA device
3810  *	@readid_flags: read ID flags
3811  *
3812  *	Re-read IDENTIFY page and make sure @dev is still attached to
3813  *	the port.
3814  *
3815  *	LOCKING:
3816  *	Kernel thread context (may sleep)
3817  *
3818  *	RETURNS:
3819  *	0 on success, negative errno otherwise
3820  */
3821 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3822 {
3823 	unsigned int class = dev->class;
3824 	u16 *id = (void *)dev->link->ap->sector_buf;
3825 	int rc;
3826 
3827 	/* read ID data */
3828 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3829 	if (rc)
3830 		return rc;
3831 
3832 	/* is the device still there? */
3833 	if (!ata_dev_same_device(dev, class, id))
3834 		return -ENODEV;
3835 
3836 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3837 	return 0;
3838 }
3839 
3840 /**
3841  *	ata_dev_revalidate - Revalidate ATA device
3842  *	@dev: device to revalidate
3843  *	@new_class: new class code
3844  *	@readid_flags: read ID flags
3845  *
3846  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3847  *	port and reconfigure it according to the new IDENTIFY page.
3848  *
3849  *	LOCKING:
3850  *	Kernel thread context (may sleep)
3851  *
3852  *	RETURNS:
3853  *	0 on success, negative errno otherwise
3854  */
3855 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3856 		       unsigned int readid_flags)
3857 {
3858 	u64 n_sectors = dev->n_sectors;
3859 	int rc;
3860 
3861 	if (!ata_dev_enabled(dev))
3862 		return -ENODEV;
3863 
3864 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3865 	if (ata_class_enabled(new_class) &&
3866 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3867 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3868 			       dev->class, new_class);
3869 		rc = -ENODEV;
3870 		goto fail;
3871 	}
3872 
3873 	/* re-read ID */
3874 	rc = ata_dev_reread_id(dev, readid_flags);
3875 	if (rc)
3876 		goto fail;
3877 
3878 	/* configure device according to the new ID */
3879 	rc = ata_dev_configure(dev);
3880 	if (rc)
3881 		goto fail;
3882 
3883 	/* verify n_sectors hasn't changed */
3884 	if (dev->class == ATA_DEV_ATA && n_sectors &&
3885 	    dev->n_sectors != n_sectors) {
3886 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3887 			       "%llu != %llu\n",
3888 			       (unsigned long long)n_sectors,
3889 			       (unsigned long long)dev->n_sectors);
3890 
3891 		/* restore original n_sectors */
3892 		dev->n_sectors = n_sectors;
3893 
3894 		rc = -ENODEV;
3895 		goto fail;
3896 	}
3897 
3898 	return 0;
3899 
3900  fail:
3901 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3902 	return rc;
3903 }
3904 
3905 struct ata_blacklist_entry {
3906 	const char *model_num;
3907 	const char *model_rev;
3908 	unsigned long horkage;
3909 };
3910 
3911 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3912 	/* Devices with DMA related problems under Linux */
3913 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
3914 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
3915 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
3916 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
3917 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
3918 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
3919 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
3920 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
3921 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
3922 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
3923 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
3924 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
3925 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
3926 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
3927 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
3928 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
3929 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
3930 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
3931 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
3932 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
3933 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
3934 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
3935 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
3936 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
3937 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
3938 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
3939 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3940 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
3941 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
3942 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
3943 	/* Odd clown on sil3726/4726 PMPs */
3944 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
3945 
3946 	/* Weird ATAPI devices */
3947 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
3948 
3949 	/* Devices we expect to fail diagnostics */
3950 
3951 	/* Devices where NCQ should be avoided */
3952 	/* NCQ is slow */
3953 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
3954 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
3955 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
3956 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
3957 	/* NCQ is broken */
3958 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
3959 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
3960 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
3961 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
3962 
3963 	/* Blacklist entries taken from Silicon Image 3124/3132
3964 	   Windows driver .inf file - also several Linux problem reports */
3965 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
3966 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
3967 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3968 
3969 	/* devices which puke on READ_NATIVE_MAX */
3970 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
3971 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3972 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3973 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
3974 
3975 	/* Devices which report 1 sector over size HPA */
3976 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3977 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3978 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3979 
3980 	/* Devices which get the IVB wrong */
3981 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3982 	/* Maybe we should just blacklist TSSTcorp... */
3983 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
3984 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
3985 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
3986 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
3987 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
3988 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
3989 
3990 	/* End Marker */
3991 	{ }
3992 };
3993 
3994 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3995 {
3996 	const char *p;
3997 	int len;
3998 
3999 	/*
4000 	 * check for trailing wildcard: *\0
4001 	 */
4002 	p = strchr(patt, wildchar);
4003 	if (p && ((*(p + 1)) == 0))
4004 		len = p - patt;
4005 	else {
4006 		len = strlen(name);
4007 		if (!len) {
4008 			if (!*patt)
4009 				return 0;
4010 			return -1;
4011 		}
4012 	}
4013 
4014 	return strncmp(patt, name, len);
4015 }
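
/*
 * Illustrative note (not part of the original source): strn_pattern_cmp()
 * treats a trailing wildcard in the pattern as "match any suffix";
 * otherwise the names are compared over the length of @name.  With the
 * blacklist table above, for example:
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*')    == 0
 *	strn_pattern_cmp("ST380817AS", "ST380817AS", '*')       == 0
 *	strn_pattern_cmp("WDC AC11000H", "WDC AC22100H", '*')   != 0
 */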
4016 
4017 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4018 {
4019 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4020 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4021 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4022 
4023 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4024 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4025 
4026 	while (ad->model_num) {
4027 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4028 			if (ad->model_rev == NULL)
4029 				return ad->horkage;
4030 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4031 				return ad->horkage;
4032 		}
4033 		ad++;
4034 	}
4035 	return 0;
4036 }
4037 
4038 static int ata_dma_blacklisted(const struct ata_device *dev)
4039 {
4040 	/* We don't support polling DMA.
4041 	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4042 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4043 	 */
4044 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4045 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4046 		return 1;
4047 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4048 }
4049 
4050 /**
4051  *	ata_is_40wire		-	check drive side detection
4052  *	@dev: device
4053  *
4054  *	Perform drive side detection decoding, allowing for device vendors
4055  *	who can't follow the documentation.
4056  */
4057 
4058 static int ata_is_40wire(struct ata_device *dev)
4059 {
4060 	if (dev->horkage & ATA_HORKAGE_IVB)
4061 		return ata_drive_40wire_relaxed(dev->id);
4062 	return ata_drive_40wire(dev->id);
4063 }
4064 
4065 /**
4066  *	cable_is_40wire		-	40/80/SATA decider
4067  *	@ap: port to consider
4068  *
4069  *	This function encapsulates the policy for speed management
4070  *	in one place. At the moment we don't cache the result but
4071  *	there is a good case for setting ap->cbl to the result when
4072  *	we are called with unknown cables (and figuring out if it
4073  *	impacts hotplug at all).
4074  *
4075  *	Return 1 if the cable appears to be 40 wire.
4076  */
4077 
4078 static int cable_is_40wire(struct ata_port *ap)
4079 {
4080 	struct ata_link *link;
4081 	struct ata_device *dev;
4082 
4083 	/* If the controller thinks we are 40 wire, we are */
4084 	if (ap->cbl == ATA_CBL_PATA40)
4085 		return 1;
4086 	/* If the controller thinks we are 80 wire, we are */
4087 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4088 		return 0;
4089 	/* If the system is known to use a short 40 wire cable (e.g. a laptop),
4090 	   then we allow 80 wire modes even if the drive isn't sure */
4091 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4092 		return 0;
4093 	/* If the controller doesn't know, we scan.
4094 
4095 	   - Note: We look for all 40 wire detects at this point.
4096 	     Any 80 wire detect is taken to be 80 wire cable
4097 	     because
4098 	     - In many setups only the one drive (slave if present)
4099                will give a valid detect
4100              - If you have a non detect capable drive you don't
4101                want it to colour the choice
4102         */
4103 	ata_port_for_each_link(link, ap) {
4104 		ata_link_for_each_dev(dev, link) {
4105 			if (!ata_is_40wire(dev))
4106 				return 0;
4107 		}
4108 	}
4109 	return 1;
4110 }
4111 
4112 /**
4113  *	ata_dev_xfermask - Compute supported xfermask of the given device
4114  *	@dev: Device to compute xfermask for
4115  *
4116  *	Compute supported xfermask of @dev and store it in
4117  *	dev->*_mask.  This function is responsible for applying all
4118  *	known limits including host controller limits, device
4119  *	blacklist, etc...
4120  *
4121  *	LOCKING:
4122  *	None.
4123  */
4124 static void ata_dev_xfermask(struct ata_device *dev)
4125 {
4126 	struct ata_link *link = dev->link;
4127 	struct ata_port *ap = link->ap;
4128 	struct ata_host *host = ap->host;
4129 	unsigned long xfer_mask;
4130 
4131 	/* controller modes available */
4132 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4133 				      ap->mwdma_mask, ap->udma_mask);
4134 
4135 	/* drive modes available */
4136 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4137 				       dev->mwdma_mask, dev->udma_mask);
4138 	xfer_mask &= ata_id_xfermask(dev->id);
4139 
4140 	/*
4141 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4142 	 *	cable
4143 	 */
4144 	if (ata_dev_pair(dev)) {
4145 		/* No PIO5 or PIO6 */
4146 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4147 		/* No MWDMA3 or MWDMA4 */
4148 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4149 	}
4150 
4151 	if (ata_dma_blacklisted(dev)) {
4152 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4153 		ata_dev_printk(dev, KERN_WARNING,
4154 			       "device is on DMA blacklist, disabling DMA\n");
4155 	}
4156 
4157 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4158 	    host->simplex_claimed && host->simplex_claimed != ap) {
4159 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4160 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4161 			       "other device, disabling DMA\n");
4162 	}
4163 
4164 	if (ap->flags & ATA_FLAG_NO_IORDY)
4165 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4166 
4167 	if (ap->ops->mode_filter)
4168 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4169 
4170 	/* Apply cable rule here.  Don't apply it early because when
4171 	 * we handle hot plug the cable type can itself change.
4172 	 * Check this last so that we know if the transfer rate was
4173 	 * solely limited by the cable.
4174 	 * Unknown or 80 wire cables reported host side are checked
4175 	 * drive side as well. Cases where we know a 40wire cable
4176 	 * is used safely for 80 are not checked here.
4177 	 */
4178 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4179 		/* UDMA/44 or higher would be available */
4180 		if (cable_is_40wire(ap)) {
4181 			ata_dev_printk(dev, KERN_WARNING,
4182 				 "limited to UDMA/33 due to 40-wire cable\n");
4183 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4184 		}
4185 
4186 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4187 			    &dev->mwdma_mask, &dev->udma_mask);
4188 }
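
/*
 * Illustrative sketch (not part of the original source): the xfer_mask
 * manipulated above packs the PIO, MWDMA and UDMA masks into a single
 * word at ATA_SHIFT_PIO/ATA_SHIFT_MWDMA/ATA_SHIFT_UDMA.  Roughly:
 *
 *	unsigned long mask, pio, mwdma, udma;
 *
 *	mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);
 *	ata_unpack_xfermask(mask, &pio, &mwdma, &udma);
 *
 * after which pio == ATA_PIO4, mwdma == ATA_MWDMA2 and udma == ATA_UDMA5.
 */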
4189 
4190 /**
4191  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4192  *	@dev: Device to which command will be sent
4193  *
4194  *	Issue SET FEATURES - XFER MODE command to device @dev
4195  *	on its port.
4196  *
4197  *	LOCKING:
4198  *	PCI/etc. bus probe sem.
4199  *
4200  *	RETURNS:
4201  *	0 on success, AC_ERR_* mask otherwise.
4202  */
4203 
4204 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4205 {
4206 	struct ata_taskfile tf;
4207 	unsigned int err_mask;
4208 
4209 	/* set up set-features taskfile */
4210 	DPRINTK("set features - xfer mode\n");
4211 
4212 	/* Some controllers and ATAPI devices show flaky interrupt
4213 	 * behavior after setting xfer mode.  Use polling instead.
4214 	 */
4215 	ata_tf_init(dev, &tf);
4216 	tf.command = ATA_CMD_SET_FEATURES;
4217 	tf.feature = SETFEATURES_XFER;
4218 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4219 	tf.protocol = ATA_PROT_NODATA;
4220 	/* If we are using IORDY we must send the mode setting command */
4221 	if (ata_pio_need_iordy(dev))
4222 		tf.nsect = dev->xfer_mode;
4223 	/* If the device has IORDY and the controller does not - turn it off */
4224  	else if (ata_id_has_iordy(dev->id))
4225 		tf.nsect = 0x01;
4226 	else /* In the ancient relic department - skip all of this */
4227 		return 0;
4228 
4229 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4230 
4231 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4232 	return err_mask;
4233 }
4234 /**
4235  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4236  *	@dev: Device to which command will be sent
4237  *	@enable: Whether to enable or disable the feature
4238  *	@feature: Feature to set, passed in the sector count field
4239  *
4240  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4241  *	with the sector count field set to @feature.
4242  *
4243  *	LOCKING:
4244  *	PCI/etc. bus probe sem.
4245  *
4246  *	RETURNS:
4247  *	0 on success, AC_ERR_* mask otherwise.
4248  */
4249 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4250 					u8 feature)
4251 {
4252 	struct ata_taskfile tf;
4253 	unsigned int err_mask;
4254 
4255 	/* set up set-features taskfile */
4256 	DPRINTK("set features - SATA features\n");
4257 
4258 	ata_tf_init(dev, &tf);
4259 	tf.command = ATA_CMD_SET_FEATURES;
4260 	tf.feature = enable;
4261 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4262 	tf.protocol = ATA_PROT_NODATA;
4263 	tf.nsect = feature;
4264 
4265 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4266 
4267 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4268 	return err_mask;
4269 }
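
/*
 * Illustrative example (not part of the original source): enabling the
 * SATA Asynchronous Notification feature, in the same way libata's
 * device configuration code uses this helper:
 *
 *	unsigned int err_mask;
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR,
 *			       "failed to enable AN (err_mask=0x%x)\n",
 *			       err_mask);
 */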
4270 
4271 /**
4272  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4273  *	@dev: Device to which command will be sent
4274  *	@heads: Number of heads (taskfile parameter)
4275  *	@sectors: Number of sectors (taskfile parameter)
4276  *
4277  *	LOCKING:
4278  *	Kernel thread context (may sleep)
4279  *
4280  *	RETURNS:
4281  *	0 on success, AC_ERR_* mask otherwise.
4282  */
4283 static unsigned int ata_dev_init_params(struct ata_device *dev,
4284 					u16 heads, u16 sectors)
4285 {
4286 	struct ata_taskfile tf;
4287 	unsigned int err_mask;
4288 
4289 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4290 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4291 		return AC_ERR_INVALID;
4292 
4293 	/* set up init dev params taskfile */
4294 	DPRINTK("init dev params \n");
4295 
4296 	ata_tf_init(dev, &tf);
4297 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4298 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4299 	tf.protocol = ATA_PROT_NODATA;
4300 	tf.nsect = sectors;
4301 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4302 
4303 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4304 	/* A clean abort indicates an original or just-out-of-spec drive
4305 	   and we should continue as we issue the setup based on the
4306 	   drive-reported working geometry */
4307 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4308 		err_mask = 0;
4309 
4310 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4311 	return err_mask;
4312 }
4313 
4314 /**
4315  *	ata_sg_clean - Unmap DMA memory associated with command
4316  *	@qc: Command containing DMA memory to be released
4317  *
4318  *	Unmap all mapped DMA memory associated with this command.
4319  *
4320  *	LOCKING:
4321  *	spin_lock_irqsave(host lock)
4322  */
4323 void ata_sg_clean(struct ata_queued_cmd *qc)
4324 {
4325 	struct ata_port *ap = qc->ap;
4326 	struct scatterlist *sg = qc->sg;
4327 	int dir = qc->dma_dir;
4328 
4329 	WARN_ON(sg == NULL);
4330 
4331 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4332 
4333 	if (qc->n_elem)
4334 		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4335 
4336 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4337 	qc->sg = NULL;
4338 }
4339 
4340 /**
4341  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4342  *	@qc: Metadata associated with taskfile to check
4343  *
4344  *	Allow low-level driver to filter ATA PACKET commands, returning
4345  *	a status indicating whether or not it is OK to use DMA for the
4346  *	supplied PACKET command.
4347  *
4348  *	LOCKING:
4349  *	spin_lock_irqsave(host lock)
4350  *
4351  *	RETURNS: 0 when ATAPI DMA can be used
4352  *               nonzero otherwise
4353  */
4354 int atapi_check_dma(struct ata_queued_cmd *qc)
4355 {
4356 	struct ata_port *ap = qc->ap;
4357 
4358 	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4359 	 * few ATAPI devices choke on such DMA requests.
4360 	 */
4361 	if (unlikely(qc->nbytes & 15))
4362 		return 1;
4363 
4364 	if (ap->ops->check_atapi_dma)
4365 		return ap->ops->check_atapi_dma(qc);
4366 
4367 	return 0;
4368 }
4369 
4370 /**
4371  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4372  *	@qc: ATA command in question
4373  *
4374  *	Non-NCQ commands cannot run with any other command, NCQ or
4375  *	not.  As the upper layer only knows the queue depth, we are
4376  *	responsible for maintaining exclusion.  This function checks
4377  *	whether a new command @qc can be issued.
4378  *
4379  *	LOCKING:
4380  *	spin_lock_irqsave(host lock)
4381  *
4382  *	RETURNS:
4383  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4384  */
4385 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4386 {
4387 	struct ata_link *link = qc->dev->link;
4388 
4389 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4390 		if (!ata_tag_valid(link->active_tag))
4391 			return 0;
4392 	} else {
4393 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4394 			return 0;
4395 	}
4396 
4397 	return ATA_DEFER_LINK;
4398 }
4399 
4400 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4401 
4402 /**
4403  *	ata_sg_init - Associate command with scatter-gather table.
4404  *	@qc: Command to be associated
4405  *	@sg: Scatter-gather table.
4406  *	@n_elem: Number of elements in s/g table.
4407  *
4408  *	Initialize the data-related elements of queued_cmd @qc
4409  *	to point to a scatter-gather table @sg, containing @n_elem
4410  *	elements.
4411  *
4412  *	LOCKING:
4413  *	spin_lock_irqsave(host lock)
4414  */
4415 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4416 		 unsigned int n_elem)
4417 {
4418 	qc->sg = sg;
4419 	qc->n_elem = n_elem;
4420 	qc->cursg = qc->sg;
4421 }
4422 
4423 /**
4424  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4425  *	@qc: Command with scatter-gather table to be mapped.
4426  *
4427  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4428  *
4429  *	LOCKING:
4430  *	spin_lock_irqsave(host lock)
4431  *
4432  *	RETURNS:
4433  *	Zero on success, negative on error.
4434  *
4435  */
4436 static int ata_sg_setup(struct ata_queued_cmd *qc)
4437 {
4438 	struct ata_port *ap = qc->ap;
4439 	unsigned int n_elem;
4440 
4441 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4442 
4443 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4444 	if (n_elem < 1)
4445 		return -1;
4446 
4447 	DPRINTK("%d sg elements mapped\n", n_elem);
4448 
4449 	qc->n_elem = n_elem;
4450 	qc->flags |= ATA_QCFLAG_DMAMAP;
4451 
4452 	return 0;
4453 }
4454 
4455 /**
4456  *	swap_buf_le16 - swap halves of 16-bit words in place
4457  *	@buf:  Buffer to swap
4458  *	@buf_words:  Number of 16-bit words in buffer.
4459  *
4460  *	Swap halves of 16-bit words if needed to convert from
4461  *	little-endian byte order to native cpu byte order, or
4462  *	vice-versa.
4463  *
4464  *	LOCKING:
4465  *	Inherited from caller.
4466  */
4467 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4468 {
4469 #ifdef __BIG_ENDIAN
4470 	unsigned int i;
4471 
4472 	for (i = 0; i < buf_words; i++)
4473 		buf[i] = le16_to_cpu(buf[i]);
4474 #endif /* __BIG_ENDIAN */
4475 }
4476 
4477 /**
4478  *	ata_qc_new - Request an available ATA command, for queueing
4479  *	@ap: Port associated with device @dev
4480  *	@dev: Device from whom we request an available command structure
4481  *
4482  *	LOCKING:
4483  *	None.
4484  */
4485 
4486 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4487 {
4488 	struct ata_queued_cmd *qc = NULL;
4489 	unsigned int i;
4490 
4491 	/* no command while frozen */
4492 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4493 		return NULL;
4494 
4495 	/* the last tag is reserved for the internal command. */
4496 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4497 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4498 			qc = __ata_qc_from_tag(ap, i);
4499 			break;
4500 		}
4501 
4502 	if (qc)
4503 		qc->tag = i;
4504 
4505 	return qc;
4506 }
4507 
4508 /**
4509  *	ata_qc_new_init - Request an available ATA command, and initialize it
4510  *	@dev: Device from whom we request an available command structure
4511  *
4512  *	LOCKING:
4513  *	None.
4514  */
4515 
4516 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4517 {
4518 	struct ata_port *ap = dev->link->ap;
4519 	struct ata_queued_cmd *qc;
4520 
4521 	qc = ata_qc_new(ap);
4522 	if (qc) {
4523 		qc->scsicmd = NULL;
4524 		qc->ap = ap;
4525 		qc->dev = dev;
4526 
4527 		ata_qc_reinit(qc);
4528 	}
4529 
4530 	return qc;
4531 }
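
/*
 * Illustrative sketch (not part of the original source): the usual life
 * cycle of a queued command as driven by the translation layer.  The
 * completion callback name below is a placeholder:
 *
 *	struct ata_queued_cmd *qc;
 *
 *	qc = ata_qc_new_init(dev);		(allocate a free tag)
 *	if (!qc)
 *		return;				(defer and retry later)
 *	... fill qc->tf, qc->dma_dir, qc->nbytes ...
 *	ata_sg_init(qc, sgl, n_elem);		(attach the S/G table)
 *	qc->complete_fn = my_qc_complete;	(hypothetical callback)
 *	ata_qc_issue(qc);			(map DMA and issue)
 */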
4532 
4533 /**
4534  *	ata_qc_free - free unused ata_queued_cmd
4535  *	@qc: Command to complete
4536  *
4537  *	Designed to free unused ata_queued_cmd object
4538  *	in case something prevents using it.
4539  *
4540  *	LOCKING:
4541  *	spin_lock_irqsave(host lock)
4542  */
4543 void ata_qc_free(struct ata_queued_cmd *qc)
4544 {
4545 	struct ata_port *ap = qc->ap;
4546 	unsigned int tag;
4547 
4548 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4549 
4550 	qc->flags = 0;
4551 	tag = qc->tag;
4552 	if (likely(ata_tag_valid(tag))) {
4553 		qc->tag = ATA_TAG_POISON;
4554 		clear_bit(tag, &ap->qc_allocated);
4555 	}
4556 }
4557 
4558 void __ata_qc_complete(struct ata_queued_cmd *qc)
4559 {
4560 	struct ata_port *ap = qc->ap;
4561 	struct ata_link *link = qc->dev->link;
4562 
4563 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4564 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4565 
4566 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4567 		ata_sg_clean(qc);
4568 
4569 	/* command should be marked inactive atomically with qc completion */
4570 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4571 		link->sactive &= ~(1 << qc->tag);
4572 		if (!link->sactive)
4573 			ap->nr_active_links--;
4574 	} else {
4575 		link->active_tag = ATA_TAG_POISON;
4576 		ap->nr_active_links--;
4577 	}
4578 
4579 	/* clear exclusive status */
4580 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4581 		     ap->excl_link == link))
4582 		ap->excl_link = NULL;
4583 
4584 	/* atapi: mark qc as inactive to prevent the interrupt handler
4585 	 * from completing the command twice later, before the error handler
4586 	 * is called. (when rc != 0 and atapi request sense is needed)
4587 	 */
4588 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4589 	ap->qc_active &= ~(1 << qc->tag);
4590 
4591 	/* call completion callback */
4592 	qc->complete_fn(qc);
4593 }
4594 
4595 static void fill_result_tf(struct ata_queued_cmd *qc)
4596 {
4597 	struct ata_port *ap = qc->ap;
4598 
4599 	qc->result_tf.flags = qc->tf.flags;
4600 	ap->ops->qc_fill_rtf(qc);
4601 }
4602 
4603 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4604 {
4605 	struct ata_device *dev = qc->dev;
4606 
4607 	if (ata_tag_internal(qc->tag))
4608 		return;
4609 
4610 	if (ata_is_nodata(qc->tf.protocol))
4611 		return;
4612 
4613 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4614 		return;
4615 
4616 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4617 }
4618 
4619 /**
4620  *	ata_qc_complete - Complete an active ATA command
4621  *	@qc: Command to complete
4623  *
4624  *	Indicate to the mid and upper layers that an ATA
4625  *	command has completed, with either an ok or not-ok status.
4626  *
4627  *	LOCKING:
4628  *	spin_lock_irqsave(host lock)
4629  */
4630 void ata_qc_complete(struct ata_queued_cmd *qc)
4631 {
4632 	struct ata_port *ap = qc->ap;
4633 
4634 	/* XXX: New EH and old EH use different mechanisms to
4635 	 * synchronize EH with regular execution path.
4636 	 *
4637 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4638 	 * Normal execution path is responsible for not accessing a
4639 	 * failed qc.  libata core enforces the rule by returning NULL
4640 	 * from ata_qc_from_tag() for failed qcs.
4641 	 *
4642 	 * Old EH depends on ata_qc_complete() nullifying completion
4643 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4644 	 * not synchronize with interrupt handler.  Only PIO task is
4645 	 * taken care of.
4646 	 */
4647 	if (ap->ops->error_handler) {
4648 		struct ata_device *dev = qc->dev;
4649 		struct ata_eh_info *ehi = &dev->link->eh_info;
4650 
4651 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4652 
4653 		if (unlikely(qc->err_mask))
4654 			qc->flags |= ATA_QCFLAG_FAILED;
4655 
4656 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4657 			if (!ata_tag_internal(qc->tag)) {
4658 				/* always fill result TF for failed qc */
4659 				fill_result_tf(qc);
4660 				ata_qc_schedule_eh(qc);
4661 				return;
4662 			}
4663 		}
4664 
4665 		/* read result TF if requested */
4666 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4667 			fill_result_tf(qc);
4668 
4669 		/* Some commands need post-processing after successful
4670 		 * completion.
4671 		 */
4672 		switch (qc->tf.command) {
4673 		case ATA_CMD_SET_FEATURES:
4674 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4675 			    qc->tf.feature != SETFEATURES_WC_OFF)
4676 				break;
4677 			/* fall through */
4678 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4679 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4680 			/* revalidate device */
4681 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4682 			ata_port_schedule_eh(ap);
4683 			break;
4684 
4685 		case ATA_CMD_SLEEP:
4686 			dev->flags |= ATA_DFLAG_SLEEPING;
4687 			break;
4688 		}
4689 
4690 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4691 			ata_verify_xfer(qc);
4692 
4693 		__ata_qc_complete(qc);
4694 	} else {
4695 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4696 			return;
4697 
4698 		/* read result TF if failed or requested */
4699 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4700 			fill_result_tf(qc);
4701 
4702 		__ata_qc_complete(qc);
4703 	}
4704 }
4705 
4706 /**
4707  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4708  *	@ap: port in question
4709  *	@qc_active: new qc_active mask
4710  *
4711  *	Complete in-flight commands.  This function is meant to be
4712  *	called from the low-level driver's interrupt routine to complete
4713  *	requests normally.  ap->qc_active and @qc_active are compared
4714  *	and commands are completed accordingly.
4715  *
4716  *	LOCKING:
4717  *	spin_lock_irqsave(host lock)
4718  *
4719  *	RETURNS:
4720  *	Number of completed commands on success, -errno otherwise.
4721  */
4722 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4723 {
4724 	int nr_done = 0;
4725 	u32 done_mask;
4726 	int i;
4727 
4728 	done_mask = ap->qc_active ^ qc_active;
4729 
4730 	if (unlikely(done_mask & qc_active)) {
4731 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4732 				"(%08x->%08x)\n", ap->qc_active, qc_active);
4733 		return -EINVAL;
4734 	}
4735 
4736 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
4737 		struct ata_queued_cmd *qc;
4738 
4739 		if (!(done_mask & (1 << i)))
4740 			continue;
4741 
4742 		if ((qc = ata_qc_from_tag(ap, i))) {
4743 			ata_qc_complete(qc);
4744 			nr_done++;
4745 		}
4746 	}
4747 
4748 	return nr_done;
4749 }
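
/*
 * Illustrative sketch (not part of the original source): a low-level
 * driver's interrupt handler typically reads its hardware's view of
 * which commands are still in flight and hands that mask to
 * ata_qc_complete_multiple().  The register name FOO_ACTIVE below is a
 * placeholder:
 *
 *	u32 qc_active = readl(port_mmio + FOO_ACTIVE);
 *
 *	ata_qc_complete_multiple(ap, qc_active);
 */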
4750 
4751 /**
4752  *	ata_qc_issue - issue taskfile to device
4753  *	@qc: command to issue to device
4754  *
4755  *	Prepare an ATA command for submission to the device.
4756  *	This includes mapping the data into a DMA-able
4757  *	area, filling in the S/G table, and finally
4758  *	writing the taskfile to hardware, starting the command.
4759  *
4760  *	LOCKING:
4761  *	spin_lock_irqsave(host lock)
4762  */
4763 void ata_qc_issue(struct ata_queued_cmd *qc)
4764 {
4765 	struct ata_port *ap = qc->ap;
4766 	struct ata_link *link = qc->dev->link;
4767 	u8 prot = qc->tf.protocol;
4768 
4769 	/* Make sure only one non-NCQ command is outstanding.  The
4770 	 * check is skipped for old EH because it reuses active qc to
4771 	 * request ATAPI sense.
4772 	 */
4773 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4774 
4775 	if (ata_is_ncq(prot)) {
4776 		WARN_ON(link->sactive & (1 << qc->tag));
4777 
4778 		if (!link->sactive)
4779 			ap->nr_active_links++;
4780 		link->sactive |= 1 << qc->tag;
4781 	} else {
4782 		WARN_ON(link->sactive);
4783 
4784 		ap->nr_active_links++;
4785 		link->active_tag = qc->tag;
4786 	}
4787 
4788 	qc->flags |= ATA_QCFLAG_ACTIVE;
4789 	ap->qc_active |= 1 << qc->tag;
4790 
4791 	/* We guarantee to LLDs that they will have at least one
4792 	 * non-zero sg if the command is a data command.
4793 	 */
4794 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4795 
4796 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4797 				 (ap->flags & ATA_FLAG_PIO_DMA)))
4798 		if (ata_sg_setup(qc))
4799 			goto sg_err;
4800 
4801 	/* if device is sleeping, schedule reset and abort the link */
4802 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4803 		link->eh_info.action |= ATA_EH_RESET;
4804 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4805 		ata_link_abort(link);
4806 		return;
4807 	}
4808 
4809 	ap->ops->qc_prep(qc);
4810 
4811 	qc->err_mask |= ap->ops->qc_issue(qc);
4812 	if (unlikely(qc->err_mask))
4813 		goto err;
4814 	return;
4815 
4816 sg_err:
4817 	qc->err_mask |= AC_ERR_SYSTEM;
4818 err:
4819 	ata_qc_complete(qc);
4820 }
4821 
4822 /**
4823  *	sata_scr_valid - test whether SCRs are accessible
4824  *	@link: ATA link to test SCR accessibility for
4825  *
4826  *	Test whether SCRs are accessible for @link.
4827  *
4828  *	LOCKING:
4829  *	None.
4830  *
4831  *	RETURNS:
4832  *	1 if SCRs are accessible, 0 otherwise.
4833  */
4834 int sata_scr_valid(struct ata_link *link)
4835 {
4836 	struct ata_port *ap = link->ap;
4837 
4838 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4839 }
4840 
4841 /**
4842  *	sata_scr_read - read SCR register of the specified port
4843  *	@link: ATA link to read SCR for
4844  *	@reg: SCR to read
4845  *	@val: Place to store read value
4846  *
4847  *	Read SCR register @reg of @link into *@val.  This function is
4848  *	guaranteed to succeed if @link is ap->link, the cable type of
4849  *	the port is SATA and the port implements ->scr_read.
4850  *
4851  *	LOCKING:
4852  *	None if @link is ap->link.  Kernel thread context otherwise.
4853  *
4854  *	RETURNS:
4855  *	0 on success, negative errno on failure.
4856  */
4857 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4858 {
4859 	if (ata_is_host_link(link)) {
4860 		struct ata_port *ap = link->ap;
4861 
4862 		if (sata_scr_valid(link))
4863 			return ap->ops->scr_read(ap, reg, val);
4864 		return -EOPNOTSUPP;
4865 	}
4866 
4867 	return sata_pmp_scr_read(link, reg, val);
4868 }
4869 
4870 /**
4871  *	sata_scr_write - write SCR register of the specified port
4872  *	@link: ATA link to write SCR for
4873  *	@reg: SCR to write
4874  *	@val: value to write
4875  *
4876  *	Write @val to SCR register @reg of @link.  This function is
4877  *	guaranteed to succeed if @link is ap->link, the cable type of
4878  *	the port is SATA and the port implements ->scr_read.
4879  *
4880  *	LOCKING:
4881  *	None if @link is ap->link.  Kernel thread context otherwise.
4882  *
4883  *	RETURNS:
4884  *	0 on success, negative errno on failure.
4885  */
4886 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4887 {
4888 	if (ata_is_host_link(link)) {
4889 		struct ata_port *ap = link->ap;
4890 
4891 		if (sata_scr_valid(link))
4892 			return ap->ops->scr_write(ap, reg, val);
4893 		return -EOPNOTSUPP;
4894 	}
4895 
4896 	return sata_pmp_scr_write(link, reg, val);
4897 }
4898 
4899 /**
4900  *	sata_scr_write_flush - write SCR register of the specified port and flush
4901  *	@link: ATA link to write SCR for
4902  *	@reg: SCR to write
4903  *	@val: value to write
4904  *
4905  *	This function is identical to sata_scr_write() except that this
4906  *	function performs flush after writing to the register.
4907  *
4908  *	LOCKING:
4909  *	None if @link is ap->link.  Kernel thread context otherwise.
4910  *
4911  *	RETURNS:
4912  *	0 on success, negative errno on failure.
4913  */
4914 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4915 {
4916 	if (ata_is_host_link(link)) {
4917 		struct ata_port *ap = link->ap;
4918 		int rc;
4919 
4920 		if (sata_scr_valid(link)) {
4921 			rc = ap->ops->scr_write(ap, reg, val);
4922 			if (rc == 0)
4923 				rc = ap->ops->scr_read(ap, reg, &val);
4924 			return rc;
4925 		}
4926 		return -EOPNOTSUPP;
4927 	}
4928 
4929 	return sata_pmp_scr_write(link, reg, val);
4930 }
4931 
4932 /**
4933  *	ata_link_online - test whether the given link is online
4934  *	@link: ATA link to test
4935  *
4936  *	Test whether @link is online.  Note that this function returns
4937  *	0 if online status of @link cannot be obtained, so
4938  *	ata_link_online(link) != !ata_link_offline(link).
4939  *
4940  *	LOCKING:
4941  *	None.
4942  *
4943  *	RETURNS:
4944  *	1 if the port online status is available and online.
4945  */
4946 int ata_link_online(struct ata_link *link)
4947 {
4948 	u32 sstatus;
4949 
4950 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4951 	    (sstatus & 0xf) == 0x3)
4952 		return 1;
4953 	return 0;
4954 }
4955 
4956 /**
4957  *	ata_link_offline - test whether the given link is offline
4958  *	@link: ATA link to test
4959  *
4960  *	Test whether @link is offline.  Note that this function
4961  *	returns 0 if offline status of @link cannot be obtained, so
4962  *	ata_link_online(link) != !ata_link_offline(link).
4963  *
4964  *	LOCKING:
4965  *	None.
4966  *
4967  *	RETURNS:
4968  *	1 if the port offline status is available and offline.
4969  */
4970 int ata_link_offline(struct ata_link *link)
4971 {
4972 	u32 sstatus;
4973 
4974 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4975 	    (sstatus & 0xf) != 0x3)
4976 		return 1;
4977 	return 0;
4978 }
4979 
4980 #ifdef CONFIG_PM
4981 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4982 			       unsigned int action, unsigned int ehi_flags,
4983 			       int wait)
4984 {
4985 	unsigned long flags;
4986 	int i, rc;
4987 
4988 	for (i = 0; i < host->n_ports; i++) {
4989 		struct ata_port *ap = host->ports[i];
4990 		struct ata_link *link;
4991 
4992 		/* Previous resume operation might still be in
4993 		 * progress.  Wait for PM_PENDING to clear.
4994 		 */
4995 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4996 			ata_port_wait_eh(ap);
4997 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4998 		}
4999 
5000 		/* request PM ops to EH */
5001 		spin_lock_irqsave(ap->lock, flags);
5002 
5003 		ap->pm_mesg = mesg;
5004 		if (wait) {
5005 			rc = 0;
5006 			ap->pm_result = &rc;
5007 		}
5008 
5009 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5010 		__ata_port_for_each_link(link, ap) {
5011 			link->eh_info.action |= action;
5012 			link->eh_info.flags |= ehi_flags;
5013 		}
5014 
5015 		ata_port_schedule_eh(ap);
5016 
5017 		spin_unlock_irqrestore(ap->lock, flags);
5018 
5019 		/* wait and check result */
5020 		if (wait) {
5021 			ata_port_wait_eh(ap);
5022 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5023 			if (rc)
5024 				return rc;
5025 		}
5026 	}
5027 
5028 	return 0;
5029 }
5030 
5031 /**
5032  *	ata_host_suspend - suspend host
5033  *	@host: host to suspend
5034  *	@mesg: PM message
5035  *
5036  *	Suspend @host.  Actual operation is performed by EH.  This
5037  *	function requests EH to perform PM operations and waits for EH
5038  *	to finish.
5039  *
5040  *	LOCKING:
5041  *	Kernel thread context (may sleep).
5042  *
5043  *	RETURNS:
5044  *	0 on success, -errno on failure.
5045  */
5046 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5047 {
5048 	int rc;
5049 
5050 	/*
5051 	 * disable link pm on all ports before requesting
5052 	 * any pm activity
5053 	 */
5054 	ata_lpm_enable(host);
5055 
5056 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5057 	if (rc == 0)
5058 		host->dev->power.power_state = mesg;
5059 	return rc;
5060 }
5061 
5062 /**
5063  *	ata_host_resume - resume host
5064  *	@host: host to resume
5065  *
5066  *	Resume @host.  Actual operation is performed by EH.  This
5067  *	function requests EH to perform PM operations and returns.
5068  *	Note that all resume operations are performed in parallel.
5069  *
5070  *	LOCKING:
5071  *	Kernel thread context (may sleep).
5072  */
5073 void ata_host_resume(struct ata_host *host)
5074 {
5075 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5076 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5077 	host->dev->power.power_state = PMSG_ON;
5078 
5079 	/* reenable link pm */
5080 	ata_lpm_disable(host);
5081 }
5082 #endif
5083 
5084 /**
5085  *	ata_port_start - Set port up for dma.
5086  *	@ap: Port to initialize
5087  *
5088  *	Called just after data structures for each port are
5089  *	initialized.  Allocates space for PRD table.
5090  *
5091  *	May be used as the port_start() entry in ata_port_operations.
5092  *
5093  *	LOCKING:
5094  *	Inherited from caller.
5095  */
5096 int ata_port_start(struct ata_port *ap)
5097 {
5098 	struct device *dev = ap->dev;
5099 
5100 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5101 				      GFP_KERNEL);
5102 	if (!ap->prd)
5103 		return -ENOMEM;
5104 
5105 	return 0;
5106 }
5107 
5108 /**
5109  *	ata_dev_init - Initialize an ata_device structure
5110  *	@dev: Device structure to initialize
5111  *
5112  *	Initialize @dev in preparation for probing.
5113  *
5114  *	LOCKING:
5115  *	Inherited from caller.
5116  */
5117 void ata_dev_init(struct ata_device *dev)
5118 {
5119 	struct ata_link *link = dev->link;
5120 	struct ata_port *ap = link->ap;
5121 	unsigned long flags;
5122 
5123 	/* SATA spd limit is bound to the first device */
5124 	link->sata_spd_limit = link->hw_sata_spd_limit;
5125 	link->sata_spd = 0;
5126 
5127 	/* High bits of dev->flags are used to record warm plug
5128 	 * requests which occur asynchronously.  Synchronize using
5129 	 * host lock.
5130 	 */
5131 	spin_lock_irqsave(ap->lock, flags);
5132 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5133 	dev->horkage = 0;
5134 	spin_unlock_irqrestore(ap->lock, flags);
5135 
5136 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5137 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5138 	dev->pio_mask = UINT_MAX;
5139 	dev->mwdma_mask = UINT_MAX;
5140 	dev->udma_mask = UINT_MAX;
5141 }
5142 
5143 /**
5144  *	ata_link_init - Initialize an ata_link structure
5145  *	@ap: ATA port link is attached to
5146  *	@link: Link structure to initialize
5147  *	@pmp: Port multiplier port number
5148  *
5149  *	Initialize @link.
5150  *
5151  *	LOCKING:
5152  *	Kernel thread context (may sleep)
5153  */
5154 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5155 {
5156 	int i;
5157 
5158 	/* clear everything except for devices */
5159 	memset(link, 0, offsetof(struct ata_link, device[0]));
5160 
5161 	link->ap = ap;
5162 	link->pmp = pmp;
5163 	link->active_tag = ATA_TAG_POISON;
5164 	link->hw_sata_spd_limit = UINT_MAX;
5165 
5166 	/* can't use iterator, ap isn't initialized yet */
5167 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5168 		struct ata_device *dev = &link->device[i];
5169 
5170 		dev->link = link;
5171 		dev->devno = dev - link->device;
5172 		ata_dev_init(dev);
5173 	}
5174 }
5175 
5176 /**
5177  *	sata_link_init_spd - Initialize link->sata_spd_limit
5178  *	@link: Link to configure sata_spd_limit for
5179  *
5180  *	Initialize @link->[hw_]sata_spd_limit to the currently
5181  *	configured value.
5182  *
5183  *	LOCKING:
5184  *	Kernel thread context (may sleep).
5185  *
5186  *	RETURNS:
5187  *	0 on success, -errno on failure.
5188  */
5189 int sata_link_init_spd(struct ata_link *link)
5190 {
5191 	u32 scontrol;
5192 	u8 spd;
5193 	int rc;
5194 
5195 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5196 	if (rc)
5197 		return rc;
5198 
5199 	spd = (scontrol >> 4) & 0xf;
5200 	if (spd)
5201 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5202 
5203 	ata_force_spd_limit(link);
5204 
5205 	link->sata_spd_limit = link->hw_sata_spd_limit;
5206 
5207 	return 0;
5208 }
5209 
5210 /**
5211  *	ata_port_alloc - allocate and initialize basic ATA port resources
5212  *	@host: ATA host this allocated port belongs to
5213  *
5214  *	Allocate and initialize basic ATA port resources.
5215  *
5216  *	RETURNS:
5217  *	Allocated ATA port on success, NULL on failure.
5218  *
5219  *	LOCKING:
5220  *	Inherited from calling layer (may sleep).
5221  */
5222 struct ata_port *ata_port_alloc(struct ata_host *host)
5223 {
5224 	struct ata_port *ap;
5225 
5226 	DPRINTK("ENTER\n");
5227 
5228 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5229 	if (!ap)
5230 		return NULL;
5231 
5232 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5233 	ap->lock = &host->lock;
5234 	ap->flags = ATA_FLAG_DISABLED;
5235 	ap->print_id = -1;
5236 	ap->ctl = ATA_DEVCTL_OBS;
5237 	ap->host = host;
5238 	ap->dev = host->dev;
5239 	ap->last_ctl = 0xFF;
5240 
5241 #if defined(ATA_VERBOSE_DEBUG)
5242 	/* turn on all debugging levels */
5243 	ap->msg_enable = 0x00FF;
5244 #elif defined(ATA_DEBUG)
5245 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5246 #else
5247 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5248 #endif
5249 
5250 #ifdef CONFIG_ATA_SFF
5251 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5252 #endif
5253 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5254 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5255 	INIT_LIST_HEAD(&ap->eh_done_q);
5256 	init_waitqueue_head(&ap->eh_wait_q);
5257 	init_timer_deferrable(&ap->fastdrain_timer);
5258 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5259 	ap->fastdrain_timer.data = (unsigned long)ap;
5260 
5261 	ap->cbl = ATA_CBL_NONE;
5262 
5263 	ata_link_init(ap, &ap->link, 0);
5264 
5265 #ifdef ATA_IRQ_TRAP
5266 	ap->stats.unhandled_irq = 1;
5267 	ap->stats.idle_irq = 1;
5268 #endif
5269 	return ap;
5270 }
5271 
5272 static void ata_host_release(struct device *gendev, void *res)
5273 {
5274 	struct ata_host *host = dev_get_drvdata(gendev);
5275 	int i;
5276 
5277 	for (i = 0; i < host->n_ports; i++) {
5278 		struct ata_port *ap = host->ports[i];
5279 
5280 		if (!ap)
5281 			continue;
5282 
5283 		if (ap->scsi_host)
5284 			scsi_host_put(ap->scsi_host);
5285 
5286 		kfree(ap->pmp_link);
5287 		kfree(ap);
5288 		host->ports[i] = NULL;
5289 	}
5290 
5291 	dev_set_drvdata(gendev, NULL);
5292 }
5293 
5294 /**
5295  *	ata_host_alloc - allocate and init basic ATA host resources
5296  *	@dev: generic device this host is associated with
5297  *	@max_ports: maximum number of ATA ports associated with this host
5298  *
5299  *	Allocate and initialize basic ATA host resources.  LLD calls
5300  *	this function to allocate a host, initializes it fully and
5301  *	attaches it using ata_host_register().
5302  *
5303  *	@max_ports ports are allocated and host->n_ports is
5304  *	initialized to @max_ports.  The caller is allowed to decrease
5305  *	host->n_ports before calling ata_host_register().  The unused
5306  *	ports will be automatically freed on registration.
5307  *
5308  *	RETURNS:
5309  *	Allocated ATA host on success, NULL on failure.
5310  *
5311  *	LOCKING:
5312  *	Inherited from calling layer (may sleep).
5313  */
5314 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5315 {
5316 	struct ata_host *host;
5317 	size_t sz;
5318 	int i;
5319 
5320 	DPRINTK("ENTER\n");
5321 
5322 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5323 		return NULL;
5324 
5325 	/* alloc a container for our list of ATA ports (buses) */
5326 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5328 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5329 	if (!host)
5330 		goto err_out;
5331 
5332 	devres_add(dev, host);
5333 	dev_set_drvdata(dev, host);
5334 
5335 	spin_lock_init(&host->lock);
5336 	host->dev = dev;
5337 	host->n_ports = max_ports;
5338 
5339 	/* allocate ports bound to this host */
5340 	for (i = 0; i < max_ports; i++) {
5341 		struct ata_port *ap;
5342 
5343 		ap = ata_port_alloc(host);
5344 		if (!ap)
5345 			goto err_out;
5346 
5347 		ap->port_no = i;
5348 		host->ports[i] = ap;
5349 	}
5350 
5351 	devres_remove_group(dev, NULL);
5352 	return host;
5353 
5354  err_out:
5355 	devres_release_group(dev, NULL);
5356 	return NULL;
5357 }
5358 
5359 /**
5360  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5361  *	@dev: generic device this host is associated with
5362  *	@ppi: array of ATA port_info to initialize host with
5363  *	@n_ports: number of ATA ports attached to this host
5364  *
5365  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5366  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5367  *	last entry will be used for the remaining ports.
5368  *
5369  *	RETURNS:
5370  *	Allocated ATA host on success, NULL on failure.
5371  *
5372  *	LOCKING:
5373  *	Inherited from calling layer (may sleep).
5374  */
5375 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5376 				      const struct ata_port_info * const * ppi,
5377 				      int n_ports)
5378 {
5379 	const struct ata_port_info *pi;
5380 	struct ata_host *host;
5381 	int i, j;
5382 
5383 	host = ata_host_alloc(dev, n_ports);
5384 	if (!host)
5385 		return NULL;
5386 
5387 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5388 		struct ata_port *ap = host->ports[i];
5389 
5390 		if (ppi[j])
5391 			pi = ppi[j++];
5392 
5393 		ap->pio_mask = pi->pio_mask;
5394 		ap->mwdma_mask = pi->mwdma_mask;
5395 		ap->udma_mask = pi->udma_mask;
5396 		ap->flags |= pi->flags;
5397 		ap->link.flags |= pi->link_flags;
5398 		ap->ops = pi->port_ops;
5399 
5400 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5401 			host->ops = pi->port_ops;
5402 	}
5403 
5404 	return host;
5405 }
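
/*
 * Illustrative sketch (not part of the original source): how a simple
 * LLD probe routine typically uses this helper together with
 * ata_host_activate().  my_port_info, my_port_ops, my_interrupt and
 * my_sht are placeholders:
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	... map BARs and fill each port's ap->ioaddr ...
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */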
5406 
5407 static void ata_host_stop(struct device *gendev, void *res)
5408 {
5409 	struct ata_host *host = dev_get_drvdata(gendev);
5410 	int i;
5411 
5412 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5413 
5414 	for (i = 0; i < host->n_ports; i++) {
5415 		struct ata_port *ap = host->ports[i];
5416 
5417 		if (ap->ops->port_stop)
5418 			ap->ops->port_stop(ap);
5419 	}
5420 
5421 	if (host->ops->host_stop)
5422 		host->ops->host_stop(host);
5423 }
5424 
5425 /**
5426  *	ata_finalize_port_ops - finalize ata_port_operations
5427  *	@ops: ata_port_operations to finalize
5428  *
5429  *	An ata_port_operations can inherit from another ops and that
5430  *	ops can again inherit from another.  This can go on as many
5431  *	times as necessary as long as there is no loop in the
5432  *	inheritance chain.
5433  *
5434  *	Ops tables are finalized when the host is started.  NULL or
5435  *	unspecified entries are inherited from the closest ancestor
5436  *	which has the method and the entry is populated with it.
5437  *	After finalization, the ops table directly points to all the
5438  *	methods and ->inherits is no longer necessary and cleared.
5439  *
5440  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5441  *
5442  *	LOCKING:
5443  *	None.
5444  */
5445 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5446 {
5447 	static DEFINE_SPINLOCK(lock);
5448 	const struct ata_port_operations *cur;
5449 	void **begin = (void **)ops;
5450 	void **end = (void **)&ops->inherits;
5451 	void **pp;
5452 
5453 	if (!ops || !ops->inherits)
5454 		return;
5455 
5456 	spin_lock(&lock);
5457 
5458 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5459 		void **inherit = (void **)cur;
5460 
5461 		for (pp = begin; pp < end; pp++, inherit++)
5462 			if (!*pp)
5463 				*pp = *inherit;
5464 	}
5465 
5466 	for (pp = begin; pp < end; pp++)
5467 		if (IS_ERR(*pp))
5468 			*pp = NULL;
5469 
5470 	ops->inherits = NULL;
5471 
5472 	spin_unlock(&lock);
5473 }
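
/*
 * Illustrative example (not part of the original source): an inheriting
 * ops table as described above.  my_port_ops and my_hardreset are
 * placeholders:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.postreset	= ATA_OP_NULL,	(force the slot to NULL)
 *	};
 *
 * After finalization, unspecified slots (e.g. .qc_defer) point at the
 * inherited implementations and ->inherits is cleared.
 */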
5474 
5475 /**
5476  *	ata_host_start - start and freeze ports of an ATA host
5477  *	@host: ATA host to start ports for
5478  *
5479  *	Start and then freeze ports of @host.  Started status is
5480  *	recorded in host->flags, so this function can be called
5481  *	multiple times.  Ports are guaranteed to get started only
5482  *	once.  If host->ops isn't initialized yet, it's set to the
5483  *	first non-dummy port ops.
5484  *
5485  *	LOCKING:
5486  *	Inherited from calling layer (may sleep).
5487  *
5488  *	RETURNS:
5489  *	0 if all ports are started successfully, -errno otherwise.
5490  */
5491 int ata_host_start(struct ata_host *host)
5492 {
5493 	int have_stop = 0;
5494 	void *start_dr = NULL;
5495 	int i, rc;
5496 
5497 	if (host->flags & ATA_HOST_STARTED)
5498 		return 0;
5499 
5500 	ata_finalize_port_ops(host->ops);
5501 
5502 	for (i = 0; i < host->n_ports; i++) {
5503 		struct ata_port *ap = host->ports[i];
5504 
5505 		ata_finalize_port_ops(ap->ops);
5506 
5507 		if (!host->ops && !ata_port_is_dummy(ap))
5508 			host->ops = ap->ops;
5509 
5510 		if (ap->ops->port_stop)
5511 			have_stop = 1;
5512 	}
5513 
5514 	if (host->ops->host_stop)
5515 		have_stop = 1;
5516 
5517 	if (have_stop) {
5518 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5519 		if (!start_dr)
5520 			return -ENOMEM;
5521 	}
5522 
5523 	for (i = 0; i < host->n_ports; i++) {
5524 		struct ata_port *ap = host->ports[i];
5525 
5526 		if (ap->ops->port_start) {
5527 			rc = ap->ops->port_start(ap);
5528 			if (rc) {
5529 				if (rc != -ENODEV)
5530 					dev_printk(KERN_ERR, host->dev,
5531 						"failed to start port %d "
5532 						"(errno=%d)\n", i, rc);
5533 				goto err_out;
5534 			}
5535 		}
5536 		ata_eh_freeze_port(ap);
5537 	}
5538 
5539 	if (start_dr)
5540 		devres_add(host->dev, start_dr);
5541 	host->flags |= ATA_HOST_STARTED;
5542 	return 0;
5543 
5544  err_out:
5545 	while (--i >= 0) {
5546 		struct ata_port *ap = host->ports[i];
5547 
5548 		if (ap->ops->port_stop)
5549 			ap->ops->port_stop(ap);
5550 	}
5551 	devres_free(start_dr);
5552 	return rc;
5553 }
5554 
5555 /**
5556  *	ata_sas_host_init - Initialize a host struct
5557  *	@host:	host to initialize
5558  *	@dev:	device host is attached to
5559  *	@flags:	host flags
5560  *	@ops:	port_ops
5561  *
5562  *	LOCKING:
5563  *	PCI/etc. bus probe sem.
5564  *
5565  */
5566 /* KILLME - the only user left is ipr */
5567 void ata_host_init(struct ata_host *host, struct device *dev,
5568 		   unsigned long flags, struct ata_port_operations *ops)
5569 {
5570 	spin_lock_init(&host->lock);
5571 	host->dev = dev;
5572 	host->flags = flags;
5573 	host->ops = ops;
5574 }
5575 
5576 /**
5577  *	ata_host_register - register initialized ATA host
5578  *	@host: ATA host to register
5579  *	@sht: template for SCSI host
5580  *
5581  *	Register initialized ATA host.  @host is allocated using
5582  *	ata_host_alloc() and fully initialized by LLD.  This function
5583  *	starts ports, registers @host with ATA and SCSI layers and
5584  *	probes attached devices.
5585  *
5586  *	LOCKING:
5587  *	Inherited from calling layer (may sleep).
5588  *
5589  *	RETURNS:
5590  *	0 on success, -errno otherwise.
5591  */
5592 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5593 {
5594 	int i, rc;
5595 
5596 	/* host must have been started */
5597 	if (!(host->flags & ATA_HOST_STARTED)) {
5598 		dev_printk(KERN_ERR, host->dev,
5599 			   "BUG: trying to register unstarted host\n");
5600 		WARN_ON(1);
5601 		return -EINVAL;
5602 	}
5603 
5604 	/* Blow away unused ports.  This happens when the LLD can't
5605 	 * determine the exact number of ports to allocate at
5606 	 * allocation time.
5607 	 */
5608 	for (i = host->n_ports; host->ports[i]; i++)
5609 		kfree(host->ports[i]);
5610 
5611 	/* give ports names and add SCSI hosts */
5612 	for (i = 0; i < host->n_ports; i++)
5613 		host->ports[i]->print_id = ata_print_id++;
5614 
5615 	rc = ata_scsi_add_hosts(host, sht);
5616 	if (rc)
5617 		return rc;
5618 
5619 	/* associate with ACPI nodes */
5620 	ata_acpi_associate(host);
5621 
5622 	/* set cable, sata_spd_limit and report */
5623 	for (i = 0; i < host->n_ports; i++) {
5624 		struct ata_port *ap = host->ports[i];
5625 		unsigned long xfer_mask;
5626 
5627 		/* set SATA cable type if still unset */
5628 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5629 			ap->cbl = ATA_CBL_SATA;
5630 
5631 		/* init sata_spd_limit to the current value */
5632 		sata_link_init_spd(&ap->link);
5633 
5634 		/* print per-port info to dmesg */
5635 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5636 					      ap->udma_mask);
5637 
5638 		if (!ata_port_is_dummy(ap)) {
5639 			ata_port_printk(ap, KERN_INFO,
5640 					"%cATA max %s %s\n",
5641 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5642 					ata_mode_string(xfer_mask),
5643 					ap->link.eh_info.desc);
5644 			ata_ehi_clear_desc(&ap->link.eh_info);
5645 		} else
5646 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5647 	}
5648 
5649 	/* perform each probe synchronously */
5650 	DPRINTK("probe begin\n");
5651 	for (i = 0; i < host->n_ports; i++) {
5652 		struct ata_port *ap = host->ports[i];
5653 
5654 		/* probe */
5655 		if (ap->ops->error_handler) {
5656 			struct ata_eh_info *ehi = &ap->link.eh_info;
5657 			unsigned long flags;
5658 
5659 			ata_port_probe(ap);
5660 
5661 			/* kick EH for boot probing */
5662 			spin_lock_irqsave(ap->lock, flags);
5663 
5664 			ehi->probe_mask |= ATA_ALL_DEVICES;
5665 			ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
5666 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5667 
5668 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5669 			ap->pflags |= ATA_PFLAG_LOADING;
5670 			ata_port_schedule_eh(ap);
5671 
5672 			spin_unlock_irqrestore(ap->lock, flags);
5673 
5674 			/* wait for EH to finish */
5675 			ata_port_wait_eh(ap);
5676 		} else {
5677 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5678 			rc = ata_bus_probe(ap);
5679 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
5680 
5681 			if (rc) {
5682 				/* FIXME: do something useful here?
5683 				 * Current libata behavior will
5684 				 * tear down everything when
5685 				 * the module is removed
5686 				 * or the h/w is unplugged.
5687 				 */
5688 			}
5689 		}
5690 	}
5691 
5692 	/* probes are done, now scan each port's disk(s) */
5693 	DPRINTK("host probe begin\n");
5694 	for (i = 0; i < host->n_ports; i++) {
5695 		struct ata_port *ap = host->ports[i];
5696 
5697 		ata_scsi_scan_host(ap, 1);
5698 	}
5699 
5700 	return 0;
5701 }
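
/*
 * Illustrative sketch, not part of libata: an LLD that needs one IRQ per
 * port cannot use ata_host_activate() and open-codes the sequence that
 * ata_host_register() expects - start the host, grab resources, then
 * register.  foo_interrupt, foo_sht and irqs[] are placeholders.
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = devm_request_irq(host->dev, irqs[i], foo_interrupt, 0,
 *				      dev_driver_string(host->dev),
 *				      host->ports[i]);
 *		if (rc)
 *			return rc;
 *		ata_port_desc(host->ports[i], "irq %d", irqs[i]);
 *	}
 *
 *	return ata_host_register(host, &foo_sht);
 */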
5702 
5703 /**
5704  *	ata_host_activate - start host, request IRQ and register it
5705  *	@host: target ATA host
5706  *	@irq: IRQ to request
5707  *	@irq_handler: irq_handler used when requesting IRQ
5708  *	@irq_flags: irq_flags used when requesting IRQ
5709  *	@sht: scsi_host_template to use when registering the host
5710  *
5711  *	After allocating an ATA host and initializing it, most libata
5712  *	LLDs perform three steps to activate the host - start host,
5713  *	request IRQ and register it.  This helper takes necessary
5714  *	arguments and performs the three steps in one go.
5715  *
5716  *	An invalid IRQ skips the IRQ registration and expects the host to
5717  *	have set polling mode on the port. In this case, @irq_handler
5718  *	should be NULL.
5719  *
5720  *	LOCKING:
5721  *	Inherited from calling layer (may sleep).
5722  *
5723  *	RETURNS:
5724  *	0 on success, -errno otherwise.
5725  */
5726 int ata_host_activate(struct ata_host *host, int irq,
5727 		      irq_handler_t irq_handler, unsigned long irq_flags,
5728 		      struct scsi_host_template *sht)
5729 {
5730 	int i, rc;
5731 
5732 	rc = ata_host_start(host);
5733 	if (rc)
5734 		return rc;
5735 
5736 	/* Special case for polling mode */
5737 	if (!irq) {
5738 		WARN_ON(irq_handler);
5739 		return ata_host_register(host, sht);
5740 	}
5741 
5742 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5743 			      dev_driver_string(host->dev), host);
5744 	if (rc)
5745 		return rc;
5746 
5747 	for (i = 0; i < host->n_ports; i++)
5748 		ata_port_desc(host->ports[i], "irq %d", irq);
5749 
5750 	rc = ata_host_register(host, sht);
5751 	/* if failed, just free the IRQ and leave ports alone */
5752 	if (rc)
5753 		devm_free_irq(host->dev, irq, host);
5754 
5755 	return rc;
5756 }
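
/*
 * Illustrative sketch, not part of libata: the usual PCI LLD probe built
 * around ata_host_activate().  foo_port_info, foo_interrupt and foo_sht
 * stand in for the driver's own port info, interrupt handler and SCSI
 * host template; BAR mapping and ioaddr setup are elided.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		// ... map BARs and fill host->ports[0]->ioaddr here ...
 *
 *		pci_set_master(pdev);
 *		return ata_host_activate(host, pdev->irq, foo_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */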
5757 
5758 /**
5759  *	ata_port_detach - Detach ATA port in preparation for device removal
5760  *	@ap: ATA port to be detached
5761  *
5762  *	Detach all ATA devices and the associated SCSI devices of @ap;
5763  *	then, remove the associated SCSI host.  @ap is guaranteed to
5764  *	be quiescent on return from this function.
5765  *
5766  *	LOCKING:
5767  *	Kernel thread context (may sleep).
5768  */
5769 static void ata_port_detach(struct ata_port *ap)
5770 {
5771 	unsigned long flags;
5772 	struct ata_link *link;
5773 	struct ata_device *dev;
5774 
5775 	if (!ap->ops->error_handler)
5776 		goto skip_eh;
5777 
5778 	/* tell EH we're leaving & flush EH */
5779 	spin_lock_irqsave(ap->lock, flags);
5780 	ap->pflags |= ATA_PFLAG_UNLOADING;
5781 	spin_unlock_irqrestore(ap->lock, flags);
5782 
5783 	ata_port_wait_eh(ap);
5784 
5785 	/* EH is now guaranteed to see UNLOADING - EH context belongs
5786 	 * to us.  Disable all existing devices.
5787 	 */
5788 	ata_port_for_each_link(link, ap) {
5789 		ata_link_for_each_dev(dev, link)
5790 			ata_dev_disable(dev);
5791 	}
5792 
5793 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
5794 	 * will be skipped and retries will be terminated with bad
5795 	 * target.
5796 	 */
5797 	spin_lock_irqsave(ap->lock, flags);
5798 	ata_port_freeze(ap);	/* won't be thawed */
5799 	spin_unlock_irqrestore(ap->lock, flags);
5800 
5801 	ata_port_wait_eh(ap);
5802 	cancel_rearming_delayed_work(&ap->hotplug_task);
5803 
5804  skip_eh:
5805 	/* remove the associated SCSI host */
5806 	scsi_remove_host(ap->scsi_host);
5807 }
5808 
5809 /**
5810  *	ata_host_detach - Detach all ports of an ATA host
5811  *	@host: Host to detach
5812  *
5813  *	Detach all ports of @host.
5814  *
5815  *	LOCKING:
5816  *	Kernel thread context (may sleep).
5817  */
5818 void ata_host_detach(struct ata_host *host)
5819 {
5820 	int i;
5821 
5822 	for (i = 0; i < host->n_ports; i++)
5823 		ata_port_detach(host->ports[i]);
5824 
5825 	/* the host is dead now, dissociate ACPI */
5826 	ata_acpi_dissociate(host);
5827 }
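
/*
 * Illustrative sketch, not part of libata: a non-PCI (platform) driver's
 * remove callback simply detaches the host stashed in drvdata by
 * ata_host_alloc(); devres then releases the remaining resources.
 * foo_remove is a placeholder name.
 *
 *	static int __devexit foo_remove(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		ata_host_detach(host);
 *		return 0;
 *	}
 */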
5828 
5829 #ifdef CONFIG_PCI
5830 
5831 /**
5832  *	ata_pci_remove_one - PCI layer callback for device removal
5833  *	@pdev: PCI device that was removed
5834  *
5835  *	PCI layer indicates to libata via this hook that a hot-unplug or
5836  *	module unload event has occurred.  Detach all ports.  Resource
5837  *	release is handled via devres.
5838  *
5839  *	LOCKING:
5840  *	Inherited from PCI layer (may sleep).
5841  */
5842 void ata_pci_remove_one(struct pci_dev *pdev)
5843 {
5844 	struct device *dev = &pdev->dev;
5845 	struct ata_host *host = dev_get_drvdata(dev);
5846 
5847 	ata_host_detach(host);
5848 }
5849 
5850 /* move to PCI subsystem */
5851 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5852 {
5853 	unsigned long tmp = 0;
5854 
5855 	switch (bits->width) {
5856 	case 1: {
5857 		u8 tmp8 = 0;
5858 		pci_read_config_byte(pdev, bits->reg, &tmp8);
5859 		tmp = tmp8;
5860 		break;
5861 	}
5862 	case 2: {
5863 		u16 tmp16 = 0;
5864 		pci_read_config_word(pdev, bits->reg, &tmp16);
5865 		tmp = tmp16;
5866 		break;
5867 	}
5868 	case 4: {
5869 		u32 tmp32 = 0;
5870 		pci_read_config_dword(pdev, bits->reg, &tmp32);
5871 		tmp = tmp32;
5872 		break;
5873 	}
5874 
5875 	default:
5876 		return -EINVAL;
5877 	}
5878 
5879 	tmp &= bits->mask;
5880 
5881 	return (tmp == bits->val) ? 1 : 0;
5882 }
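
/*
 * Illustrative sketch, not part of libata: ata_piix-style use of
 * pci_test_config_bits() from a ->prereset() hook to skip channels the
 * BIOS left disabled.  foo_enable_bits and foo_prereset are placeholders;
 * the register/mask values are examples only.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41U, 1U, 0x80UL, 0x80UL },	// primary channel enable
 *		{ 0x43U, 1U, 0x80UL, 0x80UL },	// secondary channel enable
 *	};
 *
 *	static int foo_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		struct ata_port *ap = link->ap;
 *		struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 *
 *		if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *			return -ENOENT;
 *		return ata_std_prereset(link, deadline);
 *	}
 */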
5883 
5884 #ifdef CONFIG_PM
5885 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5886 {
5887 	pci_save_state(pdev);
5888 	pci_disable_device(pdev);
5889 
5890 	if (mesg.event & PM_EVENT_SLEEP)
5891 		pci_set_power_state(pdev, PCI_D3hot);
5892 }
5893 
5894 int ata_pci_device_do_resume(struct pci_dev *pdev)
5895 {
5896 	int rc;
5897 
5898 	pci_set_power_state(pdev, PCI_D0);
5899 	pci_restore_state(pdev);
5900 
5901 	rc = pcim_enable_device(pdev);
5902 	if (rc) {
5903 		dev_printk(KERN_ERR, &pdev->dev,
5904 			   "failed to enable device after resume (%d)\n", rc);
5905 		return rc;
5906 	}
5907 
5908 	pci_set_master(pdev);
5909 	return 0;
5910 }
5911 
5912 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5913 {
5914 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
5915 	int rc = 0;
5916 
5917 	rc = ata_host_suspend(host, mesg);
5918 	if (rc)
5919 		return rc;
5920 
5921 	ata_pci_device_do_suspend(pdev, mesg);
5922 
5923 	return 0;
5924 }
5925 
5926 int ata_pci_device_resume(struct pci_dev *pdev)
5927 {
5928 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
5929 	int rc;
5930 
5931 	rc = ata_pci_device_do_resume(pdev);
5932 	if (rc == 0)
5933 		ata_host_resume(host);
5934 	return rc;
5935 }
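
/*
 * Illustrative sketch, not part of libata: a driver whose controller needs
 * chip-specific reprogramming after power loss uses the do_* helpers and
 * resumes the host itself; one that does not can point its pci_driver
 * .suspend/.resume straight at ata_pci_device_suspend()/_resume().
 * foo_pci_resume and foo_reinit_chip are placeholders.
 *
 *	static int foo_pci_resume(struct pci_dev *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *		int rc;
 *
 *		rc = ata_pci_device_do_resume(pdev);
 *		if (rc)
 *			return rc;
 *
 *		foo_reinit_chip(host);		// hypothetical controller re-init
 *		ata_host_resume(host);
 *		return 0;
 *	}
 */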
5936 #endif /* CONFIG_PM */
5937 
5938 #endif /* CONFIG_PCI */
5939 
5940 static int __init ata_parse_force_one(char **cur,
5941 				      struct ata_force_ent *force_ent,
5942 				      const char **reason)
5943 {
5944 	/* FIXME: Currently, there's no way to tag init const data and
5945 	 * using __initdata causes build failure on some versions of
5946 	 * gcc.  Once __initdataconst is implemented, add const to the
5947 	 * following structure.
5948 	 */
5949 	static struct ata_force_param force_tbl[] __initdata = {
5950 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
5951 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
5952 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
5953 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
5954 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
5955 		{ "sata",	.cbl		= ATA_CBL_SATA },
5956 		{ "1.5Gbps",	.spd_limit	= 1 },
5957 		{ "3.0Gbps",	.spd_limit	= 2 },
5958 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
5959 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
5960 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
5961 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
5962 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
5963 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
5964 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
5965 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
5966 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
5967 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
5968 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
5969 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
5970 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
5971 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
5972 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5973 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5974 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5975 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5976 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5977 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5978 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5979 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5980 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5981 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5982 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5983 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5984 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5985 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5986 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5987 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5988 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5989 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5990 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
5991 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
5992 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
5993 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
5994 	};
5995 	char *start = *cur, *p = *cur;
5996 	char *id, *val, *endp;
5997 	const struct ata_force_param *match_fp = NULL;
5998 	int nr_matches = 0, i;
5999 
6000 	/* find where this param ends and update *cur */
6001 	while (*p != '\0' && *p != ',')
6002 		p++;
6003 
6004 	if (*p == '\0')
6005 		*cur = p;
6006 	else
6007 		*cur = p + 1;
6008 
6009 	*p = '\0';
6010 
6011 	/* parse */
6012 	p = strchr(start, ':');
6013 	if (!p) {
6014 		val = strstrip(start);
6015 		goto parse_val;
6016 	}
6017 	*p = '\0';
6018 
6019 	id = strstrip(start);
6020 	val = strstrip(p + 1);
6021 
6022 	/* parse id */
6023 	p = strchr(id, '.');
6024 	if (p) {
6025 		*p++ = '\0';
6026 		force_ent->device = simple_strtoul(p, &endp, 10);
6027 		if (p == endp || *endp != '\0') {
6028 			*reason = "invalid device";
6029 			return -EINVAL;
6030 		}
6031 	}
6032 
6033 	force_ent->port = simple_strtoul(id, &endp, 10);
6034 	if (id == endp || *endp != '\0') {
6035 		*reason = "invalid port/link";
6036 		return -EINVAL;
6037 	}
6038 
6039  parse_val:
6040 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6041 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6042 		const struct ata_force_param *fp = &force_tbl[i];
6043 
6044 		if (strncasecmp(val, fp->name, strlen(val)))
6045 			continue;
6046 
6047 		nr_matches++;
6048 		match_fp = fp;
6049 
6050 		if (strcasecmp(val, fp->name) == 0) {
6051 			nr_matches = 1;
6052 			break;
6053 		}
6054 	}
6055 
6056 	if (!nr_matches) {
6057 		*reason = "unknown value";
6058 		return -EINVAL;
6059 	}
6060 	if (nr_matches > 1) {
6061 		*reason = "ambiguous value";
6062 		return -EINVAL;
6063 	}
6064 
6065 	force_ent->param = *match_fp;
6066 
6067 	return 0;
6068 }
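
/*
 * The values above are matched by prefix, so shortened forms work as long
 * as they stay unambiguous (e.g. "1.5" matches "1.5Gbps").  A few
 * libata.force examples in the "[PORT[.DEVICE]:]VALUE" format this parser
 * accepts; port and device numbers follow the ataP.DD names printed at
 * probe time:
 *
 *	libata.force=1.5Gbps		limit every port/link to 1.5Gbps
 *	libata.force=2:noncq		disable NCQ on port 2's devices
 *	libata.force=3.00:udma/33	cap device ata3.00 at UDMA/33
 */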
6069 
6070 static void __init ata_parse_force_param(void)
6071 {
6072 	int idx = 0, size = 1;
6073 	int last_port = -1, last_device = -1;
6074 	char *p, *cur, *next;
6075 
6076 	/* calculate maximum number of params and allocate force_tbl */
6077 	for (p = ata_force_param_buf; *p; p++)
6078 		if (*p == ',')
6079 			size++;
6080 
6081 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6082 	if (!ata_force_tbl) {
6083 		printk(KERN_WARNING "ata: failed to extend force table, "
6084 		       "libata.force ignored\n");
6085 		return;
6086 	}
6087 
6088 	/* parse and populate the table */
6089 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6090 		const char *reason = "";
6091 		struct ata_force_ent te = { .port = -1, .device = -1 };
6092 
6093 		next = cur;
6094 		if (ata_parse_force_one(&next, &te, &reason)) {
6095 			printk(KERN_WARNING "ata: failed to parse force "
6096 			       "parameter \"%s\" (%s)\n",
6097 			       cur, reason);
6098 			continue;
6099 		}
6100 
6101 		if (te.port == -1) {
6102 			te.port = last_port;
6103 			te.device = last_device;
6104 		}
6105 
6106 		ata_force_tbl[idx++] = te;
6107 
6108 		last_port = te.port;
6109 		last_device = te.device;
6110 	}
6111 
6112 	ata_force_tbl_size = idx;
6113 }
6114 
6115 static int __init ata_init(void)
6116 {
6117 	ata_parse_force_param();
6118 
6119 	ata_wq = create_workqueue("ata");
6120 	if (!ata_wq)
6121 		goto free_force_tbl;
6122 
6123 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6124 	if (!ata_aux_wq)
6125 		goto free_wq;
6126 
6127 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6128 	return 0;
6129 
6130 free_wq:
6131 	destroy_workqueue(ata_wq);
6132 free_force_tbl:
6133 	kfree(ata_force_tbl);
6134 	return -ENOMEM;
6135 }
6136 
6137 static void __exit ata_exit(void)
6138 {
6139 	kfree(ata_force_tbl);
6140 	destroy_workqueue(ata_wq);
6141 	destroy_workqueue(ata_aux_wq);
6142 }
6143 
6144 subsys_initcall(ata_init);
6145 module_exit(ata_exit);
6146 
6147 static unsigned long ratelimit_time;
6148 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6149 
6150 int ata_ratelimit(void)
6151 {
6152 	int rc;
6153 	unsigned long flags;
6154 
6155 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6156 
6157 	if (time_after(jiffies, ratelimit_time)) {
6158 		rc = 1;
6159 		ratelimit_time = jiffies + (HZ/5);
6160 	} else
6161 		rc = 0;
6162 
6163 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6164 
6165 	return rc;
6166 }
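
/*
 * Illustrative sketch, not part of libata: ata_ratelimit() throttles
 * messages emitted from hot paths such as interrupt handlers to roughly
 * five per second.  FOO_IRQ_ERR is a placeholder status bit.
 *
 *	if (unlikely(irq_stat & FOO_IRQ_ERR) && ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious error interrupt (irq_stat 0x%x)\n",
 *				irq_stat);
 */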
6167 
6168 /**
6169  *	ata_wait_register - wait until register value changes
6170  *	@reg: IO-mapped register
6171  *	@mask: Mask to apply to read register value
6172  *	@val: Wait condition
6173  *	@interval: polling interval in milliseconds
6174  *	@timeout: timeout in milliseconds
6175  *
6176  *	Waiting for some bits of a register to change is a common
6177  *	operation for ATA controllers.  This function reads the 32-bit
6178  *	LE IO-mapped register @reg and tests for the following condition.
6179  *
6180  *	(*@reg & mask) != val
6181  *
6182  *	If the condition is met, it returns; otherwise, the process is
6183  *	repeated every @interval milliseconds until @timeout expires.
6184  *
6185  *	LOCKING:
6186  *	Kernel thread context (may sleep)
6187  *
6188  *	RETURNS:
6189  *	The final register value.
6190  */
6191 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6192 		      unsigned long interval, unsigned long timeout)
6193 {
6194 	unsigned long deadline;
6195 	u32 tmp;
6196 
6197 	tmp = ioread32(reg);
6198 
6199 	/* Calculate timeout _after_ the first read to make sure
6200 	 * preceding writes reach the controller before starting to
6201 	 * eat away the timeout.
6202 	 */
6203 	deadline = ata_deadline(jiffies, timeout);
6204 
6205 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6206 		msleep(interval);
6207 		tmp = ioread32(reg);
6208 	}
6209 
6210 	return tmp;
6211 }
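
/*
 * Illustrative sketch, not part of libata: waiting for a (hypothetical)
 * FOO_CMD_BUSY bit to clear, polling every 10ms for up to a second.  The
 * caller re-checks the returned value because the wait may simply have
 * timed out.
 *
 *	tmp = ata_wait_register(mmio + FOO_CMD, FOO_CMD_BUSY, FOO_CMD_BUSY,
 *				10, 1000);
 *	if (tmp & FOO_CMD_BUSY)
 *		return -EBUSY;
 */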
6212 
6213 /*
6214  * Dummy port_ops
6215  */
6216 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6217 {
6218 	return AC_ERR_SYSTEM;
6219 }
6220 
6221 static void ata_dummy_error_handler(struct ata_port *ap)
6222 {
6223 	/* truly dummy */
6224 }
6225 
6226 struct ata_port_operations ata_dummy_port_ops = {
6227 	.qc_prep		= ata_noop_qc_prep,
6228 	.qc_issue		= ata_dummy_qc_issue,
6229 	.error_handler		= ata_dummy_error_handler,
6230 };
6231 
6232 const struct ata_port_info ata_dummy_port_info = {
6233 	.port_ops		= &ata_dummy_port_ops,
6234 };
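
/*
 * Illustrative sketch, not part of libata: an LLD marks a channel it knows
 * to be absent or broken by handing ata_dummy_port_info to the allocator;
 * commands issued to that port then fail with AC_ERR_SYSTEM via the dummy
 * ops above.  foo_port_info is a placeholder.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */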
6235 
6236 /*
6237  * libata is essentially a library of internal helper functions for
6238  * low-level ATA host controller drivers.  As such, the API/ABI is
6239  * likely to change as new drivers are added and updated.
6240  * Do not depend on ABI/API stability.
6241  */
6242 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6243 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6244 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6245 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6246 EXPORT_SYMBOL_GPL(sata_port_ops);
6247 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6248 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6249 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6250 EXPORT_SYMBOL_GPL(ata_host_init);
6251 EXPORT_SYMBOL_GPL(ata_host_alloc);
6252 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6253 EXPORT_SYMBOL_GPL(ata_host_start);
6254 EXPORT_SYMBOL_GPL(ata_host_register);
6255 EXPORT_SYMBOL_GPL(ata_host_activate);
6256 EXPORT_SYMBOL_GPL(ata_host_detach);
6257 EXPORT_SYMBOL_GPL(ata_sg_init);
6258 EXPORT_SYMBOL_GPL(ata_qc_complete);
6259 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6260 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6261 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6262 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6263 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6264 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6265 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6266 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6267 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6268 EXPORT_SYMBOL_GPL(ata_mode_string);
6269 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6270 EXPORT_SYMBOL_GPL(ata_port_start);
6271 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6272 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6273 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6274 EXPORT_SYMBOL_GPL(ata_port_probe);
6275 EXPORT_SYMBOL_GPL(ata_dev_disable);
6276 EXPORT_SYMBOL_GPL(sata_set_spd);
6277 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6278 EXPORT_SYMBOL_GPL(sata_link_debounce);
6279 EXPORT_SYMBOL_GPL(sata_link_resume);
6280 EXPORT_SYMBOL_GPL(ata_std_prereset);
6281 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6282 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6283 EXPORT_SYMBOL_GPL(ata_std_postreset);
6284 EXPORT_SYMBOL_GPL(ata_dev_classify);
6285 EXPORT_SYMBOL_GPL(ata_dev_pair);
6286 EXPORT_SYMBOL_GPL(ata_port_disable);
6287 EXPORT_SYMBOL_GPL(ata_ratelimit);
6288 EXPORT_SYMBOL_GPL(ata_wait_register);
6289 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6290 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6291 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6292 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6293 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6294 EXPORT_SYMBOL_GPL(sata_scr_valid);
6295 EXPORT_SYMBOL_GPL(sata_scr_read);
6296 EXPORT_SYMBOL_GPL(sata_scr_write);
6297 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6298 EXPORT_SYMBOL_GPL(ata_link_online);
6299 EXPORT_SYMBOL_GPL(ata_link_offline);
6300 #ifdef CONFIG_PM
6301 EXPORT_SYMBOL_GPL(ata_host_suspend);
6302 EXPORT_SYMBOL_GPL(ata_host_resume);
6303 #endif /* CONFIG_PM */
6304 EXPORT_SYMBOL_GPL(ata_id_string);
6305 EXPORT_SYMBOL_GPL(ata_id_c_string);
6306 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6307 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6308 
6309 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6310 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6311 EXPORT_SYMBOL_GPL(ata_timing_compute);
6312 EXPORT_SYMBOL_GPL(ata_timing_merge);
6313 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6314 
6315 #ifdef CONFIG_PCI
6316 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6317 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6318 #ifdef CONFIG_PM
6319 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6320 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6321 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6322 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6323 #endif /* CONFIG_PM */
6324 #endif /* CONFIG_PCI */
6325 
6326 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6327 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6328 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6329 EXPORT_SYMBOL_GPL(ata_port_desc);
6330 #ifdef CONFIG_PCI
6331 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6332 #endif /* CONFIG_PCI */
6333 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6334 EXPORT_SYMBOL_GPL(ata_link_abort);
6335 EXPORT_SYMBOL_GPL(ata_port_abort);
6336 EXPORT_SYMBOL_GPL(ata_port_freeze);
6337 EXPORT_SYMBOL_GPL(sata_async_notification);
6338 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6339 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6340 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6341 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6342 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6343 EXPORT_SYMBOL_GPL(ata_do_eh);
6344 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6345 
6346 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6347 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6348 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6349 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6350 EXPORT_SYMBOL_GPL(ata_cable_sata);
6351