1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <scsi/scsi.h>
60 #include <scsi/scsi_cmnd.h>
61 #include <scsi/scsi_host.h>
62 #include <linux/libata.h>
63 #include <asm/byteorder.h>
64 #include <linux/cdrom.h>
65 
66 #include "libata.h"
67 
68 
69 /* debounce timing parameters in msecs { interval, duration, timeout } */
70 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
71 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
72 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
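
/*
 * Illustrative use, as a sketch (sata_link_debounce() is the consumer;
 * it lives further down in libata): with sata_deb_timing_normal the
 * link's SStatus register is polled every 5 msecs until the DET field
 * reads back stable for 100 msecs, giving up after 2000 msecs:
 *
 *	rc = sata_link_debounce(link, sata_deb_timing_normal, deadline);
 */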
73 
74 const struct ata_port_operations ata_base_port_ops = {
75 	.prereset		= ata_std_prereset,
76 	.postreset		= ata_std_postreset,
77 	.error_handler		= ata_std_error_handler,
78 };
79 
80 const struct ata_port_operations sata_port_ops = {
81 	.inherits		= &ata_base_port_ops,
82 
83 	.qc_defer		= ata_std_qc_defer,
84 	.hardreset		= sata_std_hardreset,
85 };
86 
87 static unsigned int ata_dev_init_params(struct ata_device *dev,
88 					u16 heads, u16 sectors);
89 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
90 static unsigned int ata_dev_set_feature(struct ata_device *dev,
91 					u8 enable, u8 feature);
92 static void ata_dev_xfermask(struct ata_device *dev);
93 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
94 
95 unsigned int ata_print_id = 1;
96 static struct workqueue_struct *ata_wq;
97 
98 struct workqueue_struct *ata_aux_wq;
99 
100 struct ata_force_param {
101 	const char	*name;
102 	unsigned int	cbl;
103 	int		spd_limit;
104 	unsigned long	xfer_mask;
105 	unsigned int	horkage_on;
106 	unsigned int	horkage_off;
107 	unsigned int	lflags;
108 };
109 
110 struct ata_force_ent {
111 	int			port;
112 	int			device;
113 	struct ata_force_param	param;
114 };
115 
116 static struct ata_force_ent *ata_force_tbl;
117 static int ata_force_tbl_size;
118 
119 static char ata_force_param_buf[PAGE_SIZE] __initdata;
120 /* param_buf is thrown away after initialization, disallow read */
121 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
122 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
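
/*
 * A few illustrative force strings (the authoritative token list is in
 * Documentation/kernel-parameters.txt):
 *
 *	libata.force=40c		treat every cable as 40-wire
 *	libata.force=1:noncq		turn off NCQ on all devices on port 1
 *	libata.force=1.00:40c,udma4	cable type and transfer mode combined,
 *					applied to port 1's first device only
 */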
123 
124 static int atapi_enabled = 1;
125 module_param(atapi_enabled, int, 0444);
126 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
127 
128 static int atapi_dmadir = 0;
129 module_param(atapi_dmadir, int, 0444);
130 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
131 
132 int atapi_passthru16 = 1;
133 module_param(atapi_passthru16, int, 0444);
134 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
135 
136 int libata_fua = 0;
137 module_param_named(fua, libata_fua, int, 0444);
138 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
139 
140 static int ata_ignore_hpa;
141 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
142 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
143 
144 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
145 module_param_named(dma, libata_dma_mask, int, 0444);
146 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
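
/*
 * For example, booting with libata.dma=0x3 keeps DMA enabled for ATA
 * disks and ATAPI devices while forcing CompactFlash cards to PIO.
 */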
147 
148 static int ata_probe_timeout;
149 module_param(ata_probe_timeout, int, 0444);
150 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
151 
152 int libata_noacpi = 0;
153 module_param_named(noacpi, libata_noacpi, int, 0444);
154 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
155 
156 int libata_allow_tpm = 0;
157 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
158 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
159 
160 MODULE_AUTHOR("Jeff Garzik");
161 MODULE_DESCRIPTION("Library module for ATA devices");
162 MODULE_LICENSE("GPL");
163 MODULE_VERSION(DRV_VERSION);
164 
165 
166 /**
167  *	ata_force_cbl - force cable type according to libata.force
168  *	@ap: ATA port of interest
169  *
170  *	Force cable type according to libata.force and whine about it.
171  *	The last entry which has matching port number is used, so it
172  *	can be specified as part of device force parameters.  For
173  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
174  *	same effect.
175  *
176  *	LOCKING:
177  *	EH context.
178  */
179 void ata_force_cbl(struct ata_port *ap)
180 {
181 	int i;
182 
183 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
184 		const struct ata_force_ent *fe = &ata_force_tbl[i];
185 
186 		if (fe->port != -1 && fe->port != ap->print_id)
187 			continue;
188 
189 		if (fe->param.cbl == ATA_CBL_NONE)
190 			continue;
191 
192 		ap->cbl = fe->param.cbl;
193 		ata_port_printk(ap, KERN_NOTICE,
194 				"FORCE: cable set to %s\n", fe->param.name);
195 		return;
196 	}
197 }
198 
199 /**
200  *	ata_force_link_limits - force link limits according to libata.force
201  *	@link: ATA link of interest
202  *
203  *	Force link flags and SATA spd limit according to libata.force
204  *	and whine about it.  When only the port part is specified
205  *	(e.g. 1:), the limit applies to all links connected to both
206  *	the host link and all fan-out ports connected via PMP.  If the
207  *	device part is specified as 0 (e.g. 1.00:), it specifies the
208  *	first fan-out link not the host link.  Device number 15 always
209  *	points to the host link whether PMP is attached or not.
210  *
211  *	LOCKING:
212  *	EH context.
213  */
214 static void ata_force_link_limits(struct ata_link *link)
215 {
216 	bool did_spd = false;
217 	int linkno, i;
218 
219 	if (ata_is_host_link(link))
220 		linkno = 15;
221 	else
222 		linkno = link->pmp;
223 
224 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
225 		const struct ata_force_ent *fe = &ata_force_tbl[i];
226 
227 		if (fe->port != -1 && fe->port != link->ap->print_id)
228 			continue;
229 
230 		if (fe->device != -1 && fe->device != linkno)
231 			continue;
232 
233 		/* only honor the first spd limit */
234 		if (!did_spd && fe->param.spd_limit) {
235 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
236 			ata_link_printk(link, KERN_NOTICE,
237 					"FORCE: PHY spd limit set to %s\n",
238 					fe->param.name);
239 			did_spd = true;
240 		}
241 
242 		/* let lflags stack */
243 		if (fe->param.lflags) {
244 			link->flags |= fe->param.lflags;
245 			ata_link_printk(link, KERN_NOTICE,
246 					"FORCE: link flag 0x%x forced -> 0x%x\n",
247 					fe->param.lflags, link->flags);
248 		}
249 	}
250 }
251 
252 /**
253  *	ata_force_xfermask - force xfermask according to libata.force
254  *	@dev: ATA device of interest
255  *
256  *	Force xfer_mask according to libata.force and whine about it.
257  *	For consistency with link selection, device number 15 selects
258  *	the first device connected to the host link.
259  *
260  *	LOCKING:
261  *	EH context.
262  */
263 static void ata_force_xfermask(struct ata_device *dev)
264 {
265 	int devno = dev->link->pmp + dev->devno;
266 	int alt_devno = devno;
267 	int i;
268 
269 	/* allow n.15 for the first device attached to host port */
270 	if (ata_is_host_link(dev->link) && devno == 0)
271 		alt_devno = 15;
272 
273 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
274 		const struct ata_force_ent *fe = &ata_force_tbl[i];
275 		unsigned long pio_mask, mwdma_mask, udma_mask;
276 
277 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
278 			continue;
279 
280 		if (fe->device != -1 && fe->device != devno &&
281 		    fe->device != alt_devno)
282 			continue;
283 
284 		if (!fe->param.xfer_mask)
285 			continue;
286 
287 		ata_unpack_xfermask(fe->param.xfer_mask,
288 				    &pio_mask, &mwdma_mask, &udma_mask);
289 		if (udma_mask)
290 			dev->udma_mask = udma_mask;
291 		else if (mwdma_mask) {
292 			dev->udma_mask = 0;
293 			dev->mwdma_mask = mwdma_mask;
294 		} else {
295 			dev->udma_mask = 0;
296 			dev->mwdma_mask = 0;
297 			dev->pio_mask = pio_mask;
298 		}
299 
300 		ata_dev_printk(dev, KERN_NOTICE,
301 			"FORCE: xfer_mask set to %s\n", fe->param.name);
302 		return;
303 	}
304 }
305 
306 /**
307  *	ata_force_horkage - force horkage according to libata.force
308  *	@dev: ATA device of interest
309  *
310  *	Force horkage according to libata.force and whine about it.
311  *	For consistency with link selection, device number 15 selects
312  *	the first device connected to the host link.
313  *
314  *	LOCKING:
315  *	EH context.
316  */
317 static void ata_force_horkage(struct ata_device *dev)
318 {
319 	int devno = dev->link->pmp + dev->devno;
320 	int alt_devno = devno;
321 	int i;
322 
323 	/* allow n.15 for the first device attached to host port */
324 	if (ata_is_host_link(dev->link) && devno == 0)
325 		alt_devno = 15;
326 
327 	for (i = 0; i < ata_force_tbl_size; i++) {
328 		const struct ata_force_ent *fe = &ata_force_tbl[i];
329 
330 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
331 			continue;
332 
333 		if (fe->device != -1 && fe->device != devno &&
334 		    fe->device != alt_devno)
335 			continue;
336 
337 		if (!(~dev->horkage & fe->param.horkage_on) &&
338 		    !(dev->horkage & fe->param.horkage_off))
339 			continue;
340 
341 		dev->horkage |= fe->param.horkage_on;
342 		dev->horkage &= ~fe->param.horkage_off;
343 
344 		ata_dev_printk(dev, KERN_NOTICE,
345 			"FORCE: horkage modified (%s)\n", fe->param.name);
346 	}
347 }
348 
349 /**
350  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
351  *	@opcode: SCSI opcode
352  *
353  *	Determine ATAPI command type from @opcode.
354  *
355  *	LOCKING:
356  *	None.
357  *
358  *	RETURNS:
359  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
360  */
361 int atapi_cmd_type(u8 opcode)
362 {
363 	switch (opcode) {
364 	case GPCMD_READ_10:
365 	case GPCMD_READ_12:
366 		return ATAPI_READ;
367 
368 	case GPCMD_WRITE_10:
369 	case GPCMD_WRITE_12:
370 	case GPCMD_WRITE_AND_VERIFY_10:
371 		return ATAPI_WRITE;
372 
373 	case GPCMD_READ_CD:
374 	case GPCMD_READ_CD_MSF:
375 		return ATAPI_READ_CD;
376 
377 	case ATA_16:
378 	case ATA_12:
379 		if (atapi_passthru16)
380 			return ATAPI_PASS_THRU;
381 		/* fall thru */
382 	default:
383 		return ATAPI_MISC;
384 	}
385 }
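
/*
 * For example, atapi_cmd_type(GPCMD_READ_10) returns ATAPI_READ, while
 * atapi_cmd_type(ATA_12) returns ATAPI_PASS_THRU only as long as the
 * atapi_passthru16 module parameter keeps its default of 1; with
 * atapi_passthru16=0 the same opcode falls through to ATAPI_MISC.
 */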
386 
387 /**
388  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
389  *	@tf: Taskfile to convert
390  *	@pmp: Port multiplier port
391  *	@is_cmd: This FIS is for command
392  *	@fis: Buffer into which data will be output
393  *
394  *	Converts a standard ATA taskfile to a Serial ATA
395  *	FIS structure (Register - Host to Device).
396  *
397  *	LOCKING:
398  *	Inherited from caller.
399  */
400 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
401 {
402 	fis[0] = 0x27;			/* Register - Host to Device FIS */
403 	fis[1] = pmp & 0xf;		/* Port multiplier number */
404 	if (is_cmd)
405 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
406 
407 	fis[2] = tf->command;
408 	fis[3] = tf->feature;
409 
410 	fis[4] = tf->lbal;
411 	fis[5] = tf->lbam;
412 	fis[6] = tf->lbah;
413 	fis[7] = tf->device;
414 
415 	fis[8] = tf->hob_lbal;
416 	fis[9] = tf->hob_lbam;
417 	fis[10] = tf->hob_lbah;
418 	fis[11] = tf->hob_feature;
419 
420 	fis[12] = tf->nsect;
421 	fis[13] = tf->hob_nsect;
422 	fis[14] = 0;
423 	fis[15] = tf->ctl;
424 
425 	fis[16] = 0;
426 	fis[17] = 0;
427 	fis[18] = 0;
428 	fis[19] = 0;
429 }
430 
431 /**
432  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
433  *	@fis: Buffer from which data will be input
434  *	@tf: Taskfile to output
435  *
436  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
437  *
438  *	LOCKING:
439  *	Inherited from caller.
440  */
441 
442 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
443 {
444 	tf->command	= fis[2];	/* status */
445 	tf->feature	= fis[3];	/* error */
446 
447 	tf->lbal	= fis[4];
448 	tf->lbam	= fis[5];
449 	tf->lbah	= fis[6];
450 	tf->device	= fis[7];
451 
452 	tf->hob_lbal	= fis[8];
453 	tf->hob_lbam	= fis[9];
454 	tf->hob_lbah	= fis[10];
455 
456 	tf->nsect	= fis[12];
457 	tf->hob_nsect	= fis[13];
458 }
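
/*
 * Note the asymmetry with ata_tf_to_fis(): the FIS decoded here is a
 * Register - Device to Host FIS (type 0x34), in which byte 2 carries
 * the status register and byte 3 the error register, which is why they
 * land in tf->command and tf->feature respectively.
 */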
459 
460 static const u8 ata_rw_cmds[] = {
461 	/* pio multi */
462 	ATA_CMD_READ_MULTI,
463 	ATA_CMD_WRITE_MULTI,
464 	ATA_CMD_READ_MULTI_EXT,
465 	ATA_CMD_WRITE_MULTI_EXT,
466 	0,
467 	0,
468 	0,
469 	ATA_CMD_WRITE_MULTI_FUA_EXT,
470 	/* pio */
471 	ATA_CMD_PIO_READ,
472 	ATA_CMD_PIO_WRITE,
473 	ATA_CMD_PIO_READ_EXT,
474 	ATA_CMD_PIO_WRITE_EXT,
475 	0,
476 	0,
477 	0,
478 	0,
479 	/* dma */
480 	ATA_CMD_READ,
481 	ATA_CMD_WRITE,
482 	ATA_CMD_READ_EXT,
483 	ATA_CMD_WRITE_EXT,
484 	0,
485 	0,
486 	0,
487 	ATA_CMD_WRITE_FUA_EXT
488 };
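
/*
 * The table above is indexed as index + fua + lba48 + write: index
 * selects the protocol block (0 = PIO multi, 8 = PIO, 16 = DMA), FUA
 * adds 4, LBA48 adds 2 and a write adds 1.  Two worked examples:
 *
 *	DMA + FUA + LBA48 + write : 16 + 4 + 2 + 1 = 23 -> ATA_CMD_WRITE_FUA_EXT
 *	PIO multi + LBA48 + read  :  0 + 0 + 2 + 0 =  2 -> ATA_CMD_READ_MULTI_EXT
 *
 * The zero entries mark invalid combinations (e.g. FUA without LBA48)
 * and make ata_rwcmd_protocol() below fail with -1.
 */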
489 
490 /**
491  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
492  *	@tf: command to examine and configure
493  *	@dev: device tf belongs to
494  *
495  *	Examine the device configuration and tf->flags to calculate
496  *	the proper read/write commands and protocol to use.
497  *
498  *	LOCKING:
499  *	caller.
500  */
501 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
502 {
503 	u8 cmd;
504 
505 	int index, fua, lba48, write;
506 
507 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
508 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
509 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
510 
511 	if (dev->flags & ATA_DFLAG_PIO) {
512 		tf->protocol = ATA_PROT_PIO;
513 		index = dev->multi_count ? 0 : 8;
514 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
515 		/* Unable to use DMA due to host limitation */
516 		tf->protocol = ATA_PROT_PIO;
517 		index = dev->multi_count ? 0 : 8;
518 	} else {
519 		tf->protocol = ATA_PROT_DMA;
520 		index = 16;
521 	}
522 
523 	cmd = ata_rw_cmds[index + fua + lba48 + write];
524 	if (cmd) {
525 		tf->command = cmd;
526 		return 0;
527 	}
528 	return -1;
529 }
530 
531 /**
532  *	ata_tf_read_block - Read block address from ATA taskfile
533  *	@tf: ATA taskfile of interest
534  *	@dev: ATA device @tf belongs to
535  *
536  *	LOCKING:
537  *	None.
538  *
539  *	Read block address from @tf.  This function can handle all
540  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
541  *	flags select the address format to use.
542  *
543  *	RETURNS:
544  *	Block address read from @tf.
545  */
546 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
547 {
548 	u64 block = 0;
549 
550 	if (tf->flags & ATA_TFLAG_LBA) {
551 		if (tf->flags & ATA_TFLAG_LBA48) {
552 			block |= (u64)tf->hob_lbah << 40;
553 			block |= (u64)tf->hob_lbam << 32;
554 			block |= (u64)tf->hob_lbal << 24;
555 		} else
556 			block |= (tf->device & 0xf) << 24;
557 
558 		block |= tf->lbah << 16;
559 		block |= tf->lbam << 8;
560 		block |= tf->lbal;
561 	} else {
562 		u32 cyl, head, sect;
563 
564 		cyl = tf->lbam | (tf->lbah << 8);
565 		head = tf->device & 0xf;
566 		sect = tf->lbal;
567 
568 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
569 	}
570 
571 	return block;
572 }
573 
574 /**
575  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
576  *	@tf: Target ATA taskfile
577  *	@dev: ATA device @tf belongs to
578  *	@block: Block address
579  *	@n_block: Number of blocks
580  *	@tf_flags: RW/FUA etc...
581  *	@tag: tag
582  *
583  *	LOCKING:
584  *	None.
585  *
586  *	Build ATA taskfile @tf for read/write request described by
587  *	@block, @n_block, @tf_flags and @tag on @dev.
588  *
589  *	RETURNS:
590  *
591  *	0 on success, -ERANGE if the request is too large for @dev,
592  *	-EINVAL if the request is invalid.
593  */
594 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
595 		    u64 block, u32 n_block, unsigned int tf_flags,
596 		    unsigned int tag)
597 {
598 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
599 	tf->flags |= tf_flags;
600 
601 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
602 		/* yay, NCQ */
603 		if (!lba_48_ok(block, n_block))
604 			return -ERANGE;
605 
606 		tf->protocol = ATA_PROT_NCQ;
607 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
608 
609 		if (tf->flags & ATA_TFLAG_WRITE)
610 			tf->command = ATA_CMD_FPDMA_WRITE;
611 		else
612 			tf->command = ATA_CMD_FPDMA_READ;
613 
614 		tf->nsect = tag << 3;
615 		tf->hob_feature = (n_block >> 8) & 0xff;
616 		tf->feature = n_block & 0xff;
617 
618 		tf->hob_lbah = (block >> 40) & 0xff;
619 		tf->hob_lbam = (block >> 32) & 0xff;
620 		tf->hob_lbal = (block >> 24) & 0xff;
621 		tf->lbah = (block >> 16) & 0xff;
622 		tf->lbam = (block >> 8) & 0xff;
623 		tf->lbal = block & 0xff;
624 
625 		tf->device = 1 << 6;
626 		if (tf->flags & ATA_TFLAG_FUA)
627 			tf->device |= 1 << 7;
628 	} else if (dev->flags & ATA_DFLAG_LBA) {
629 		tf->flags |= ATA_TFLAG_LBA;
630 
631 		if (lba_28_ok(block, n_block)) {
632 			/* use LBA28 */
633 			tf->device |= (block >> 24) & 0xf;
634 		} else if (lba_48_ok(block, n_block)) {
635 			if (!(dev->flags & ATA_DFLAG_LBA48))
636 				return -ERANGE;
637 
638 			/* use LBA48 */
639 			tf->flags |= ATA_TFLAG_LBA48;
640 
641 			tf->hob_nsect = (n_block >> 8) & 0xff;
642 
643 			tf->hob_lbah = (block >> 40) & 0xff;
644 			tf->hob_lbam = (block >> 32) & 0xff;
645 			tf->hob_lbal = (block >> 24) & 0xff;
646 		} else
647 			/* request too large even for LBA48 */
648 			return -ERANGE;
649 
650 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
651 			return -EINVAL;
652 
653 		tf->nsect = n_block & 0xff;
654 
655 		tf->lbah = (block >> 16) & 0xff;
656 		tf->lbam = (block >> 8) & 0xff;
657 		tf->lbal = block & 0xff;
658 
659 		tf->device |= ATA_LBA;
660 	} else {
661 		/* CHS */
662 		u32 sect, head, cyl, track;
663 
664 		/* The request -may- be too large for CHS addressing. */
665 		if (!lba_28_ok(block, n_block))
666 			return -ERANGE;
667 
668 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
669 			return -EINVAL;
670 
671 		/* Convert LBA to CHS */
672 		track = (u32)block / dev->sectors;
673 		cyl   = track / dev->heads;
674 		head  = track % dev->heads;
675 		sect  = (u32)block % dev->sectors + 1;
676 
677 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
678 			(u32)block, track, cyl, head, sect);
679 
680 		/* Check whether the converted CHS can fit.
681 		   Cylinder: 0-65535
682 		   Head: 0-15
683 		   Sector: 1-255 */
684 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
685 			return -ERANGE;
686 
687 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
688 		tf->lbal = sect;
689 		tf->lbam = cyl;
690 		tf->lbah = cyl >> 8;
691 		tf->device |= head;
692 	}
693 
694 	return 0;
695 }
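
/*
 * Worked CHS example for ata_build_rw_tf() (made-up geometry): with
 * dev->heads == 16 and dev->sectors == 63, block 2000 converts as
 *
 *	track = 2000 / 63     = 31
 *	cyl   = 31 / 16       = 1
 *	head  = 31 % 16       = 15
 *	sect  = 2000 % 63 + 1 = 48
 *
 * and ata_tf_read_block() inverts it: (1 * 16 + 15) * 63 + 48 - 1 == 2000.
 */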
696 
697 /**
698  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
699  *	@pio_mask: pio_mask
700  *	@mwdma_mask: mwdma_mask
701  *	@udma_mask: udma_mask
702  *
703  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
704  *	unsigned int xfer_mask.
705  *
706  *	LOCKING:
707  *	None.
708  *
709  *	RETURNS:
710  *	Packed xfer_mask.
711  */
712 unsigned long ata_pack_xfermask(unsigned long pio_mask,
713 				unsigned long mwdma_mask,
714 				unsigned long udma_mask)
715 {
716 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
717 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
718 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
719 }
720 
721 /**
722  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
723  *	@xfer_mask: xfer_mask to unpack
724  *	@pio_mask: resulting pio_mask
725  *	@mwdma_mask: resulting mwdma_mask
726  *	@udma_mask: resulting udma_mask
727  *
728  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
729  *	Any NULL destination masks will be ignored.
730  */
731 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
732 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
733 {
734 	if (pio_mask)
735 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
736 	if (mwdma_mask)
737 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
738 	if (udma_mask)
739 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
740 }
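
/*
 * The two helpers above are exact inverses as long as each input mask
 * fits its field.  A minimal sketch:
 *
 *	unsigned long pio = 0x1f, mwdma = 0x07, udma = 0x3f;
 *	unsigned long xfer_mask = ata_pack_xfermask(pio, mwdma, udma);
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * leaves pio == 0x1f (PIO0-4), mwdma == 0x07 (MWDMA0-2) and
 * udma == 0x3f (UDMA0-5) unchanged.
 */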
741 
742 static const struct ata_xfer_ent {
743 	int shift, bits;
744 	u8 base;
745 } ata_xfer_tbl[] = {
746 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
747 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
748 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
749 	{ -1, },
750 };
751 
752 /**
753  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
754  *	@xfer_mask: xfer_mask of interest
755  *
756  *	Return matching XFER_* value for @xfer_mask.  Only the highest
757  *	bit of @xfer_mask is considered.
758  *
759  *	LOCKING:
760  *	None.
761  *
762  *	RETURNS:
763  *	Matching XFER_* value, 0xff if no match found.
764  */
765 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
766 {
767 	int highbit = fls(xfer_mask) - 1;
768 	const struct ata_xfer_ent *ent;
769 
770 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
771 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
772 			return ent->base + highbit - ent->shift;
773 	return 0xff;
774 }
775 
776 /**
777  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
778  *	@xfer_mode: XFER_* of interest
779  *
780  *	Return matching xfer_mask for @xfer_mode.
781  *
782  *	LOCKING:
783  *	None.
784  *
785  *	RETURNS:
786  *	Matching xfer_mask, 0 if no match found.
787  */
788 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
789 {
790 	const struct ata_xfer_ent *ent;
791 
792 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
793 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
794 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
795 				& ~((1 << ent->shift) - 1);
796 	return 0;
797 }
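
/*
 * ata_xfer_mode2mask() and ata_xfer_mask2mode() round-trip: e.g.
 * ata_xfer_mode2mask(XFER_MW_DMA_2) returns the MWDMA0-2 bits (every
 * MWDMA mode up to and including MWDMA2), and passing that mask back
 * through ata_xfer_mask2mode() yields XFER_MW_DMA_2 again, since only
 * the highest set bit is considered.
 */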
798 
799 /**
800  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
801  *	@xfer_mode: XFER_* of interest
802  *
803  *	Return matching xfer_shift for @xfer_mode.
804  *
805  *	LOCKING:
806  *	None.
807  *
808  *	RETURNS:
809  *	Matching xfer_shift, -1 if no match found.
810  */
811 int ata_xfer_mode2shift(unsigned long xfer_mode)
812 {
813 	const struct ata_xfer_ent *ent;
814 
815 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
816 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
817 			return ent->shift;
818 	return -1;
819 }
820 
821 /**
822  *	ata_mode_string - convert xfer_mask to string
823  *	@xfer_mask: mask of bits supported; only highest bit counts.
824  *
825  *	Determine the string which represents the highest speed
826  *	(highest bit in @xfer_mask).
827  *
828  *	LOCKING:
829  *	None.
830  *
831  *	RETURNS:
832  *	Constant C string representing highest speed listed in
833  *	@xfer_mask, or the constant C string "<n/a>".
834  */
835 const char *ata_mode_string(unsigned long xfer_mask)
836 {
837 	static const char * const xfer_mode_str[] = {
838 		"PIO0",
839 		"PIO1",
840 		"PIO2",
841 		"PIO3",
842 		"PIO4",
843 		"PIO5",
844 		"PIO6",
845 		"MWDMA0",
846 		"MWDMA1",
847 		"MWDMA2",
848 		"MWDMA3",
849 		"MWDMA4",
850 		"UDMA/16",
851 		"UDMA/25",
852 		"UDMA/33",
853 		"UDMA/44",
854 		"UDMA/66",
855 		"UDMA/100",
856 		"UDMA/133",
857 		"UDMA7",
858 	};
859 	int highbit;
860 
861 	highbit = fls(xfer_mask) - 1;
862 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
863 		return xfer_mode_str[highbit];
864 	return "<n/a>";
865 }
866 
867 static const char *sata_spd_string(unsigned int spd)
868 {
869 	static const char * const spd_str[] = {
870 		"1.5 Gbps",
871 		"3.0 Gbps",
872 	};
873 
874 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
875 		return "<unknown>";
876 	return spd_str[spd - 1];
877 }
878 
879 void ata_dev_disable(struct ata_device *dev)
880 {
881 	if (ata_dev_enabled(dev)) {
882 		if (ata_msg_drv(dev->link->ap))
883 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
884 		ata_acpi_on_disable(dev);
885 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
886 					     ATA_DNXFER_QUIET);
887 		dev->class++;
888 	}
889 }
890 
891 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
892 {
893 	struct ata_link *link = dev->link;
894 	struct ata_port *ap = link->ap;
895 	u32 scontrol;
896 	unsigned int err_mask;
897 	int rc;
898 
899 	/*
900 	 * disallow DIPM for drivers which haven't set
901 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
902 	 * phy ready will be set in the interrupt status on
903 	 * state changes, which will cause some drivers to
904 	 * think there are errors - additionally drivers will
905 	 * need to disable hot plug.
906 	 */
907 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
908 		ap->pm_policy = NOT_AVAILABLE;
909 		return -EINVAL;
910 	}
911 
912 	/*
913 	 * For DIPM, we will only enable it for the
914 	 * min_power setting.
915 	 *
916 	 * Why?  Because disks are too stupid to know that
917 	 * if the host rejects a request to go to SLUMBER
918 	 * they should retry at PARTIAL; instead they
919 	 * just give up.  So, for medium_power to
920 	 * work at all, we need to only allow HIPM.
921 	 */
922 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
923 	if (rc)
924 		return rc;
925 
926 	switch (policy) {
927 	case MIN_POWER:
928 		/* no restrictions on IPM transitions */
929 		scontrol &= ~(0x3 << 8);
930 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
931 		if (rc)
932 			return rc;
933 
934 		/* enable DIPM */
935 		if (dev->flags & ATA_DFLAG_DIPM)
936 			err_mask = ata_dev_set_feature(dev,
937 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
938 		break;
939 	case MEDIUM_POWER:
940 		/* allow IPM to PARTIAL */
941 		scontrol &= ~(0x1 << 8);
942 		scontrol |= (0x2 << 8);
943 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
944 		if (rc)
945 			return rc;
946 
947 		/*
948 		 * we don't have to disable DIPM since the IPM flags
949 		 * disallow transitions to SLUMBER, which effectively
950 		 * disables DIPM if the device does not support PARTIAL
951 		 */
952 		break;
953 	case NOT_AVAILABLE:
954 	case MAX_PERFORMANCE:
955 		/* disable all IPM transitions */
956 		scontrol |= (0x3 << 8);
957 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
958 		if (rc)
959 			return rc;
960 
961 		/*
962 		 * we don't have to disable DIPM since the IPM flags
963 		 * disallow all transitions, which effectively
964 		 * disables DIPM anyway.
965 		 */
966 		break;
967 	}
968 
969 	/* FIXME: handle SET FEATURES failure */
970 	(void) err_mask;
971 
972 	return 0;
973 }
974 
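/*
 * For reference, the switch above programs the IPM field (bits 11:8)
 * of SControl: setting bit 8 forbids transitions to PARTIAL, setting
 * bit 9 forbids transitions to SLUMBER.  Thus MIN_POWER clears both,
 * MEDIUM_POWER permits PARTIAL but not SLUMBER (0x2 << 8), and
 * NOT_AVAILABLE/MAX_PERFORMANCE forbid both (0x3 << 8).
 */
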
975 /**
976  *	ata_dev_enable_pm - enable SATA interface power management
977  *	@dev:  device to enable power management
978  *	@policy: the link power management policy
979  *
980  *	Enable SATA Interface power management.  This will enable
981  *	Device Interface Power Management (DIPM) for the min_power
982  *	policy, and then call driver-specific callbacks for
983  *	enabling Host Initiated Power Management (HIPM).
984  *
985  *	Locking: Caller.
986  *	Returns: void; ap->pm_policy falls back to MAX_PERFORMANCE on failure.
987  */
988 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
989 {
990 	int rc = 0;
991 	struct ata_port *ap = dev->link->ap;
992 
993 	/* set HIPM first, then DIPM */
994 	if (ap->ops->enable_pm)
995 		rc = ap->ops->enable_pm(ap, policy);
996 	if (rc)
997 		goto enable_pm_out;
998 	rc = ata_dev_set_dipm(dev, policy);
999 
1000 enable_pm_out:
1001 	if (rc)
1002 		ap->pm_policy = MAX_PERFORMANCE;
1003 	else
1004 		ap->pm_policy = policy;
1005 	return /* rc */;	/* hopefully we can use 'rc' eventually */
1006 }
1007 
1008 #ifdef CONFIG_PM
1009 /**
1010  *	ata_dev_disable_pm - disable SATA interface power management
1011  *	@dev: device to disable power management
1012  *
1013  *	Disable SATA Interface power management.  This will disable
1014  *	Device Interface Power Management (DIPM) without changing
1015  *	policy, and call driver-specific callbacks for disabling
1016  *	Host Initiated Power Management (HIPM).
1017  *
1018  *	Locking: Caller.
1019  *	Returns: void
1020  */
1021 static void ata_dev_disable_pm(struct ata_device *dev)
1022 {
1023 	struct ata_port *ap = dev->link->ap;
1024 
1025 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1026 	if (ap->ops->disable_pm)
1027 		ap->ops->disable_pm(ap);
1028 }
1029 #endif	/* CONFIG_PM */
1030 
1031 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1032 {
1033 	ap->pm_policy = policy;
1034 	ap->link.eh_info.action |= ATA_EH_LPM;
1035 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1036 	ata_port_schedule_eh(ap);
1037 }
1038 
1039 #ifdef CONFIG_PM
1040 static void ata_lpm_enable(struct ata_host *host)
1041 {
1042 	struct ata_link *link;
1043 	struct ata_port *ap;
1044 	struct ata_device *dev;
1045 	int i;
1046 
1047 	for (i = 0; i < host->n_ports; i++) {
1048 		ap = host->ports[i];
1049 		ata_port_for_each_link(link, ap) {
1050 			ata_link_for_each_dev(dev, link)
1051 				ata_dev_disable_pm(dev);
1052 		}
1053 	}
1054 }
1055 
1056 static void ata_lpm_disable(struct ata_host *host)
1057 {
1058 	int i;
1059 
1060 	for (i = 0; i < host->n_ports; i++) {
1061 		struct ata_port *ap = host->ports[i];
1062 		ata_lpm_schedule(ap, ap->pm_policy);
1063 	}
1064 }
1065 #endif	/* CONFIG_PM */
1066 
1067 /**
1068  *	ata_dev_classify - determine device type based on ATA-spec signature
1069  *	@tf: ATA taskfile register set for device to be identified
1070  *
1071  *	Determine from taskfile register contents whether a device is
1072  *	ATA or ATAPI, as per "Signature and persistence" section
1073  *	of ATA/PI spec (volume 1, sect 5.14).
1074  *
1075  *	LOCKING:
1076  *	None.
1077  *
1078  *	RETURNS:
1079  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1080  *	%ATA_DEV_UNKNOWN in the event of failure.
1081  */
1082 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1083 {
1084 	/* Apple's open source Darwin code hints that some devices only
1085 	 * put a proper signature into the LBA mid/high registers, so
1086 	 * we only check those.  It's sufficient for uniqueness.
1087 	 *
1088 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1089 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1090 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
1091 	 * spec has never mentioned using different signatures for
1092 	 * ATA/ATAPI devices.  Then, the Serial ATA II: Port
1093 	 * Multiplier specification began to use 0x69/0x96 to identify
1094 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1095 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3
1096 	 * and 0x69/0x96, describing them instead as reserved for
1097 	 * SerialATA.
1098 	 *
1099 	 * We follow the current spec and consider that 0x69/0x96
1100 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1101 	 */
1102 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1103 		DPRINTK("found ATA device by sig\n");
1104 		return ATA_DEV_ATA;
1105 	}
1106 
1107 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1108 		DPRINTK("found ATAPI device by sig\n");
1109 		return ATA_DEV_ATAPI;
1110 	}
1111 
1112 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1113 		DPRINTK("found PMP device by sig\n");
1114 		return ATA_DEV_PMP;
1115 	}
1116 
1117 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1118 		printk(KERN_INFO "ata: SEMB device ignored\n");
1119 		return ATA_DEV_SEMB_UNSUP; /* not yet */
1120 	}
1121 
1122 	DPRINTK("unknown device\n");
1123 	return ATA_DEV_UNKNOWN;
1124 }
1125 
1126 /**
1127  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1128  *	@id: IDENTIFY DEVICE results we will examine
1129  *	@s: string into which data is output
1130  *	@ofs: offset into identify device page
1131  *	@len: length of string to return. must be an even number.
1132  *
1133  *	The strings in the IDENTIFY DEVICE page are broken up into
1134  *	16-bit chunks.  Run through the string, and output each
1135  *	8-bit chunk linearly, regardless of platform.
1136  *
1137  *	LOCKING:
1138  *	caller.
1139  */
1140 
1141 void ata_id_string(const u16 *id, unsigned char *s,
1142 		   unsigned int ofs, unsigned int len)
1143 {
1144 	unsigned int c;
1145 
1146 	BUG_ON(len & 1);
1147 
1148 	while (len > 0) {
1149 		c = id[ofs] >> 8;
1150 		*s = c;
1151 		s++;
1152 
1153 		c = id[ofs] & 0xff;
1154 		*s = c;
1155 		s++;
1156 
1157 		ofs++;
1158 		len -= 2;
1159 	}
1160 }
1161 
1162 /**
1163  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1164  *	@id: IDENTIFY DEVICE results we will examine
1165  *	@s: string into which data is output
1166  *	@ofs: offset into identify device page
1167  *	@len: length of string to return. must be an odd number.
1168  *
1169  *	This function is identical to ata_id_string except that it
1170  *	trims trailing spaces and terminates the resulting string with
1171  *	null.  @len must be actual maximum length (even number) + 1.
1172  *
1173  *	LOCKING:
1174  *	caller.
1175  */
1176 void ata_id_c_string(const u16 *id, unsigned char *s,
1177 		     unsigned int ofs, unsigned int len)
1178 {
1179 	unsigned char *p;
1180 
1181 	ata_id_string(id, s, ofs, len - 1);
1182 
1183 	p = s + strnlen(s, len - 1);
1184 	while (p > s && p[-1] == ' ')
1185 		p--;
1186 	*p = '\0';
1187 }
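
/*
 * Typical use, mirroring what libata does elsewhere (a sketch): pull
 * the model string out of IDENTIFY data into a NUL-terminated buffer:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */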
1188 
1189 static u64 ata_id_n_sectors(const u16 *id)
1190 {
1191 	if (ata_id_has_lba(id)) {
1192 		if (ata_id_has_lba48(id))
1193 			return ata_id_u64(id, 100);
1194 		else
1195 			return ata_id_u32(id, 60);
1196 	} else {
1197 		if (ata_id_current_chs_valid(id))
1198 			return ata_id_u32(id, 57);
1199 		else
1200 			return id[1] * id[3] * id[6];
1201 	}
1202 }
1203 
1204 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1205 {
1206 	u64 sectors = 0;
1207 
1208 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1209 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1210 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1211 	sectors |= (tf->lbah & 0xff) << 16;
1212 	sectors |= (tf->lbam & 0xff) << 8;
1213 	sectors |= (tf->lbal & 0xff);
1214 
1215 	return sectors;
1216 }
1217 
1218 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1219 {
1220 	u64 sectors = 0;
1221 
1222 	sectors |= (tf->device & 0x0f) << 24;
1223 	sectors |= (tf->lbah & 0xff) << 16;
1224 	sectors |= (tf->lbam & 0xff) << 8;
1225 	sectors |= (tf->lbal & 0xff);
1226 
1227 	return sectors;
1228 }
1229 
1230 /**
1231  *	ata_read_native_max_address - Read native max address
1232  *	@dev: target device
1233  *	@max_sectors: out parameter for the result native max address
1234  *
1235  *	Perform an LBA48 or LBA28 native size query upon the device in
1236  *	question.
1237  *
1238  *	RETURNS:
1239  *	0 on success, -EACCES if command is aborted by the drive.
1240  *	-EIO on other errors.
1241  */
1242 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1243 {
1244 	unsigned int err_mask;
1245 	struct ata_taskfile tf;
1246 	int lba48 = ata_id_has_lba48(dev->id);
1247 
1248 	ata_tf_init(dev, &tf);
1249 
1250 	/* always clear all address registers */
1251 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1252 
1253 	if (lba48) {
1254 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1255 		tf.flags |= ATA_TFLAG_LBA48;
1256 	} else
1257 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1258 
1259 	tf.protocol |= ATA_PROT_NODATA;
1260 	tf.device |= ATA_LBA;
1261 
1262 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1263 	if (err_mask) {
1264 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1265 			       "max address (err_mask=0x%x)\n", err_mask);
1266 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1267 			return -EACCES;
1268 		return -EIO;
1269 	}
1270 
1271 	if (lba48)
1272 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1273 	else
1274 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1275 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1276 		(*max_sectors)--;
1277 	return 0;
1278 }
1279 
1280 /**
1281  *	ata_set_max_sectors - Set max sectors
1282  *	@dev: target device
1283  *	@new_sectors: new max sectors value to set for the device
1284  *
1285  *	Set max sectors of @dev to @new_sectors.
1286  *
1287  *	RETURNS:
1288  *	0 on success, -EACCES if command is aborted or denied (due to
1289  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1290  *	errors.
1291  */
1292 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1293 {
1294 	unsigned int err_mask;
1295 	struct ata_taskfile tf;
1296 	int lba48 = ata_id_has_lba48(dev->id);
1297 
1298 	new_sectors--;
1299 
1300 	ata_tf_init(dev, &tf);
1301 
1302 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1303 
1304 	if (lba48) {
1305 		tf.command = ATA_CMD_SET_MAX_EXT;
1306 		tf.flags |= ATA_TFLAG_LBA48;
1307 
1308 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1309 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1310 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1311 	} else {
1312 		tf.command = ATA_CMD_SET_MAX;
1313 
1314 		tf.device |= (new_sectors >> 24) & 0xf;
1315 	}
1316 
1317 	tf.protocol |= ATA_PROT_NODATA;
1318 	tf.device |= ATA_LBA;
1319 
1320 	tf.lbal = (new_sectors >> 0) & 0xff;
1321 	tf.lbam = (new_sectors >> 8) & 0xff;
1322 	tf.lbah = (new_sectors >> 16) & 0xff;
1323 
1324 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1325 	if (err_mask) {
1326 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1327 			       "max address (err_mask=0x%x)\n", err_mask);
1328 		if (err_mask == AC_ERR_DEV &&
1329 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1330 			return -EACCES;
1331 		return -EIO;
1332 	}
1333 
1334 	return 0;
1335 }
1336 
1337 /**
1338  *	ata_hpa_resize		-	Resize a device with an HPA set
1339  *	@dev: Device to resize
1340  *
1341  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1342  *	it if required to the full size of the media. The caller must check
1343  *	the drive has the HPA feature set enabled.
1344  *
1345  *	RETURNS:
1346  *	0 on success, -errno on failure.
1347  */
1348 static int ata_hpa_resize(struct ata_device *dev)
1349 {
1350 	struct ata_eh_context *ehc = &dev->link->eh_context;
1351 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1352 	u64 sectors = ata_id_n_sectors(dev->id);
1353 	u64 native_sectors;
1354 	int rc;
1355 
1356 	/* do we need to do it? */
1357 	if (dev->class != ATA_DEV_ATA ||
1358 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1359 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1360 		return 0;
1361 
1362 	/* read native max address */
1363 	rc = ata_read_native_max_address(dev, &native_sectors);
1364 	if (rc) {
1365 		/* If device aborted the command or HPA isn't going to
1366 		 * be unlocked, skip HPA resizing.
1367 		 */
1368 		if (rc == -EACCES || !ata_ignore_hpa) {
1369 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1370 				       "broken, skipping HPA handling\n");
1371 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1372 
1373 			/* we can continue if device aborted the command */
1374 			if (rc == -EACCES)
1375 				rc = 0;
1376 		}
1377 
1378 		return rc;
1379 	}
1380 
1381 	/* nothing to do? */
1382 	if (native_sectors <= sectors || !ata_ignore_hpa) {
1383 		if (!print_info || native_sectors == sectors)
1384 			return 0;
1385 
1386 		if (native_sectors > sectors)
1387 			ata_dev_printk(dev, KERN_INFO,
1388 				"HPA detected: current %llu, native %llu\n",
1389 				(unsigned long long)sectors,
1390 				(unsigned long long)native_sectors);
1391 		else if (native_sectors < sectors)
1392 			ata_dev_printk(dev, KERN_WARNING,
1393 				"native sectors (%llu) is smaller than "
1394 				"sectors (%llu)\n",
1395 				(unsigned long long)native_sectors,
1396 				(unsigned long long)sectors);
1397 		return 0;
1398 	}
1399 
1400 	/* let's unlock HPA */
1401 	rc = ata_set_max_sectors(dev, native_sectors);
1402 	if (rc == -EACCES) {
1403 		/* if device aborted the command, skip HPA resizing */
1404 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1405 			       "(%llu -> %llu), skipping HPA handling\n",
1406 			       (unsigned long long)sectors,
1407 			       (unsigned long long)native_sectors);
1408 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1409 		return 0;
1410 	} else if (rc)
1411 		return rc;
1412 
1413 	/* re-read IDENTIFY data */
1414 	rc = ata_dev_reread_id(dev, 0);
1415 	if (rc) {
1416 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1417 			       "data after HPA resizing\n");
1418 		return rc;
1419 	}
1420 
1421 	if (print_info) {
1422 		u64 new_sectors = ata_id_n_sectors(dev->id);
1423 		ata_dev_printk(dev, KERN_INFO,
1424 			"HPA unlocked: %llu -> %llu, native %llu\n",
1425 			(unsigned long long)sectors,
1426 			(unsigned long long)new_sectors,
1427 			(unsigned long long)native_sectors);
1428 	}
1429 
1430 	return 0;
1431 }
1432 
1433 /**
1434  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1435  *	@id: IDENTIFY DEVICE page to dump
1436  *
1437  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1438  *	page.
1439  *
1440  *	LOCKING:
1441  *	caller.
1442  */
1443 
1444 static inline void ata_dump_id(const u16 *id)
1445 {
1446 	DPRINTK("49==0x%04x  "
1447 		"53==0x%04x  "
1448 		"63==0x%04x  "
1449 		"64==0x%04x  "
1450 		"75==0x%04x  \n",
1451 		id[49],
1452 		id[53],
1453 		id[63],
1454 		id[64],
1455 		id[75]);
1456 	DPRINTK("80==0x%04x  "
1457 		"81==0x%04x  "
1458 		"82==0x%04x  "
1459 		"83==0x%04x  "
1460 		"84==0x%04x  \n",
1461 		id[80],
1462 		id[81],
1463 		id[82],
1464 		id[83],
1465 		id[84]);
1466 	DPRINTK("88==0x%04x  "
1467 		"93==0x%04x\n",
1468 		id[88],
1469 		id[93]);
1470 }
1471 
1472 /**
1473  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1474  *	@id: IDENTIFY data to compute xfer mask from
1475  *
1476  *	Compute the xfermask for this device. This is not as trivial
1477  *	as it seems if we must consider early devices correctly.
1478  *
1479  *	FIXME: pre IDE drive timing (do we care ?).
1480  *
1481  *	LOCKING:
1482  *	None.
1483  *
1484  *	RETURNS:
1485  *	Computed xfermask
1486  */
1487 unsigned long ata_id_xfermask(const u16 *id)
1488 {
1489 	unsigned long pio_mask, mwdma_mask, udma_mask;
1490 
1491 	/* Usual case. Word 53 indicates word 64 is valid */
1492 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1493 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1494 		pio_mask <<= 3;
1495 		pio_mask |= 0x7;
1496 	} else {
1497 		/* If word 64 isn't valid then Word 51 high byte holds
1498 		 * the PIO timing number for the maximum. Turn it into
1499 		 * a mask.
1500 		 */
1501 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1502 		if (mode < 5)	/* Valid PIO range */
1503 			pio_mask = (2 << mode) - 1;
1504 		else
1505 			pio_mask = 1;
1506 
1507 		/* But wait.. there's more. Design your standards by
1508 		 * committee and you too can get a free iordy field to
1509 		 * process.  However it's the speeds, not the modes, that
1510 		 * are supported... Note drivers using the timing API
1511 		 * will get this right anyway
1512 		 */
1513 	}
1514 
1515 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1516 
1517 	if (ata_id_is_cfa(id)) {
1518 		/*
1519 		 *	Process compact flash extended modes
1520 		 */
1521 		int pio = id[163] & 0x7;
1522 		int dma = (id[163] >> 3) & 7;
1523 
1524 		if (pio)
1525 			pio_mask |= (1 << 5);
1526 		if (pio > 1)
1527 			pio_mask |= (1 << 6);
1528 		if (dma)
1529 			mwdma_mask |= (1 << 3);
1530 		if (dma > 1)
1531 			mwdma_mask |= (1 << 4);
1532 	}
1533 
1534 	udma_mask = 0;
1535 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1536 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1537 
1538 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1539 }
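
/*
 * Worked example: a drive reporting word 53 bits 1 and 2 set, word 64
 * == 0x03 (PIO3 and PIO4), word 63 == 0x07 and word 88 low byte ==
 * 0x3f comes out with pio_mask == 0x1f (PIO0-4), mwdma_mask == 0x07
 * (MWDMA0-2) and udma_mask == 0x3f (UDMA0-5) in the packed result.
 */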
1540 
1541 /**
1542  *	ata_pio_queue_task - Queue port_task
1543  *	@ap: The ata_port to queue port_task for
1544  *	@data: data for the port_task function to use
1545  *	@delay: delay time in msecs before the function runs
1546  *
1547  *	Schedule the port_task function to run with @data after
1548  *	@delay msecs.  There is one port_task per port and it is
1549  *	the user's (low level driver's) responsibility to make
1550  *	sure that only one task is active at any given time.
1551  *
1552  *	The libata core layer takes care of synchronization
1553  *	between port_task and EH, so callers of
1554  *	ata_pio_queue_task() need not worry about EH
1555  *	synchronization.
1556  *
1557  *	LOCKING:
1558  *	Inherited from caller.
1559  */
1560 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1561 {
1562 	ap->port_task_data = data;
1563 
1564 	/* may fail if ata_port_flush_task() in progress */
1565 	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1566 }
1567 
1568 /**
1569  *	ata_port_flush_task - Flush port_task
1570  *	@ap: The ata_port to flush port_task for
1571  *
1572  *	After this function completes, port_task is guranteed not to
1573  *	be running or scheduled.
1574  *
1575  *	LOCKING:
1576  *	Kernel thread context (may sleep)
1577  */
1578 void ata_port_flush_task(struct ata_port *ap)
1579 {
1580 	DPRINTK("ENTER\n");
1581 
1582 	cancel_rearming_delayed_work(&ap->port_task);
1583 
1584 	if (ata_msg_ctl(ap))
1585 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1586 }
1587 
1588 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1589 {
1590 	struct completion *waiting = qc->private_data;
1591 
1592 	complete(waiting);
1593 }
1594 
1595 /**
1596  *	ata_exec_internal_sg - execute libata internal command
1597  *	@dev: Device to which the command is sent
1598  *	@tf: Taskfile registers for the command and the result
1599  *	@cdb: CDB for packet command
1600  *	@dma_dir: Data transfer direction of the command
1601  *	@sgl: sg list for the data buffer of the command
1602  *	@n_elem: Number of sg entries
1603  *	@timeout: Timeout in msecs (0 for default)
1604  *
1605  *	Executes libata internal command with timeout.  @tf contains
1606  *	command on entry and result on return.  Timeout and error
1607  *	conditions are reported via return value.  No recovery action
1608  *	is taken after a command times out.  It's caller's duty to
1609  *	clean up after timeout.
1610  *
1611  *	LOCKING:
1612  *	None.  Should be called with kernel context, might sleep.
1613  *
1614  *	RETURNS:
1615  *	Zero on success, AC_ERR_* mask on failure
1616  */
1617 unsigned ata_exec_internal_sg(struct ata_device *dev,
1618 			      struct ata_taskfile *tf, const u8 *cdb,
1619 			      int dma_dir, struct scatterlist *sgl,
1620 			      unsigned int n_elem, unsigned long timeout)
1621 {
1622 	struct ata_link *link = dev->link;
1623 	struct ata_port *ap = link->ap;
1624 	u8 command = tf->command;
1625 	int auto_timeout = 0;
1626 	struct ata_queued_cmd *qc;
1627 	unsigned int tag, preempted_tag;
1628 	u32 preempted_sactive, preempted_qc_active;
1629 	int preempted_nr_active_links;
1630 	DECLARE_COMPLETION_ONSTACK(wait);
1631 	unsigned long flags;
1632 	unsigned int err_mask;
1633 	int rc;
1634 
1635 	spin_lock_irqsave(ap->lock, flags);
1636 
1637 	/* no internal command while frozen */
1638 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1639 		spin_unlock_irqrestore(ap->lock, flags);
1640 		return AC_ERR_SYSTEM;
1641 	}
1642 
1643 	/* initialize internal qc */
1644 
1645 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1646 	 * drivers choke if any other tag is given.  This breaks
1647 	 * ata_tag_internal() test for those drivers.  Don't use new
1648 	 * EH stuff without converting to it.
1649 	 */
1650 	if (ap->ops->error_handler)
1651 		tag = ATA_TAG_INTERNAL;
1652 	else
1653 		tag = 0;
1654 
1655 	if (test_and_set_bit(tag, &ap->qc_allocated))
1656 		BUG();
1657 	qc = __ata_qc_from_tag(ap, tag);
1658 
1659 	qc->tag = tag;
1660 	qc->scsicmd = NULL;
1661 	qc->ap = ap;
1662 	qc->dev = dev;
1663 	ata_qc_reinit(qc);
1664 
1665 	preempted_tag = link->active_tag;
1666 	preempted_sactive = link->sactive;
1667 	preempted_qc_active = ap->qc_active;
1668 	preempted_nr_active_links = ap->nr_active_links;
1669 	link->active_tag = ATA_TAG_POISON;
1670 	link->sactive = 0;
1671 	ap->qc_active = 0;
1672 	ap->nr_active_links = 0;
1673 
1674 	/* prepare & issue qc */
1675 	qc->tf = *tf;
1676 	if (cdb)
1677 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1678 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1679 	qc->dma_dir = dma_dir;
1680 	if (dma_dir != DMA_NONE) {
1681 		unsigned int i, buflen = 0;
1682 		struct scatterlist *sg;
1683 
1684 		for_each_sg(sgl, sg, n_elem, i)
1685 			buflen += sg->length;
1686 
1687 		ata_sg_init(qc, sgl, n_elem);
1688 		qc->nbytes = buflen;
1689 	}
1690 
1691 	qc->private_data = &wait;
1692 	qc->complete_fn = ata_qc_complete_internal;
1693 
1694 	ata_qc_issue(qc);
1695 
1696 	spin_unlock_irqrestore(ap->lock, flags);
1697 
1698 	if (!timeout) {
1699 		if (ata_probe_timeout)
1700 			timeout = ata_probe_timeout * 1000;
1701 		else {
1702 			timeout = ata_internal_cmd_timeout(dev, command);
1703 			auto_timeout = 1;
1704 		}
1705 	}
1706 
1707 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1708 
1709 	ata_port_flush_task(ap);
1710 
1711 	if (!rc) {
1712 		spin_lock_irqsave(ap->lock, flags);
1713 
1714 		/* We're racing with irq here.  If we lose, the
1715 		 * following test prevents us from completing the qc
1716 		 * twice.  If we win, the port is frozen and will be
1717 		 * cleaned up by ->post_internal_cmd().
1718 		 */
1719 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1720 			qc->err_mask |= AC_ERR_TIMEOUT;
1721 
1722 			if (ap->ops->error_handler)
1723 				ata_port_freeze(ap);
1724 			else
1725 				ata_qc_complete(qc);
1726 
1727 			if (ata_msg_warn(ap))
1728 				ata_dev_printk(dev, KERN_WARNING,
1729 					"qc timeout (cmd 0x%x)\n", command);
1730 		}
1731 
1732 		spin_unlock_irqrestore(ap->lock, flags);
1733 	}
1734 
1735 	/* do post_internal_cmd */
1736 	if (ap->ops->post_internal_cmd)
1737 		ap->ops->post_internal_cmd(qc);
1738 
1739 	/* perform minimal error analysis */
1740 	if (qc->flags & ATA_QCFLAG_FAILED) {
1741 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1742 			qc->err_mask |= AC_ERR_DEV;
1743 
1744 		if (!qc->err_mask)
1745 			qc->err_mask |= AC_ERR_OTHER;
1746 
1747 		if (qc->err_mask & ~AC_ERR_OTHER)
1748 			qc->err_mask &= ~AC_ERR_OTHER;
1749 	}
1750 
1751 	/* finish up */
1752 	spin_lock_irqsave(ap->lock, flags);
1753 
1754 	*tf = qc->result_tf;
1755 	err_mask = qc->err_mask;
1756 
1757 	ata_qc_free(qc);
1758 	link->active_tag = preempted_tag;
1759 	link->sactive = preempted_sactive;
1760 	ap->qc_active = preempted_qc_active;
1761 	ap->nr_active_links = preempted_nr_active_links;
1762 
1763 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1764 	 * Until those drivers are fixed, we detect the condition
1765 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1766 	 * port.
1767 	 *
1768 	 * Note that this doesn't change any behavior as internal
1769 	 * command failure results in disabling the device in the
1770 	 * higher layer for LLDDs without new reset/EH callbacks.
1771 	 *
1772 	 * Kill the following code as soon as those drivers are fixed.
1773 	 */
1774 	if (ap->flags & ATA_FLAG_DISABLED) {
1775 		err_mask |= AC_ERR_SYSTEM;
1776 		ata_port_probe(ap);
1777 	}
1778 
1779 	spin_unlock_irqrestore(ap->lock, flags);
1780 
1781 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1782 		ata_internal_cmd_timed_out(dev, command);
1783 
1784 	return err_mask;
1785 }
1786 
1787 /**
1788  *	ata_exec_internal - execute libata internal command
1789  *	@dev: Device to which the command is sent
1790  *	@tf: Taskfile registers for the command and the result
1791  *	@cdb: CDB for packet command
1792  *	@dma_dir: Data transfer direction of the command
1793  *	@buf: Data buffer of the command
1794  *	@buflen: Length of data buffer
1795  *	@timeout: Timeout in msecs (0 for default)
1796  *
1797  *	Wrapper around ata_exec_internal_sg() which takes simple
1798  *	buffer instead of sg list.
1799  *
1800  *	LOCKING:
1801  *	None.  Should be called with kernel context, might sleep.
1802  *
1803  *	RETURNS:
1804  *	Zero on success, AC_ERR_* mask on failure
1805  */
1806 unsigned ata_exec_internal(struct ata_device *dev,
1807 			   struct ata_taskfile *tf, const u8 *cdb,
1808 			   int dma_dir, void *buf, unsigned int buflen,
1809 			   unsigned long timeout)
1810 {
1811 	struct scatterlist *psg = NULL, sg;
1812 	unsigned int n_elem = 0;
1813 
1814 	if (dma_dir != DMA_NONE) {
1815 		WARN_ON(!buf);
1816 		sg_init_one(&sg, buf, buflen);
1817 		psg = &sg;
1818 		n_elem++;
1819 	}
1820 
1821 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1822 				    timeout);
1823 }
1824 
1825 /**
1826  *	ata_do_simple_cmd - execute simple internal command
1827  *	@dev: Device to which the command is sent
1828  *	@cmd: Opcode to execute
1829  *
1830  *	Execute a 'simple' command, that only consists of the opcode
1831  *	'cmd' itself, without filling any other registers
1832  *
1833  *	LOCKING:
1834  *	Kernel thread context (may sleep).
1835  *
1836  *	RETURNS:
1837  *	Zero on success, AC_ERR_* mask on failure
1838  */
1839 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1840 {
1841 	struct ata_taskfile tf;
1842 
1843 	ata_tf_init(dev, &tf);
1844 
1845 	tf.command = cmd;
1846 	tf.flags |= ATA_TFLAG_DEVICE;
1847 	tf.protocol = ATA_PROT_NODATA;
1848 
1849 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1850 }
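
/*
 * For example, flushing a drive's write cache before suspend boils
 * down to something like (a sketch; error handling elided):
 *
 *	err_mask = ata_do_simple_cmd(dev, ata_id_has_lba48(dev->id) ?
 *				     ATA_CMD_FLUSH_EXT : ATA_CMD_FLUSH);
 */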
1851 
1852 /**
1853  *	ata_pio_need_iordy	-	check if iordy needed
1854  *	@adev: ATA device
1855  *
1856  *	Check if the current speed of the device requires IORDY. Used
1857  *	by various controllers for chip configuration.
1858  */
1859 
1860 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1861 {
1862 	/* Controller doesn't support IORDY. Probably a pointless check
1863 	   as the caller should know this */
1864 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1865 		return 0;
1866 	/* PIO3 and higher it is mandatory */
1867 	if (adev->pio_mode > XFER_PIO_2)
1868 		return 1;
1869 	/* We turn it on when possible */
1870 	if (ata_id_has_iordy(adev->id))
1871 		return 1;
1872 	return 0;
1873 }
1874 
1875 /**
1876  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1877  *	@adev: ATA device
1878  *
1879  *	Compute the highest mode possible if we are not using iordy. Return
1880  *	-1 if no iordy mode is available.
1881  */
1882 
1883 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1884 {
1885 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1886 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1887 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1888 		/* Is the speed faster than the drive allows non IORDY ? */
1889 		if (pio) {
1890 			/* This is cycle times not frequency - watch the logic! */
1891 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1892 				return 3 << ATA_SHIFT_PIO;
1893 			return 7 << ATA_SHIFT_PIO;
1894 		}
1895 	}
1896 	return 3 << ATA_SHIFT_PIO;
1897 }
1898 
1899 /**
1900  *	ata_do_dev_read_id		-	default ID read method
1901  *	@dev: device
1902  *	@tf: proposed taskfile
1903  *	@id: data buffer
1904  *
1905  *	Issue the identify taskfile and hand back the buffer containing
1906  *	identify data.  For some RAID controllers and for pre-ATA devices
1907  *	this function is wrapped or replaced by the driver.
1908  */
1909 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1910 					struct ata_taskfile *tf, u16 *id)
1911 {
1912 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1913 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1914 }
1915 
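/*
 * Illustrative sketch (hypothetical example_* name): a driver-supplied
 * ->read_id() usually calls this default method and then patches the
 * returned words; here, purely for illustration, clamping the advertised
 * UDMA support bits in word 88 to UDMA0-2.
 *
 *	static unsigned int example_read_id(struct ata_device *dev,
 *					    struct ata_taskfile *tf, u16 *id)
 *	{
 *		unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);
 *
 *		if (!err_mask)
 *			id[ATA_ID_UDMA_MODES] &= 0x07;
 *		return err_mask;
 *	}
 *
 * installed through the port operations:	.read_id = example_read_id,
 */
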
1916 /**
1917  *	ata_dev_read_id - Read ID data from the specified device
1918  *	@dev: target device
1919  *	@p_class: pointer to class of the target device (may be changed)
1920  *	@flags: ATA_READID_* flags
1921  *	@id: buffer to read IDENTIFY data into
1922  *
1923  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1924  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1925  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1926  *	for pre-ATA4 drives.
1927  *
1928  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1929  *	now we abort if we hit that case.
1930  *
1931  *	LOCKING:
1932  *	Kernel thread context (may sleep)
1933  *
1934  *	RETURNS:
1935  *	0 on success, -errno otherwise.
1936  */
1937 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1938 		    unsigned int flags, u16 *id)
1939 {
1940 	struct ata_port *ap = dev->link->ap;
1941 	unsigned int class = *p_class;
1942 	struct ata_taskfile tf;
1943 	unsigned int err_mask = 0;
1944 	const char *reason;
1945 	int may_fallback = 1, tried_spinup = 0;
1946 	int rc;
1947 
1948 	if (ata_msg_ctl(ap))
1949 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1950 
1951 retry:
1952 	ata_tf_init(dev, &tf);
1953 
1954 	switch (class) {
1955 	case ATA_DEV_ATA:
1956 		tf.command = ATA_CMD_ID_ATA;
1957 		break;
1958 	case ATA_DEV_ATAPI:
1959 		tf.command = ATA_CMD_ID_ATAPI;
1960 		break;
1961 	default:
1962 		rc = -ENODEV;
1963 		reason = "unsupported class";
1964 		goto err_out;
1965 	}
1966 
1967 	tf.protocol = ATA_PROT_PIO;
1968 
1969 	/* Some devices choke if TF registers contain garbage.  Make
1970 	 * sure those are properly initialized.
1971 	 */
1972 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1973 
1974 	/* Device presence detection is unreliable on some
1975 	 * controllers.  Always poll IDENTIFY if available.
1976 	 */
1977 	tf.flags |= ATA_TFLAG_POLLING;
1978 
1979 	if (ap->ops->read_id)
1980 		err_mask = ap->ops->read_id(dev, &tf, id);
1981 	else
1982 		err_mask = ata_do_dev_read_id(dev, &tf, id);
1983 
1984 	if (err_mask) {
1985 		if (err_mask & AC_ERR_NODEV_HINT) {
1986 			ata_dev_printk(dev, KERN_DEBUG,
1987 				       "NODEV after polling detection\n");
1988 			return -ENOENT;
1989 		}
1990 
1991 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1992 			/* Device or controller might have reported
1993 			 * the wrong device class.  Give a shot at the
1994 			 * other IDENTIFY if the current one is
1995 			 * aborted by the device.
1996 			 */
1997 			if (may_fallback) {
1998 				may_fallback = 0;
1999 
2000 				if (class == ATA_DEV_ATA)
2001 					class = ATA_DEV_ATAPI;
2002 				else
2003 					class = ATA_DEV_ATA;
2004 				goto retry;
2005 			}
2006 
2007 			/* Control reaches here iff the device aborted
2008 			 * both flavors of IDENTIFYs which happens
2009 			 * sometimes with phantom devices.
2010 			 */
2011 			ata_dev_printk(dev, KERN_DEBUG,
2012 				       "both IDENTIFYs aborted, assuming NODEV\n");
2013 			return -ENOENT;
2014 		}
2015 
2016 		rc = -EIO;
2017 		reason = "I/O error";
2018 		goto err_out;
2019 	}
2020 
2021 	/* Falling back doesn't make sense if ID data was read
2022 	 * successfully at least once.
2023 	 */
2024 	may_fallback = 0;
2025 
2026 	swap_buf_le16(id, ATA_ID_WORDS);
2027 
2028 	/* sanity check */
2029 	rc = -EINVAL;
2030 	reason = "device reports invalid type";
2031 
2032 	if (class == ATA_DEV_ATA) {
2033 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2034 			goto err_out;
2035 	} else {
2036 		if (ata_id_is_ata(id))
2037 			goto err_out;
2038 	}
2039 
2040 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2041 		tried_spinup = 1;
2042 		/*
2043 		 * Drive powered-up in standby mode, and requires a specific
2044 		 * SET_FEATURES spin-up subcommand before it will accept
2045 		 * anything other than the original IDENTIFY command.
2046 		 */
2047 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2048 		if (err_mask && id[2] != 0x738c) {
2049 			rc = -EIO;
2050 			reason = "SPINUP failed";
2051 			goto err_out;
2052 		}
2053 		/*
2054 		 * If the drive initially returned incomplete IDENTIFY info,
2055 		 * we now must reissue the IDENTIFY command.
2056 		 */
2057 		if (id[2] == 0x37c8)
2058 			goto retry;
2059 	}
2060 
2061 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2062 		/*
2063 		 * The exact sequence expected by certain pre-ATA4 drives is:
2064 		 * SRST RESET
2065 		 * IDENTIFY (optional in early ATA)
2066 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2067 		 * anything else..
2068 		 * Some drives were very specific about that exact sequence.
2069 		 *
2070 		 * Note that ATA4 says lba is mandatory so the second check
2071 		 * Note that ATA4 says LBA is mandatory so the second check
2072 		 * should never trigger.
2073 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2074 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2075 			if (err_mask) {
2076 				rc = -EIO;
2077 				reason = "INIT_DEV_PARAMS failed";
2078 				goto err_out;
2079 			}
2080 
2081 			/* current CHS translation info (id[53-58]) might be
2082 			 * changed. reread the identify device info.
2083 			 */
2084 			flags &= ~ATA_READID_POSTRESET;
2085 			goto retry;
2086 		}
2087 	}
2088 
2089 	*p_class = class;
2090 
2091 	return 0;
2092 
2093  err_out:
2094 	if (ata_msg_warn(ap))
2095 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2096 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2097 	return rc;
2098 }
2099 
2100 static inline u8 ata_dev_knobble(struct ata_device *dev)
2101 {
2102 	struct ata_port *ap = dev->link->ap;
2103 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2104 }
2105 
2106 static void ata_dev_config_ncq(struct ata_device *dev,
2107 			       char *desc, size_t desc_sz)
2108 {
2109 	struct ata_port *ap = dev->link->ap;
2110 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2111 
2112 	if (!ata_id_has_ncq(dev->id)) {
2113 		desc[0] = '\0';
2114 		return;
2115 	}
2116 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2117 		snprintf(desc, desc_sz, "NCQ (not used)");
2118 		return;
2119 	}
2120 	if (ap->flags & ATA_FLAG_NCQ) {
2121 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2122 		dev->flags |= ATA_DFLAG_NCQ;
2123 	}
2124 
2125 	if (hdepth >= ddepth)
2126 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2127 	else
2128 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2129 }
2130 
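/*
 * Worked example: a drive advertising queue depth 32 behind a host whose
 * can_queue is 31 (ATA_MAX_QUEUE - 1) yields hdepth = 31, ddepth = 32 and
 * therefore "NCQ (depth 31/32)" in the probe output.
 */
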
2131 /**
2132  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2133  *	@dev: Target device to configure
2134  *
2135  *	Configure @dev according to @dev->id.  Generic and low-level
2136  *	driver specific fixups are also applied.
2137  *
2138  *	LOCKING:
2139  *	Kernel thread context (may sleep)
2140  *
2141  *	RETURNS:
2142  *	0 on success, -errno otherwise
2143  */
2144 int ata_dev_configure(struct ata_device *dev)
2145 {
2146 	struct ata_port *ap = dev->link->ap;
2147 	struct ata_eh_context *ehc = &dev->link->eh_context;
2148 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2149 	const u16 *id = dev->id;
2150 	unsigned long xfer_mask;
2151 	char revbuf[7];		/* XYZ-99\0 */
2152 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2153 	char modelbuf[ATA_ID_PROD_LEN+1];
2154 	int rc;
2155 
2156 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2157 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2158 			       __func__);
2159 		return 0;
2160 	}
2161 
2162 	if (ata_msg_probe(ap))
2163 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2164 
2165 	/* set horkage */
2166 	dev->horkage |= ata_dev_blacklisted(dev);
2167 	ata_force_horkage(dev);
2168 
2169 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2170 		ata_dev_printk(dev, KERN_INFO,
2171 			       "unsupported device, disabling\n");
2172 		ata_dev_disable(dev);
2173 		return 0;
2174 	}
2175 
2176 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2177 	    dev->class == ATA_DEV_ATAPI) {
2178 		ata_dev_printk(dev, KERN_WARNING,
2179 			"WARNING: ATAPI is %s, device ignored.\n",
2180 			atapi_enabled ? "not supported with this driver"
2181 				      : "disabled");
2182 		ata_dev_disable(dev);
2183 		return 0;
2184 	}
2185 
2186 	/* let ACPI work its magic */
2187 	rc = ata_acpi_on_devcfg(dev);
2188 	if (rc)
2189 		return rc;
2190 
2191 	/* massage HPA, do it early as it might change IDENTIFY data */
2192 	rc = ata_hpa_resize(dev);
2193 	if (rc)
2194 		return rc;
2195 
2196 	/* print device capabilities */
2197 	if (ata_msg_probe(ap))
2198 		ata_dev_printk(dev, KERN_DEBUG,
2199 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2200 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2201 			       __func__,
2202 			       id[49], id[82], id[83], id[84],
2203 			       id[85], id[86], id[87], id[88]);
2204 
2205 	/* initialize to-be-configured parameters */
2206 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2207 	dev->max_sectors = 0;
2208 	dev->cdb_len = 0;
2209 	dev->n_sectors = 0;
2210 	dev->cylinders = 0;
2211 	dev->heads = 0;
2212 	dev->sectors = 0;
2213 
2214 	/*
2215 	 * common ATA, ATAPI feature tests
2216 	 */
2217 
2218 	/* find max transfer mode; for printk only */
2219 	xfer_mask = ata_id_xfermask(id);
2220 
2221 	if (ata_msg_probe(ap))
2222 		ata_dump_id(id);
2223 
2224 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2225 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2226 			sizeof(fwrevbuf));
2227 
2228 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2229 			sizeof(modelbuf));
2230 
2231 	/* ATA-specific feature tests */
2232 	if (dev->class == ATA_DEV_ATA) {
2233 		if (ata_id_is_cfa(id)) {
2234 			if (id[162] & 1) /* CPRM may make this media unusable */
2235 				ata_dev_printk(dev, KERN_WARNING,
2236 					       "supports DRM functions and may "
2237 					       "not be fully accessible.\n");
2238 			snprintf(revbuf, 7, "CFA");
2239 		} else {
2240 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2241 			/* Warn the user if the device has TPM extensions */
2242 			if (ata_id_has_tpm(id))
2243 				ata_dev_printk(dev, KERN_WARNING,
2244 					       "supports DRM functions and may "
2245 					       "not be fully accessible.\n");
2246 		}
2247 
2248 		dev->n_sectors = ata_id_n_sectors(id);
2249 
2250 		if (dev->id[59] & 0x100)
2251 			dev->multi_count = dev->id[59] & 0xff;
2252 
2253 		if (ata_id_has_lba(id)) {
2254 			const char *lba_desc;
2255 			char ncq_desc[20];
2256 
2257 			lba_desc = "LBA";
2258 			dev->flags |= ATA_DFLAG_LBA;
2259 			if (ata_id_has_lba48(id)) {
2260 				dev->flags |= ATA_DFLAG_LBA48;
2261 				lba_desc = "LBA48";
2262 
2263 				if (dev->n_sectors >= (1UL << 28) &&
2264 				    ata_id_has_flush_ext(id))
2265 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2266 			}
2267 
2268 			/* config NCQ */
2269 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2270 
2271 			/* print device info to dmesg */
2272 			if (ata_msg_drv(ap) && print_info) {
2273 				ata_dev_printk(dev, KERN_INFO,
2274 					"%s: %s, %s, max %s\n",
2275 					revbuf, modelbuf, fwrevbuf,
2276 					ata_mode_string(xfer_mask));
2277 				ata_dev_printk(dev, KERN_INFO,
2278 					"%Lu sectors, multi %u: %s %s\n",
2279 					(unsigned long long)dev->n_sectors,
2280 					dev->multi_count, lba_desc, ncq_desc);
2281 			}
2282 		} else {
2283 			/* CHS */
2284 
2285 			/* Default translation */
2286 			dev->cylinders	= id[1];
2287 			dev->heads	= id[3];
2288 			dev->sectors	= id[6];
2289 
2290 			if (ata_id_current_chs_valid(id)) {
2291 				/* Current CHS translation is valid. */
2292 				dev->cylinders = id[54];
2293 				dev->heads     = id[55];
2294 				dev->sectors   = id[56];
2295 			}
2296 
2297 			/* print device info to dmesg */
2298 			if (ata_msg_drv(ap) && print_info) {
2299 				ata_dev_printk(dev, KERN_INFO,
2300 					"%s: %s, %s, max %s\n",
2301 					revbuf,	modelbuf, fwrevbuf,
2302 					ata_mode_string(xfer_mask));
2303 				ata_dev_printk(dev, KERN_INFO,
2304 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2305 					(unsigned long long)dev->n_sectors,
2306 					dev->multi_count, dev->cylinders,
2307 					dev->heads, dev->sectors);
2308 			}
2309 		}
2310 
2311 		dev->cdb_len = 16;
2312 	}
2313 
2314 	/* ATAPI-specific feature tests */
2315 	else if (dev->class == ATA_DEV_ATAPI) {
2316 		const char *cdb_intr_string = "";
2317 		const char *atapi_an_string = "";
2318 		const char *dma_dir_string = "";
2319 		u32 sntf;
2320 
2321 		rc = atapi_cdb_len(id);
2322 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2323 			if (ata_msg_warn(ap))
2324 				ata_dev_printk(dev, KERN_WARNING,
2325 					       "unsupported CDB len\n");
2326 			rc = -EINVAL;
2327 			goto err_out_nosup;
2328 		}
2329 		dev->cdb_len = (unsigned int) rc;
2330 
2331 		/* Enable ATAPI AN if both the host and device have
2332 		 * the support.  If PMP is attached, SNTF is required
2333 		 * to enable ATAPI AN to discern between PHY status
2334 		 * changed notifications and ATAPI ANs.
2335 		 */
2336 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2337 		    (!sata_pmp_attached(ap) ||
2338 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2339 			unsigned int err_mask;
2340 
2341 			/* issue SET feature command to turn this on */
2342 			err_mask = ata_dev_set_feature(dev,
2343 					SETFEATURES_SATA_ENABLE, SATA_AN);
2344 			if (err_mask)
2345 				ata_dev_printk(dev, KERN_ERR,
2346 					"failed to enable ATAPI AN "
2347 					"(err_mask=0x%x)\n", err_mask);
2348 			else {
2349 				dev->flags |= ATA_DFLAG_AN;
2350 				atapi_an_string = ", ATAPI AN";
2351 			}
2352 		}
2353 
2354 		if (ata_id_cdb_intr(dev->id)) {
2355 			dev->flags |= ATA_DFLAG_CDB_INTR;
2356 			cdb_intr_string = ", CDB intr";
2357 		}
2358 
2359 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2360 			dev->flags |= ATA_DFLAG_DMADIR;
2361 			dma_dir_string = ", DMADIR";
2362 		}
2363 
2364 		/* print device info to dmesg */
2365 		if (ata_msg_drv(ap) && print_info)
2366 			ata_dev_printk(dev, KERN_INFO,
2367 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2368 				       modelbuf, fwrevbuf,
2369 				       ata_mode_string(xfer_mask),
2370 				       cdb_intr_string, atapi_an_string,
2371 				       dma_dir_string);
2372 	}
2373 
2374 	/* determine max_sectors */
2375 	dev->max_sectors = ATA_MAX_SECTORS;
2376 	if (dev->flags & ATA_DFLAG_LBA48)
2377 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2378 
2379 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2380 		if (ata_id_has_hipm(dev->id))
2381 			dev->flags |= ATA_DFLAG_HIPM;
2382 		if (ata_id_has_dipm(dev->id))
2383 			dev->flags |= ATA_DFLAG_DIPM;
2384 	}
2385 
2386 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2387 	   200 sectors */
2388 	if (ata_dev_knobble(dev)) {
2389 		if (ata_msg_drv(ap) && print_info)
2390 			ata_dev_printk(dev, KERN_INFO,
2391 				       "applying bridge limits\n");
2392 		dev->udma_mask &= ATA_UDMA5;
2393 		dev->max_sectors = ATA_MAX_SECTORS;
2394 	}
2395 
2396 	if ((dev->class == ATA_DEV_ATAPI) &&
2397 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2398 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2399 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2400 	}
2401 
2402 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2403 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2404 					 dev->max_sectors);
2405 
2406 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2407 		dev->horkage |= ATA_HORKAGE_IPM;
2408 
2409 		/* reset link pm_policy for this port to no pm */
2410 		ap->pm_policy = MAX_PERFORMANCE;
2411 	}
2412 
2413 	if (ap->ops->dev_config)
2414 		ap->ops->dev_config(dev);
2415 
2416 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2417 		/* Let the user know. We don't want to disallow opens for
2418 		   rescue purposes, or in case the vendor is just a blithering
2419 		   idiot. Do this after the dev_config call as some controllers
2420 		   with buggy firmware may want to avoid reporting false device
2421 		   bugs */
2422 
2423 		if (print_info) {
2424 			ata_dev_printk(dev, KERN_WARNING,
2425 "Drive reports diagnostics failure. This may indicate a drive\n");
2426 			ata_dev_printk(dev, KERN_WARNING,
2427 "fault or invalid emulation. Contact drive vendor for information.\n");
2428 		}
2429 	}
2430 
2431 	return 0;
2432 
2433 err_out_nosup:
2434 	if (ata_msg_probe(ap))
2435 		ata_dev_printk(dev, KERN_DEBUG,
2436 			       "%s: EXIT, err\n", __func__);
2437 	return rc;
2438 }
2439 
2440 /**
2441  *	ata_cable_40wire	-	return 40 wire cable type
2442  *	@ap: port
2443  *
2444  *	Helper method for drivers which want to hardwire 40 wire cable
2445  *	detection.
2446  */
2447 
2448 int ata_cable_40wire(struct ata_port *ap)
2449 {
2450 	return ATA_CBL_PATA40;
2451 }
2452 
2453 /**
2454  *	ata_cable_80wire	-	return 80 wire cable type
2455  *	@ap: port
2456  *
2457  *	Helper method for drivers which want to hardwire 80 wire cable
2458  *	detection.
2459  */
2460 
2461 int ata_cable_80wire(struct ata_port *ap)
2462 {
2463 	return ATA_CBL_PATA80;
2464 }
2465 
2466 /**
2467  *	ata_cable_unknown	-	return unknown PATA cable.
2468  *	@ap: port
2469  *
2470  *	Helper method for drivers which have no PATA cable detection.
2471  */
2472 
2473 int ata_cable_unknown(struct ata_port *ap)
2474 {
2475 	return ATA_CBL_PATA_UNK;
2476 }
2477 
2478 /**
2479  *	ata_cable_ignore	-	return ignored PATA cable.
2480  *	@ap: port
2481  *
2482  *	Helper method for drivers which don't use cable type to limit
2483  *	transfer mode.
2484  */
2485 int ata_cable_ignore(struct ata_port *ap)
2486 {
2487 	return ATA_CBL_PATA_IGN;
2488 }
2489 
2490 /**
2491  *	ata_cable_sata	-	return SATA cable type
2492  *	@ap: port
2493  *
2494  *	Helper method for drivers which have SATA cables
2495  */
2496 
2497 int ata_cable_sata(struct ata_port *ap)
2498 {
2499 	return ATA_CBL_SATA;
2500 }
2501 
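/*
 * Illustrative sketch: the cable helpers above are meant to be plugged
 * directly into a driver's port operations when the board design fixes
 * the answer (hypothetical example_* ops; which ops are inherited depends
 * on the driver):
 *
 *	static struct ata_port_operations example_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= example_set_piomode,
 *	};
 */
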
2502 /**
2503  *	ata_bus_probe - Reset and probe ATA bus
2504  *	@ap: Bus to probe
2505  *
2506  *	Master ATA bus probing function.  Initiates a hardware-dependent
2507  *	bus reset, then attempts to identify any devices found on
2508  *	the bus.
2509  *
2510  *	LOCKING:
2511  *	PCI/etc. bus probe sem.
2512  *
2513  *	RETURNS:
2514  *	Zero on success, negative errno otherwise.
2515  */
2516 
2517 int ata_bus_probe(struct ata_port *ap)
2518 {
2519 	unsigned int classes[ATA_MAX_DEVICES];
2520 	int tries[ATA_MAX_DEVICES];
2521 	int rc;
2522 	struct ata_device *dev;
2523 
2524 	ata_port_probe(ap);
2525 
2526 	ata_link_for_each_dev(dev, &ap->link)
2527 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2528 
2529  retry:
2530 	ata_link_for_each_dev(dev, &ap->link) {
2531 		/* If we issue an SRST then an ATA drive (not ATAPI)
2532 		 * may change configuration and be in PIO0 timing. If
2533 		 * we do a hard reset (or are coming from power on)
2534 		 * this is true for ATA or ATAPI. Until we've set a
2535 		 * suitable controller mode we should not touch the
2536 		 * bus as we may be talking too fast.
2537 		 */
2538 		dev->pio_mode = XFER_PIO_0;
2539 
2540 		/* If the controller has a pio mode setup function
2541 		 * then use it to set the chipset to rights. Don't
2542 		 * touch the DMA setup as that will be dealt with when
2543 		 * configuring devices.
2544 		 */
2545 		if (ap->ops->set_piomode)
2546 			ap->ops->set_piomode(ap, dev);
2547 	}
2548 
2549 	/* reset and determine device classes */
2550 	ap->ops->phy_reset(ap);
2551 
2552 	ata_link_for_each_dev(dev, &ap->link) {
2553 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2554 		    dev->class != ATA_DEV_UNKNOWN)
2555 			classes[dev->devno] = dev->class;
2556 		else
2557 			classes[dev->devno] = ATA_DEV_NONE;
2558 
2559 		dev->class = ATA_DEV_UNKNOWN;
2560 	}
2561 
2562 	ata_port_probe(ap);
2563 
2564 	/* read IDENTIFY page and configure devices. We have to do the identify
2565 	   specific sequence bass-ackwards so that PDIAG- is released by
2566 	   the slave device */
2567 
2568 	ata_link_for_each_dev_reverse(dev, &ap->link) {
2569 		if (tries[dev->devno])
2570 			dev->class = classes[dev->devno];
2571 
2572 		if (!ata_dev_enabled(dev))
2573 			continue;
2574 
2575 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2576 				     dev->id);
2577 		if (rc)
2578 			goto fail;
2579 	}
2580 
2581 	/* Now ask for the cable type as PDIAG- should have been released */
2582 	if (ap->ops->cable_detect)
2583 		ap->cbl = ap->ops->cable_detect(ap);
2584 
2585 	/* We may have SATA bridge glue hiding here irrespective of the
2586 	   reported cable types and sensed types */
2587 	ata_link_for_each_dev(dev, &ap->link) {
2588 		if (!ata_dev_enabled(dev))
2589 			continue;
2590 		/* SATA drives indicate we have a bridge. We don't know which
2591 		   end of the link the bridge is on, which is a problem */
2592 		if (ata_id_is_sata(dev->id))
2593 			ap->cbl = ATA_CBL_SATA;
2594 	}
2595 
2596 	/* After the identify sequence we can now set up the devices. We do
2597 	   this in the normal order so that the user doesn't get confused */
2598 
2599 	ata_link_for_each_dev(dev, &ap->link) {
2600 		if (!ata_dev_enabled(dev))
2601 			continue;
2602 
2603 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2604 		rc = ata_dev_configure(dev);
2605 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2606 		if (rc)
2607 			goto fail;
2608 	}
2609 
2610 	/* configure transfer mode */
2611 	rc = ata_set_mode(&ap->link, &dev);
2612 	if (rc)
2613 		goto fail;
2614 
2615 	ata_link_for_each_dev(dev, &ap->link)
2616 		if (ata_dev_enabled(dev))
2617 			return 0;
2618 
2619 	/* no device present, disable port */
2620 	ata_port_disable(ap);
2621 	return -ENODEV;
2622 
2623  fail:
2624 	tries[dev->devno]--;
2625 
2626 	switch (rc) {
2627 	case -EINVAL:
2628 		/* eeek, something went very wrong, give up */
2629 		tries[dev->devno] = 0;
2630 		break;
2631 
2632 	case -ENODEV:
2633 		/* give it just one more chance */
2634 		tries[dev->devno] = min(tries[dev->devno], 1);
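		/* fallthrough - treat the remaining retry like -EIO */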
2635 	case -EIO:
2636 		if (tries[dev->devno] == 1) {
2637 			/* This is the last chance, better to slow
2638 			 * down than lose it.
2639 			 */
2640 			sata_down_spd_limit(&ap->link);
2641 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2642 		}
2643 	}
2644 
2645 	if (!tries[dev->devno])
2646 		ata_dev_disable(dev);
2647 
2648 	goto retry;
2649 }
2650 
2651 /**
2652  *	ata_port_probe - Mark port as enabled
2653  *	@ap: Port for which we indicate enablement
2654  *
2655  *	Modify @ap data structure such that the system
2656  *	thinks that the entire port is enabled.
2657  *
2658  *	LOCKING: host lock, or some other form of
2659  *	serialization.
2660  */
2661 
2662 void ata_port_probe(struct ata_port *ap)
2663 {
2664 	ap->flags &= ~ATA_FLAG_DISABLED;
2665 }
2666 
2667 /**
2668  *	sata_print_link_status - Print SATA link status
2669  *	@link: SATA link to printk link status about
2670  *
2671  *	This function prints link speed and status of a SATA link.
2672  *
2673  *	LOCKING:
2674  *	None.
2675  */
2676 static void sata_print_link_status(struct ata_link *link)
2677 {
2678 	u32 sstatus, scontrol, tmp;
2679 
2680 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2681 		return;
2682 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2683 
2684 	if (ata_link_online(link)) {
2685 		tmp = (sstatus >> 4) & 0xf;
2686 		ata_link_printk(link, KERN_INFO,
2687 				"SATA link up %s (SStatus %X SControl %X)\n",
2688 				sata_spd_string(tmp), sstatus, scontrol);
2689 	} else {
2690 		ata_link_printk(link, KERN_INFO,
2691 				"SATA link down (SStatus %X SControl %X)\n",
2692 				sstatus, scontrol);
2693 	}
2694 }
2695 
2696 /**
2697  *	ata_dev_pair		-	return other device on cable
2698  *	@adev: device
2699  *
2700  *	Obtain the other device on the same cable, or if none is
2701  *	present NULL is returned
2702  */
2703 
2704 struct ata_device *ata_dev_pair(struct ata_device *adev)
2705 {
2706 	struct ata_link *link = adev->link;
2707 	struct ata_device *pair = &link->device[1 - adev->devno];
2708 	if (!ata_dev_enabled(pair))
2709 		return NULL;
2710 	return pair;
2711 }
2712 
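/*
 * Usage sketch: PATA timing code that must program both devices on a
 * cable together can use the pair to find the slower of the two, e.g.:
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *	u8 slowest_pio = adev->pio_mode;
 *
 *	if (pair && pair->pio_mode < slowest_pio)
 *		slowest_pio = pair->pio_mode;
 */
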
2713 /**
2714  *	ata_port_disable - Disable port.
2715  *	@ap: Port to be disabled.
2716  *
2717  *	Modify @ap data structure such that the system
2718  *	thinks that the entire port is disabled, and should
2719  *	never attempt to probe or communicate with devices
2720  *	on this port.
2721  *
2722  *	LOCKING: host lock, or some other form of
2723  *	serialization.
2724  */
2725 
2726 void ata_port_disable(struct ata_port *ap)
2727 {
2728 	ap->link.device[0].class = ATA_DEV_NONE;
2729 	ap->link.device[1].class = ATA_DEV_NONE;
2730 	ap->flags |= ATA_FLAG_DISABLED;
2731 }
2732 
2733 /**
2734  *	sata_down_spd_limit - adjust SATA spd limit downward
2735  *	@link: Link to adjust SATA spd limit for
2736  *
2737  *	Adjust SATA spd limit of @link downward.  Note that this
2738  *	function only adjusts the limit.  The change must be applied
2739  *	using sata_set_spd().
2740  *
2741  *	LOCKING:
2742  *	Inherited from caller.
2743  *
2744  *	RETURNS:
2745  *	0 on success, negative errno on failure
2746  */
2747 int sata_down_spd_limit(struct ata_link *link)
2748 {
2749 	u32 sstatus, spd, mask;
2750 	int rc, highbit;
2751 
2752 	if (!sata_scr_valid(link))
2753 		return -EOPNOTSUPP;
2754 
2755 	/* If SCR can be read, use it to determine the current SPD.
2756 	 * If not, use cached value in link->sata_spd.
2757 	 */
2758 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2759 	if (rc == 0)
2760 		spd = (sstatus >> 4) & 0xf;
2761 	else
2762 		spd = link->sata_spd;
2763 
2764 	mask = link->sata_spd_limit;
2765 	if (mask <= 1)
2766 		return -EINVAL;
2767 
2768 	/* unconditionally mask off the highest bit */
2769 	highbit = fls(mask) - 1;
2770 	mask &= ~(1 << highbit);
2771 
2772 	/* Mask off all speeds higher than or equal to the current
2773 	 * one.  Force 1.5Gbps if current SPD is not available.
2774 	 */
2775 	if (spd > 1)
2776 		mask &= (1 << (spd - 1)) - 1;
2777 	else
2778 		mask &= 1;
2779 
2780 	/* were we already at the bottom? */
2781 	if (!mask)
2782 		return -EINVAL;
2783 
2784 	link->sata_spd_limit = mask;
2785 
2786 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2787 			sata_spd_string(fls(mask)));
2788 
2789 	return 0;
2790 }
2791 
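/*
 * Worked example: with sata_spd_limit = 0x3 (1.5 and 3.0 Gbps allowed)
 * and the link currently at 3.0 Gbps (spd = 2), the high bit is masked
 * off and all speeds at or above the current one are removed, leaving
 * mask = 0x1: the next hardreset is limited to 1.5 Gbps.
 */
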
2792 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2793 {
2794 	struct ata_link *host_link = &link->ap->link;
2795 	u32 limit, target, spd;
2796 
2797 	limit = link->sata_spd_limit;
2798 
2799 	/* Don't configure downstream link faster than upstream link.
2800 	 * It doesn't speed up anything and some PMPs choke on such
2801 	 * configuration.
2802 	 */
2803 	if (!ata_is_host_link(link) && host_link->sata_spd)
2804 		limit &= (1 << host_link->sata_spd) - 1;
2805 
2806 	if (limit == UINT_MAX)
2807 		target = 0;
2808 	else
2809 		target = fls(limit);
2810 
2811 	spd = (*scontrol >> 4) & 0xf;
2812 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2813 
2814 	return spd != target;
2815 }
2816 
2817 /**
2818  *	sata_set_spd_needed - is SATA spd configuration needed
2819  *	@link: Link in question
2820  *
2821  *	Test whether the spd limit in SControl matches
2822  *	@link->sata_spd_limit.  This function is used to determine
2823  *	whether hardreset is necessary to apply SATA spd
2824  *	configuration.
2825  *
2826  *	LOCKING:
2827  *	Inherited from caller.
2828  *
2829  *	RETURNS:
2830  *	1 if SATA spd configuration is needed, 0 otherwise.
2831  */
2832 static int sata_set_spd_needed(struct ata_link *link)
2833 {
2834 	u32 scontrol;
2835 
2836 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2837 		return 1;
2838 
2839 	return __sata_set_spd_needed(link, &scontrol);
2840 }
2841 
2842 /**
2843  *	sata_set_spd - set SATA spd according to spd limit
2844  *	@link: Link to set SATA spd for
2845  *
2846  *	Set SATA spd of @link according to sata_spd_limit.
2847  *
2848  *	LOCKING:
2849  *	Inherited from caller.
2850  *
2851  *	RETURNS:
2852  *	0 if spd doesn't need to be changed, 1 if spd has been
2853  *	changed.  Negative errno if SCR registers are inaccessible.
2854  */
2855 int sata_set_spd(struct ata_link *link)
2856 {
2857 	u32 scontrol;
2858 	int rc;
2859 
2860 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2861 		return rc;
2862 
2863 	if (!__sata_set_spd_needed(link, &scontrol))
2864 		return 0;
2865 
2866 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2867 		return rc;
2868 
2869 	return 1;
2870 }
2871 
2872 /*
2873  * This mode timing computation functionality is ported over from
2874  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2875  */
2876 /*
2877  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2878  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2879  * for UDMA6, which is currently supported only by Maxtor drives.
2880  *
2881  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2882  */
2883 
2884 static const struct ata_timing ata_timing[] = {
2885 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2886 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2887 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2888 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2889 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2890 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2891 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2892 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2893 
2894 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2895 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2896 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2897 
2898 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2899 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2900 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2901 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2902 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2903 
2904 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2905 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2906 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2907 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2908 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2909 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2910 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2911 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2912 
2913 	{ 0xFF }
2914 };
2915 
2916 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2917 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2918 
2919 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2920 {
2921 	q->setup   = EZ(t->setup   * 1000,  T);
2922 	q->act8b   = EZ(t->act8b   * 1000,  T);
2923 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2924 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2925 	q->active  = EZ(t->active  * 1000,  T);
2926 	q->recover = EZ(t->recover * 1000,  T);
2927 	q->cycle   = EZ(t->cycle   * 1000,  T);
2928 	q->udma    = EZ(t->udma    * 1000, UT);
2929 }
2930 
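/*
 * Worked example: drivers pass T = 1000000000 / bus-clock-in-kHz, i.e.
 * the clock period in picoseconds.  For a 33333 kHz PCI IDE clock this
 * gives T = 30000, so a 290 ns act8b requirement quantizes to
 * ENOUGH(290 * 1000, 30000) = 10 clock cycles.
 */
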
2931 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2932 		      struct ata_timing *m, unsigned int what)
2933 {
2934 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2935 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2936 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2937 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2938 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2939 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2940 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2941 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2942 }
2943 
2944 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2945 {
2946 	const struct ata_timing *t = ata_timing;
2947 
2948 	while (xfer_mode > t->mode)
2949 		t++;
2950 
2951 	if (xfer_mode == t->mode)
2952 		return t;
2953 	return NULL;
2954 }
2955 
2956 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2957 		       struct ata_timing *t, int T, int UT)
2958 {
2959 	const struct ata_timing *s;
2960 	struct ata_timing p;
2961 
2962 	/*
2963 	 * Find the mode.
2964 	 */
2965 
2966 	if (!(s = ata_timing_find_mode(speed)))
2967 		return -EINVAL;
2968 
2969 	memcpy(t, s, sizeof(*s));
2970 
2971 	/*
2972 	 * If the drive is an EIDE drive, it can tell us it needs extended
2973 	 * PIO/MW_DMA cycle timing.
2974 	 */
2975 
2976 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2977 		memset(&p, 0, sizeof(p));
2978 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2979 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2980 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2981 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2982 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2983 		}
2984 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2985 	}
2986 
2987 	/*
2988 	 * Convert the timing to bus clock counts.
2989 	 */
2990 
2991 	ata_timing_quantize(t, t, T, UT);
2992 
2993 	/*
2994 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2995 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2996 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2997 	 */
2998 
2999 	if (speed > XFER_PIO_6) {
3000 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3001 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3002 	}
3003 
3004 	/*
3005 	 * Lengthen active & recovery time so that cycle time is correct.
3006 	 */
3007 
3008 	if (t->act8b + t->rec8b < t->cyc8b) {
3009 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3010 		t->rec8b = t->cyc8b - t->act8b;
3011 	}
3012 
3013 	if (t->active + t->recover < t->cycle) {
3014 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3015 		t->recover = t->cycle - t->active;
3016 	}
3017 
3018 	/* In a few cases quantisation may produce enough error to
3019 	   leave t->cycle too low for the sum of active and recovery;
3020 	   if so, we must correct this */
3021 	if (t->active + t->recover > t->cycle)
3022 		t->cycle = t->active + t->recover;
3023 
3024 	return 0;
3025 }
3026 
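/*
 * Illustrative sketch (hypothetical example_* register helper): a PATA
 * LLD's ->set_piomode() typically computes clocked timings like this.
 *
 *	struct ata_timing t;
 *	const int T = 1000000000 / 33333;
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, T, T))
 *		example_program_timing(ap, adev->devno,
 *				       t.setup, t.active, t.recover);
 */
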
3027 /**
3028  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3029  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3030  *	@cycle: cycle duration in ns
3031  *
3032  *	Return matching xfer mode for @cycle.  The returned mode is of
3033  *	the transfer type specified by @xfer_shift.  If @cycle is too
3034  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3035  *	than the fastest known mode, the fasted mode is returned.
3036  *	than the fastest known mode, the fastest mode is returned.
3037  *	LOCKING:
3038  *	None.
3039  *
3040  *	RETURNS:
3041  *	Matching xfer_mode, 0xff if no match found.
3042  */
3043 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3044 {
3045 	u8 base_mode = 0xff, last_mode = 0xff;
3046 	const struct ata_xfer_ent *ent;
3047 	const struct ata_timing *t;
3048 
3049 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3050 		if (ent->shift == xfer_shift)
3051 			base_mode = ent->base;
3052 
3053 	for (t = ata_timing_find_mode(base_mode);
3054 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3055 		unsigned short this_cycle;
3056 
3057 		switch (xfer_shift) {
3058 		case ATA_SHIFT_PIO:
3059 		case ATA_SHIFT_MWDMA:
3060 			this_cycle = t->cycle;
3061 			break;
3062 		case ATA_SHIFT_UDMA:
3063 			this_cycle = t->udma;
3064 			break;
3065 		default:
3066 			return 0xff;
3067 		}
3068 
3069 		if (cycle > this_cycle)
3070 			break;
3071 
3072 		last_mode = t->mode;
3073 	}
3074 
3075 	return last_mode;
3076 }
3077 
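/*
 * Worked example: ata_timing_cycle2mode(ATA_SHIFT_UDMA, 60) walks the
 * UDMA rows above and returns XFER_UDMA_2 (60 ns).  A 50 ns request also
 * returns XFER_UDMA_2: UDMA3's 45 ns cycle is faster than the requested
 * duration permits, so the loop stops one row earlier.
 */
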
3078 /**
3079  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3080  *	@dev: Device to adjust xfer masks
3081  *	@sel: ATA_DNXFER_* selector
3082  *
3083  *	Adjust xfer masks of @dev downward.  Note that this function
3084  *	does not apply the change.  Invoking ata_set_mode() afterwards
3085  *	will apply the limit.
3086  *
3087  *	LOCKING:
3088  *	Inherited from caller.
3089  *
3090  *	RETURNS:
3091  *	0 on success, negative errno on failure
3092  */
3093 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3094 {
3095 	char buf[32];
3096 	unsigned long orig_mask, xfer_mask;
3097 	unsigned long pio_mask, mwdma_mask, udma_mask;
3098 	int quiet, highbit;
3099 
3100 	quiet = !!(sel & ATA_DNXFER_QUIET);
3101 	sel &= ~ATA_DNXFER_QUIET;
3102 
3103 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3104 						  dev->mwdma_mask,
3105 						  dev->udma_mask);
3106 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3107 
3108 	switch (sel) {
3109 	case ATA_DNXFER_PIO:
3110 		highbit = fls(pio_mask) - 1;
3111 		pio_mask &= ~(1 << highbit);
3112 		break;
3113 
3114 	case ATA_DNXFER_DMA:
3115 		if (udma_mask) {
3116 			highbit = fls(udma_mask) - 1;
3117 			udma_mask &= ~(1 << highbit);
3118 			if (!udma_mask)
3119 				return -ENOENT;
3120 		} else if (mwdma_mask) {
3121 			highbit = fls(mwdma_mask) - 1;
3122 			mwdma_mask &= ~(1 << highbit);
3123 			if (!mwdma_mask)
3124 				return -ENOENT;
3125 		}
3126 		break;
3127 
3128 	case ATA_DNXFER_40C:
3129 		udma_mask &= ATA_UDMA_MASK_40C;
3130 		break;
3131 
3132 	case ATA_DNXFER_FORCE_PIO0:
3133 		pio_mask &= 1;
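		/* fall through - FORCE_PIO also disables DMA */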
3134 	case ATA_DNXFER_FORCE_PIO:
3135 		mwdma_mask = 0;
3136 		udma_mask = 0;
3137 		break;
3138 
3139 	default:
3140 		BUG();
3141 	}
3142 
3143 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3144 
3145 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3146 		return -ENOENT;
3147 
3148 	if (!quiet) {
3149 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3150 			snprintf(buf, sizeof(buf), "%s:%s",
3151 				 ata_mode_string(xfer_mask),
3152 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3153 		else
3154 			snprintf(buf, sizeof(buf), "%s",
3155 				 ata_mode_string(xfer_mask));
3156 
3157 		ata_dev_printk(dev, KERN_WARNING,
3158 			       "limiting speed to %s\n", buf);
3159 	}
3160 
3161 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3162 			    &dev->udma_mask);
3163 
3164 	return 0;
3165 }
3166 
3167 static int ata_dev_set_mode(struct ata_device *dev)
3168 {
3169 	struct ata_eh_context *ehc = &dev->link->eh_context;
3170 	const char *dev_err_whine = "";
3171 	int ign_dev_err = 0;
3172 	unsigned int err_mask;
3173 	int rc;
3174 
3175 	dev->flags &= ~ATA_DFLAG_PIO;
3176 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3177 		dev->flags |= ATA_DFLAG_PIO;
3178 
3179 	err_mask = ata_dev_set_xfermode(dev);
3180 
3181 	if (err_mask & ~AC_ERR_DEV)
3182 		goto fail;
3183 
3184 	/* revalidate */
3185 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3186 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3187 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3188 	if (rc)
3189 		return rc;
3190 
3191 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3192 		/* Old CFA may refuse this command, which is just fine */
3193 		if (ata_id_is_cfa(dev->id))
3194 			ign_dev_err = 1;
3195 		/* Catch several broken garbage emulations plus some
3196 		   pre-ATA devices */
3197 		if (ata_id_major_version(dev->id) == 0 &&
3198 					dev->pio_mode <= XFER_PIO_2)
3199 			ign_dev_err = 1;
3200 		/* Some very old devices and some bad newer ones fail
3201 		   any kind of SET_XFERMODE request but support PIO0-2
3202 		   timings and no IORDY */
3203 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3204 			ign_dev_err = 1;
3205 	}
3206 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3207 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3208 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3209 	    dev->dma_mode == XFER_MW_DMA_0 &&
3210 	    (dev->id[63] >> 8) & 1)
3211 		ign_dev_err = 1;
3212 
3213 	/* if the device is actually configured correctly, ignore dev err */
3214 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3215 		ign_dev_err = 1;
3216 
3217 	if (err_mask & AC_ERR_DEV) {
3218 		if (!ign_dev_err)
3219 			goto fail;
3220 		else
3221 			dev_err_whine = " (device error ignored)";
3222 	}
3223 
3224 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3225 		dev->xfer_shift, (int)dev->xfer_mode);
3226 
3227 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3228 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3229 		       dev_err_whine);
3230 
3231 	return 0;
3232 
3233  fail:
3234 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3235 		       "(err_mask=0x%x)\n", err_mask);
3236 	return -EIO;
3237 }
3238 
3239 /**
3240  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3241  *	@link: link on which timings will be programmed
3242  *	@r_failed_dev: out parameter for failed device
3243  *
3244  *	Standard implementation of the function used to tune and set
3245  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3246  *	ata_dev_set_mode() fails, pointer to the failing device is
3247  *	returned in @r_failed_dev.
3248  *
3249  *	LOCKING:
3250  *	PCI/etc. bus probe sem.
3251  *
3252  *	RETURNS:
3253  *	0 on success, negative errno otherwise
3254  */
3255 
3256 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3257 {
3258 	struct ata_port *ap = link->ap;
3259 	struct ata_device *dev;
3260 	int rc = 0, used_dma = 0, found = 0;
3261 
3262 	/* step 1: calculate xfer_mask */
3263 	ata_link_for_each_dev(dev, link) {
3264 		unsigned long pio_mask, dma_mask;
3265 		unsigned int mode_mask;
3266 
3267 		if (!ata_dev_enabled(dev))
3268 			continue;
3269 
3270 		mode_mask = ATA_DMA_MASK_ATA;
3271 		if (dev->class == ATA_DEV_ATAPI)
3272 			mode_mask = ATA_DMA_MASK_ATAPI;
3273 		else if (ata_id_is_cfa(dev->id))
3274 			mode_mask = ATA_DMA_MASK_CFA;
3275 
3276 		ata_dev_xfermask(dev);
3277 		ata_force_xfermask(dev);
3278 
3279 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3280 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3281 
3282 		if (libata_dma_mask & mode_mask)
3283 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3284 		else
3285 			dma_mask = 0;
3286 
3287 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3288 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3289 
3290 		found = 1;
3291 		if (ata_dma_enabled(dev))
3292 			used_dma = 1;
3293 	}
3294 	if (!found)
3295 		goto out;
3296 
3297 	/* step 2: always set host PIO timings */
3298 	ata_link_for_each_dev(dev, link) {
3299 		if (!ata_dev_enabled(dev))
3300 			continue;
3301 
3302 		if (dev->pio_mode == 0xff) {
3303 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3304 			rc = -EINVAL;
3305 			goto out;
3306 		}
3307 
3308 		dev->xfer_mode = dev->pio_mode;
3309 		dev->xfer_shift = ATA_SHIFT_PIO;
3310 		if (ap->ops->set_piomode)
3311 			ap->ops->set_piomode(ap, dev);
3312 	}
3313 
3314 	/* step 3: set host DMA timings */
3315 	ata_link_for_each_dev(dev, link) {
3316 		if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev))
3317 			continue;
3318 
3319 		dev->xfer_mode = dev->dma_mode;
3320 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3321 		if (ap->ops->set_dmamode)
3322 			ap->ops->set_dmamode(ap, dev);
3323 	}
3324 
3325 	/* step 4: update devices' xfer mode */
3326 	ata_link_for_each_dev(dev, link) {
3327 		/* don't update suspended devices' xfer mode */
3328 		if (!ata_dev_enabled(dev))
3329 			continue;
3330 
3331 		rc = ata_dev_set_mode(dev);
3332 		if (rc)
3333 			goto out;
3334 	}
3335 
3336 	/* Record simplex status. If we selected DMA then the other
3337 	 * host channels are not permitted to do so.
3338 	 */
3339 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3340 		ap->host->simplex_claimed = ap;
3341 
3342  out:
3343 	if (rc)
3344 		*r_failed_dev = dev;
3345 	return rc;
3346 }
3347 
3348 /**
3349  *	ata_wait_ready - wait for link to become ready
3350  *	@link: link to be waited on
3351  *	@deadline: deadline jiffies for the operation
3352  *	@check_ready: callback to check link readiness
3353  *
3354  *	Wait for @link to become ready.  @check_ready should return
3355  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3356  *	link doesn't seem to be occupied, other errno for other error
3357  *	conditions.
3358  *
3359  *	Transient -ENODEV conditions are allowed for
3360  *	ATA_TMOUT_FF_WAIT.
3361  *
3362  *	LOCKING:
3363  *	EH context.
3364  *
3365  *	RETURNS:
3366  *	0 if @link is ready before @deadline; otherwise, -errno.
3367  */
3368 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3369 		   int (*check_ready)(struct ata_link *link))
3370 {
3371 	unsigned long start = jiffies;
3372 	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3373 	int warned = 0;
3374 
3375 	if (time_after(nodev_deadline, deadline))
3376 		nodev_deadline = deadline;
3377 
3378 	while (1) {
3379 		unsigned long now = jiffies;
3380 		int ready, tmp;
3381 
3382 		ready = tmp = check_ready(link);
3383 		if (ready > 0)
3384 			return 0;
3385 
3386 		/* -ENODEV could be transient.  Ignore -ENODEV if link
3387 		 * is online.  Also, some SATA devices take a long
3388 		 * time to clear 0xff after reset.  For example,
3389 		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3390 		 * GoVault needs even more than that.  Wait for
3391 		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3392 		 *
3393 		 * Note that some PATA controllers (pata_ali) explode
3394 		 * if status register is read more than once when
3395 		 * there's no device attached.
3396 		 */
3397 		if (ready == -ENODEV) {
3398 			if (ata_link_online(link))
3399 				ready = 0;
3400 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3401 				 !ata_link_offline(link) &&
3402 				 time_before(now, nodev_deadline))
3403 				ready = 0;
3404 		}
3405 
3406 		if (ready)
3407 			return ready;
3408 		if (time_after(now, deadline))
3409 			return -EBUSY;
3410 
3411 		if (!warned && time_after(now, start + 5 * HZ) &&
3412 		    (deadline - now > 3 * HZ)) {
3413 			ata_link_printk(link, KERN_WARNING,
3414 				"link is slow to respond, please be patient "
3415 				"(ready=%d)\n", tmp);
3416 			warned = 1;
3417 		}
3418 
3419 		msleep(50);
3420 	}
3421 }
3422 
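/*
 * Illustrative sketch of a @check_ready callback for a taskfile-style
 * controller (the example_* status read is hypothetical; SFF drivers
 * have a stock helper for this in libata-sff):
 *
 *	static int example_check_ready(struct ata_link *link)
 *	{
 *		u8 status = example_read_status(link->ap);
 *
 *		if (status == 0xff)
 *			return -ENODEV;
 *		return !(status & ATA_BUSY);
 *	}
 */
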
3423 /**
3424  *	ata_wait_after_reset - wait for link to become ready after reset
3425  *	@link: link to be waited on
3426  *	@deadline: deadline jiffies for the operation
3427  *	@check_ready: callback to check link readiness
3428  *
3429  *	Wait for @link to become ready after reset.
3430  *
3431  *	LOCKING:
3432  *	EH context.
3433  *
3434  *	RETURNS:
3435  *	0 if @link is ready before @deadline; otherwise, -errno.
3436  */
3437 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3438 				int (*check_ready)(struct ata_link *link))
3439 {
3440 	msleep(ATA_WAIT_AFTER_RESET);
3441 
3442 	return ata_wait_ready(link, deadline, check_ready);
3443 }
3444 
3445 /**
3446  *	sata_link_debounce - debounce SATA phy status
3447  *	@link: ATA link to debounce SATA phy status for
3448  *	@params: timing parameters { interval, duration, timeout } in msec
3449  *	@deadline: deadline jiffies for the operation
3450  *
3451  *	Make sure SStatus of @link reaches stable state, determined by
3452  *	holding the same value where DET is not 1 for @duration polled
3453  *	every @interval, before @timeout.  Timeout constrains the
3454  *	beginning of the stable state.  Because DET gets stuck at 1 on
3455  *	some controllers after hot unplugging, this function waits
3456  *	until timeout then returns 0 if DET is stable at 1.
3457  *
3458  *	@timeout is further limited by @deadline.  The sooner of the
3459  *	two is used.
3460  *
3461  *	LOCKING:
3462  *	Kernel thread context (may sleep)
3463  *
3464  *	RETURNS:
3465  *	0 on success, -errno on failure.
3466  */
3467 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3468 		       unsigned long deadline)
3469 {
3470 	unsigned long interval = params[0];
3471 	unsigned long duration = params[1];
3472 	unsigned long last_jiffies, t;
3473 	u32 last, cur;
3474 	int rc;
3475 
3476 	t = ata_deadline(jiffies, params[2]);
3477 	if (time_before(t, deadline))
3478 		deadline = t;
3479 
3480 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3481 		return rc;
3482 	cur &= 0xf;
3483 
3484 	last = cur;
3485 	last_jiffies = jiffies;
3486 
3487 	while (1) {
3488 		msleep(interval);
3489 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3490 			return rc;
3491 		cur &= 0xf;
3492 
3493 		/* DET stable? */
3494 		if (cur == last) {
3495 			if (cur == 1 && time_before(jiffies, deadline))
3496 				continue;
3497 			if (time_after(jiffies,
3498 				       ata_deadline(last_jiffies, duration)))
3499 				return 0;
3500 			continue;
3501 		}
3502 
3503 		/* unstable, start over */
3504 		last = cur;
3505 		last_jiffies = jiffies;
3506 
3507 		/* Check deadline.  If debouncing failed, return
3508 		 * -EPIPE to tell upper layer to lower link speed.
3509 		 */
3510 		if (time_after(jiffies, deadline))
3511 			return -EPIPE;
3512 	}
3513 }
3514 
3515 /**
3516  *	sata_link_resume - resume SATA link
3517  *	@link: ATA link to resume SATA
3518  *	@params: timing parameters { interval, duration, timeout } in msec
3519  *	@deadline: deadline jiffies for the operation
3520  *
3521  *	Resume SATA phy @link and debounce it.
3522  *
3523  *	LOCKING:
3524  *	Kernel thread context (may sleep)
3525  *
3526  *	RETURNS:
3527  *	0 on success, -errno on failure.
3528  */
3529 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3530 		     unsigned long deadline)
3531 {
3532 	u32 scontrol, serror;
3533 	int rc;
3534 
3535 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3536 		return rc;
3537 
3538 	scontrol = (scontrol & 0x0f0) | 0x300;
3539 
3540 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3541 		return rc;
3542 
3543 	/* Some PHYs react badly if SStatus is pounded immediately
3544 	 * after resuming.  Delay 200ms before debouncing.
3545 	 */
3546 	msleep(200);
3547 
3548 	if ((rc = sata_link_debounce(link, params, deadline)))
3549 		return rc;
3550 
3551 	/* clear SError, some PHYs require this even for SRST to work */
3552 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3553 		rc = sata_scr_write(link, SCR_ERROR, serror);
3554 
3555 	return rc != -EINVAL ? rc : 0;
3556 }
3557 
3558 /**
3559  *	ata_std_prereset - prepare for reset
3560  *	@link: ATA link to be reset
3561  *	@deadline: deadline jiffies for the operation
3562  *
3563  *	@link is about to be reset.  Initialize it.  Failure from
3564  *	prereset makes libata abort whole reset sequence and give up
3565  *	that port, so prereset should be best-effort.  It does its
3566  *	best to prepare for reset sequence but if things go wrong, it
3567  *	should just whine, not fail.
3568  *
3569  *	LOCKING:
3570  *	Kernel thread context (may sleep)
3571  *
3572  *	RETURNS:
3573  *	0 on success, -errno otherwise.
3574  */
3575 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3576 {
3577 	struct ata_port *ap = link->ap;
3578 	struct ata_eh_context *ehc = &link->eh_context;
3579 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3580 	int rc;
3581 
3582 	/* if we're about to do hardreset, nothing more to do */
3583 	if (ehc->i.action & ATA_EH_HARDRESET)
3584 		return 0;
3585 
3586 	/* if SATA, resume link */
3587 	if (ap->flags & ATA_FLAG_SATA) {
3588 		rc = sata_link_resume(link, timing, deadline);
3589 		/* whine about phy resume failure but proceed */
3590 		if (rc && rc != -EOPNOTSUPP)
3591 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3592 					"link for reset (errno=%d)\n", rc);
3593 	}
3594 
3595 	/* no point in trying softreset on offline link */
3596 	if (ata_link_offline(link))
3597 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3598 
3599 	return 0;
3600 }
3601 
3602 /**
3603  *	sata_link_hardreset - reset link via SATA phy reset
3604  *	@link: link to reset
3605  *	@timing: timing parameters { interval, duration, timeout } in msec
3606  *	@deadline: deadline jiffies for the operation
3607  *	@online: optional out parameter indicating link onlineness
3608  *	@check_ready: optional callback to check link readiness
3609  *
3610  *	SATA phy-reset @link using DET bits of SControl register.
3611  *	After hardreset, link readiness is waited upon using
3612  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3613  *	allowed to omit @check_ready and perform the wait themselves
3614  *	after this function returns.  Device classification is the LLD's
3615  *	responsibility.
3616  *
3617  *	*@online is set to one iff reset succeeded and @link is online
3618  *	after reset.
3619  *
3620  *	LOCKING:
3621  *	Kernel thread context (may sleep)
3622  *
3623  *	RETURNS:
3624  *	0 on success, -errno otherwise.
3625  */
3626 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3627 			unsigned long deadline,
3628 			bool *online, int (*check_ready)(struct ata_link *))
3629 {
3630 	u32 scontrol;
3631 	int rc;
3632 
3633 	DPRINTK("ENTER\n");
3634 
3635 	if (online)
3636 		*online = false;
3637 
3638 	if (sata_set_spd_needed(link)) {
3639 		/* SATA spec says nothing about how to reconfigure
3640 		 * spd.  To be on the safe side, turn off phy during
3641 		 * reconfiguration.  This works for at least ICH7 AHCI
3642 		 * and Sil3124.
3643 		 */
3644 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3645 			goto out;
3646 
3647 		scontrol = (scontrol & 0x0f0) | 0x304;
3648 
3649 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3650 			goto out;
3651 
3652 		sata_set_spd(link);
3653 	}
3654 
3655 	/* issue phy wake/reset */
3656 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3657 		goto out;
3658 
3659 	scontrol = (scontrol & 0x0f0) | 0x301;
3660 
3661 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3662 		goto out;
3663 
3664 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3665 	 * 10.4.2 says at least 1 ms.
3666 	 */
3667 	msleep(1);
3668 
3669 	/* bring link back */
3670 	rc = sata_link_resume(link, timing, deadline);
3671 	if (rc)
3672 		goto out;
3673 	/* if link is offline nothing more to do */
3674 	if (ata_link_offline(link))
3675 		goto out;
3676 
3677 	/* Link is online.  From this point, -ENODEV too is an error. */
3678 	if (online)
3679 		*online = true;
3680 
3681 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3682 		/* If PMP is supported, we have to do follow-up SRST.
3683 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3684 		 * the first port is empty.  Wait only for
3685 		 * ATA_TMOUT_PMP_SRST_WAIT.
3686 		 */
3687 		if (check_ready) {
3688 			unsigned long pmp_deadline;
3689 
3690 			pmp_deadline = ata_deadline(jiffies,
3691 						    ATA_TMOUT_PMP_SRST_WAIT);
3692 			if (time_after(pmp_deadline, deadline))
3693 				pmp_deadline = deadline;
3694 			ata_wait_ready(link, pmp_deadline, check_ready);
3695 		}
3696 		rc = -EAGAIN;
3697 		goto out;
3698 	}
3699 
3700 	rc = 0;
3701 	if (check_ready)
3702 		rc = ata_wait_ready(link, deadline, check_ready);
3703  out:
3704 	if (rc && rc != -EAGAIN) {
3705 		/* online is set iff link is online && reset succeeded */
3706 		if (online)
3707 			*online = false;
3708 		ata_link_printk(link, KERN_ERR,
3709 				"COMRESET failed (errno=%d)\n", rc);
3710 	}
3711 	DPRINTK("EXIT, rc=%d\n", rc);
3712 	return rc;
3713 }
3714 
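/*
 * Illustrative sketch: an LLD hardreset built on the helper above, in
 * the style of sata_std_hardreset() below but waiting for readiness
 * (example_check_ready is the hypothetical callback sketched after
 * ata_wait_ready()):
 *
 *	static int example_hardreset(struct ata_link *link,
 *				     unsigned int *class,
 *				     unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *			sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline,
 *					   &online, example_check_ready);
 *	}
 */
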
3715 /**
3716  *	sata_std_hardreset - COMRESET w/o waiting or classification
3717  *	@link: link to reset
3718  *	@class: resulting class of attached device
3719  *	@deadline: deadline jiffies for the operation
3720  *
3721  *	Standard SATA COMRESET w/o waiting or classification.
3722  *
3723  *	LOCKING:
3724  *	Kernel thread context (may sleep)
3725  *
3726  *	RETURNS:
3727  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3728  */
3729 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3730 		       unsigned long deadline)
3731 {
3732 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3733 	bool online;
3734 	int rc;
3735 
3736 	/* do hardreset */
3737 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3738 	return online ? -EAGAIN : rc;
3739 }
3740 
3741 /**
3742  *	ata_std_postreset - standard postreset callback
3743  *	@link: the target ata_link
3744  *	@classes: classes of attached devices
3745  *
3746  *	This function is invoked after a successful reset.  Note that
3747  *	the device might have been reset more than once using
3748  *	different reset methods before postreset is invoked.
3749  *
3750  *	LOCKING:
3751  *	Kernel thread context (may sleep)
3752  */
3753 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3754 {
3755 	u32 serror;
3756 
3757 	DPRINTK("ENTER\n");
3758 
3759 	/* reset complete, clear SError */
3760 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3761 		sata_scr_write(link, SCR_ERROR, serror);
3762 
3763 	/* print link status */
3764 	sata_print_link_status(link);
3765 
3766 	DPRINTK("EXIT\n");
3767 }
3768 
3769 /**
3770  *	ata_dev_same_device - Determine whether new ID matches configured device
3771  *	@dev: device to compare against
3772  *	@new_class: class of the new device
3773  *	@new_id: IDENTIFY page of the new device
3774  *
3775  *	Compare @new_class and @new_id against @dev and determine
3776  *	whether @dev is the device indicated by @new_class and
3777  *	@new_id.
3778  *
3779  *	LOCKING:
3780  *	None.
3781  *
3782  *	RETURNS:
3783  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3784  */
3785 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3786 			       const u16 *new_id)
3787 {
3788 	const u16 *old_id = dev->id;
3789 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3790 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3791 
3792 	if (dev->class != new_class) {
3793 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3794 			       dev->class, new_class);
3795 		return 0;
3796 	}
3797 
3798 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3799 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3800 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3801 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3802 
3803 	if (strcmp(model[0], model[1])) {
3804 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3805 			       "'%s' != '%s'\n", model[0], model[1]);
3806 		return 0;
3807 	}
3808 
3809 	if (strcmp(serial[0], serial[1])) {
3810 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3811 			       "'%s' != '%s'\n", serial[0], serial[1]);
3812 		return 0;
3813 	}
3814 
3815 	return 1;
3816 }
3817 
3818 /**
3819  *	ata_dev_reread_id - Re-read IDENTIFY data
3820  *	@dev: target ATA device
3821  *	@readid_flags: read ID flags
3822  *
3823  *	Re-read IDENTIFY page and make sure @dev is still attached to
3824  *	the port.
3825  *
3826  *	LOCKING:
3827  *	Kernel thread context (may sleep)
3828  *
3829  *	RETURNS:
3830  *	0 on success, negative errno otherwise
3831  */
3832 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3833 {
3834 	unsigned int class = dev->class;
3835 	u16 *id = (void *)dev->link->ap->sector_buf;
3836 	int rc;
3837 
3838 	/* read ID data */
3839 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3840 	if (rc)
3841 		return rc;
3842 
3843 	/* is the device still there? */
3844 	if (!ata_dev_same_device(dev, class, id))
3845 		return -ENODEV;
3846 
3847 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3848 	return 0;
3849 }
3850 
3851 /**
3852  *	ata_dev_revalidate - Revalidate ATA device
3853  *	@dev: device to revalidate
3854  *	@new_class: new class code
3855  *	@readid_flags: read ID flags
3856  *
3857  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3858  *	port and reconfigure it according to the new IDENTIFY page.
3859  *
3860  *	LOCKING:
3861  *	Kernel thread context (may sleep)
3862  *
3863  *	RETURNS:
3864  *	0 on success, negative errno otherwise
3865  */
3866 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3867 		       unsigned int readid_flags)
3868 {
3869 	u64 n_sectors = dev->n_sectors;
3870 	int rc;
3871 
3872 	if (!ata_dev_enabled(dev))
3873 		return -ENODEV;
3874 
3875 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3876 	if (ata_class_enabled(new_class) &&
3877 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3878 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3879 			       dev->class, new_class);
3880 		rc = -ENODEV;
3881 		goto fail;
3882 	}
3883 
3884 	/* re-read ID */
3885 	rc = ata_dev_reread_id(dev, readid_flags);
3886 	if (rc)
3887 		goto fail;
3888 
3889 	/* configure device according to the new ID */
3890 	rc = ata_dev_configure(dev);
3891 	if (rc)
3892 		goto fail;
3893 
3894 	/* verify n_sectors hasn't changed */
3895 	if (dev->class == ATA_DEV_ATA && n_sectors &&
3896 	    dev->n_sectors != n_sectors) {
3897 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3898 			       "%llu != %llu\n",
3899 			       (unsigned long long)n_sectors,
3900 			       (unsigned long long)dev->n_sectors);
3901 
3902 		/* restore original n_sectors */
3903 		dev->n_sectors = n_sectors;
3904 
3905 		rc = -ENODEV;
3906 		goto fail;
3907 	}
3908 
3909 	return 0;
3910 
3911  fail:
3912 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3913 	return rc;
3914 }
3915 
3916 struct ata_blacklist_entry {
3917 	const char *model_num;
3918 	const char *model_rev;
3919 	unsigned long horkage;
3920 };
3921 
3922 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3923 	/* Devices with DMA related problems under Linux */
3924 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
3925 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
3926 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
3927 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
3928 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
3929 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
3930 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
3931 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
3932 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
3933 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
3934 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
3935 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
3936 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
3937 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
3938 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
3939 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
3940 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
3941 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
3942 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
3943 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
3944 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
3945 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
3946 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
3947 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
3948 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
3949 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
3950 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3951 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
3952 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
3953 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
3954 	/* Odd clown on sil3726/4726 PMPs */
3955 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
3956 
3957 	/* Weird ATAPI devices */
3958 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
3959 
3960 	/* Devices we expect to fail diagnostics */
3961 
3962 	/* Devices where NCQ should be avoided */
3963 	/* NCQ is slow */
3964 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
3965 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
3966 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
3967 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
3968 	/* NCQ is broken */
3969 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
3970 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
3971 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
3972 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
3973 
3974 	/* Blacklist entries taken from Silicon Image 3124/3132
3975 	   Windows driver .inf file - also several Linux problem reports */
3976 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
3977 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
3978 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3979 
3980 	/* devices which puke on READ_NATIVE_MAX */
3981 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
3982 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3983 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3984 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
3985 
3986 	/* Devices which report 1 sector over size HPA */
3987 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3988 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3989 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3990 
3991 	/* Devices which get the IVB wrong */
3992 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3993 	/* Maybe we should just blacklist TSSTcorp... */
3994 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
3995 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
3996 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
3997 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
3998 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
3999 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
4000 
4001 	/* End Marker */
4002 	{ }
4003 };
4004 
4005 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4006 {
4007 	const char *p;
4008 	int len;
4009 
4010 	/*
4011 	 * check for trailing wildcard: *\0
4012 	 */
4013 	p = strchr(patt, wildchar);
4014 	if (p && ((*(p + 1)) == 0))
4015 		len = p - patt;
4016 	else {
4017 		len = strlen(name);
4018 		if (!len) {
4019 			if (!*patt)
4020 				return 0;
4021 			return -1;
4022 		}
4023 	}
4024 
4025 	return strncmp(patt, name, len);
4026 }
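
/*
 * Illustrative use (hypothetical strings): with the "Maxtor *"/"BANC*"
 * entry in the blacklist above, a drive reporting model
 * "Maxtor 7V300F0" and a firmware revision starting with "BANC"
 * matches because both pattern comparisons below return 0:
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*');
 *	strn_pattern_cmp("BANC*", "BANC1G10", '*');
 */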
4027 
4028 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4029 {
4030 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4031 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4032 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4033 
4034 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4035 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4036 
4037 	while (ad->model_num) {
4038 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4039 			if (ad->model_rev == NULL)
4040 				return ad->horkage;
4041 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4042 				return ad->horkage;
4043 		}
4044 		ad++;
4045 	}
4046 	return 0;
4047 }
4048 
4049 static int ata_dma_blacklisted(const struct ata_device *dev)
4050 {
4051 	/* We don't support polling DMA.
4052 	 * Blacklist DMA (i.e. use PIO) for ATAPI devices with CDB-intr
4053 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4054 	 */
4055 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4056 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4057 		return 1;
4058 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4059 }
4060 
4061 /**
4062  *	ata_is_40wire		-	check drive side detection
4063  *	@dev: device
4064  *
4065  *	Perform drive side detection decoding, allowing for device vendors
4066  *	who can't follow the documentation.
4067  */
4068 
4069 static int ata_is_40wire(struct ata_device *dev)
4070 {
4071 	if (dev->horkage & ATA_HORKAGE_IVB)
4072 		return ata_drive_40wire_relaxed(dev->id);
4073 	return ata_drive_40wire(dev->id);
4074 }
4075 
4076 /**
4077  *	cable_is_40wire		-	40/80/SATA decider
4078  *	@ap: port to consider
4079  *
4080  *	This function encapsulates the policy for speed management
4081  *	in one place. At the moment we don't cache the result but
4082  *	there is a good case for setting ap->cbl to the result when
4083  *	we are called with unknown cables (and figuring out if it
4084  *	impacts hotplug at all).
4085  *
4086  *	Return 1 if the cable appears to be 40 wire.
4087  */
4088 
4089 static int cable_is_40wire(struct ata_port *ap)
4090 {
4091 	struct ata_link *link;
4092 	struct ata_device *dev;
4093 
4094 	/* If the controller thinks we are 40 wire, we are */
4095 	if (ap->cbl == ATA_CBL_PATA40)
4096 		return 1;
4097 	/* If the controller thinks we are 80 wire, we are */
4098 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4099 		return 0;
4100 	/* If the system is known to be 40 wire short cable (eg laptop),
4101 	/* If the system is known to be a 40 wire short cable (e.g. laptop),
4102 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4103 		return 0;
4104 	/* If the controller doesn't know we scan
4105 	/* If the controller doesn't know, we scan.
4106 
4107 	   - Note: We look for an all-40-wire detect at this point.
4108 	     Any 80 wire detect is taken to mean an 80 wire cable
4109 	     because
4110 	     - in many setups only the one drive (slave if present)
4111 	       will give a valid detect
4112 	     - if you have a drive that isn't detect capable you don't
4113 	       want it to colour the choice
4114 	 */
4115 		ata_link_for_each_dev(dev, link) {
4116 			if (!ata_is_40wire(dev))
4117 				return 0;
4118 		}
4119 	}
4120 	return 1;
4121 }
4122 
4123 /**
4124  *	ata_dev_xfermask - Compute supported xfermask of the given device
4125  *	@dev: Device to compute xfermask for
4126  *
4127  *	Compute supported xfermask of @dev and store it in
4128  *	dev->*_mask.  This function is responsible for applying all
4129  *	known limits including host controller limits, device
4130  *	blacklist, etc...
4131  *
4132  *	LOCKING:
4133  *	None.
4134  */
4135 static void ata_dev_xfermask(struct ata_device *dev)
4136 {
4137 	struct ata_link *link = dev->link;
4138 	struct ata_port *ap = link->ap;
4139 	struct ata_host *host = ap->host;
4140 	unsigned long xfer_mask;
4141 
4142 	/* controller modes available */
4143 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4144 				      ap->mwdma_mask, ap->udma_mask);
4145 
4146 	/* drive modes available */
4147 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4148 				       dev->mwdma_mask, dev->udma_mask);
4149 	xfer_mask &= ata_id_xfermask(dev->id);
4150 
4151 	/*
4152 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4153 	 *	cable
4154 	 */
4155 	if (ata_dev_pair(dev)) {
4156 		/* No PIO5 or PIO6 */
4157 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4158 		/* No MWDMA3 or MWDMA4 */
4159 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4160 	}
4161 
4162 	if (ata_dma_blacklisted(dev)) {
4163 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4164 		ata_dev_printk(dev, KERN_WARNING,
4165 			       "device is on DMA blacklist, disabling DMA\n");
4166 	}
4167 
4168 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4169 	    host->simplex_claimed && host->simplex_claimed != ap) {
4170 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4171 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4172 			       "other device, disabling DMA\n");
4173 	}
4174 
4175 	if (ap->flags & ATA_FLAG_NO_IORDY)
4176 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4177 
4178 	if (ap->ops->mode_filter)
4179 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4180 
4181 	/* Apply cable rule here.  Don't apply it early because when
4182 	 * we handle hot plug the cable type can itself change.
4183 	 * Check this last so that we know if the transfer rate was
4184 	 * solely limited by the cable.
4185 	 * Unknown or 80 wire cables reported host side are checked
4186 	 * drive side as well. Cases where we know a 40wire cable
4187 	 * is used safely for 80 are not checked here.
4188 	 */
4189 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4190 		/* UDMA/44 or higher would be available */
4191 		if (cable_is_40wire(ap)) {
4192 			ata_dev_printk(dev, KERN_WARNING,
4193 				 "limited to UDMA/33 due to 40-wire cable\n");
4194 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4195 		}
4196 
4197 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4198 			    &dev->mwdma_mask, &dev->udma_mask);
4199 }
4200 
4201 /**
4202  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4203  *	@dev: Device to which command will be sent
4204  *
4205  *	Issue SET FEATURES - XFER MODE command to device @dev
4206  *	on its port.
4207  *
4208  *	LOCKING:
4209  *	PCI/etc. bus probe sem.
4210  *
4211  *	RETURNS:
4212  *	0 on success, AC_ERR_* mask otherwise.
4213  */
4214 
4215 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4216 {
4217 	struct ata_taskfile tf;
4218 	unsigned int err_mask;
4219 
4220 	/* set up set-features taskfile */
4221 	DPRINTK("set features - xfer mode\n");
4222 
4223 	/* Some controllers and ATAPI devices show flaky interrupt
4224 	 * behavior after setting xfer mode.  Use polling instead.
4225 	 */
4226 	ata_tf_init(dev, &tf);
4227 	tf.command = ATA_CMD_SET_FEATURES;
4228 	tf.feature = SETFEATURES_XFER;
4229 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4230 	tf.protocol = ATA_PROT_NODATA;
4231 	/* If we are using IORDY we must send the mode setting command */
4232 	if (ata_pio_need_iordy(dev))
4233 		tf.nsect = dev->xfer_mode;
4234 	/* If the device has IORDY and the controller does not - turn it off */
4235 	else if (ata_id_has_iordy(dev->id))
4236 		tf.nsect = 0x01;
4237 	else /* In the ancient relic department - skip all of this */
4238 		return 0;
4239 
4240 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4241 
4242 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4243 	return err_mask;
4244 }
4245 /**
4246  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4247  *	@dev: Device to which command will be sent
4248  *	@enable: Whether to enable or disable the feature
4249  *	@feature: The feature to set, passed in the sector count field
4250  *
4251  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4252  *	with @feature passed in the sector count field.
4253  *
4254  *	LOCKING:
4255  *	PCI/etc. bus probe sem.
4256  *
4257  *	RETURNS:
4258  *	0 on success, AC_ERR_* mask otherwise.
4259  */
4260 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4261 					u8 feature)
4262 {
4263 	struct ata_taskfile tf;
4264 	unsigned int err_mask;
4265 
4266 	/* set up set-features taskfile */
4267 	DPRINTK("set features - SATA features\n");
4268 
4269 	ata_tf_init(dev, &tf);
4270 	tf.command = ATA_CMD_SET_FEATURES;
4271 	tf.feature = enable;
4272 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4273 	tf.protocol = ATA_PROT_NODATA;
4274 	tf.nsect = feature;
4275 
4276 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4277 
4278 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4279 	return err_mask;
4280 }
4281 
4282 /**
4283  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4284  *	@dev: Device to which command will be sent
4285  *	@heads: Number of heads (taskfile parameter)
4286  *	@sectors: Number of sectors (taskfile parameter)
4287  *
4288  *	LOCKING:
4289  *	Kernel thread context (may sleep)
4290  *
4291  *	RETURNS:
4292  *	0 on success, AC_ERR_* mask otherwise.
4293  */
4294 static unsigned int ata_dev_init_params(struct ata_device *dev,
4295 					u16 heads, u16 sectors)
4296 {
4297 	struct ata_taskfile tf;
4298 	unsigned int err_mask;
4299 
4300 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4301 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4302 		return AC_ERR_INVALID;
4303 
4304 	/* set up init dev params taskfile */
4305 	DPRINTK("init dev params\n");
4306 
4307 	ata_tf_init(dev, &tf);
4308 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4309 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4310 	tf.protocol = ATA_PROT_NODATA;
4311 	tf.nsect = sectors;
4312 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4313 
4314 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4315 	/* A clean abort indicates an original or just-out-of-spec drive;
4316 	   continue, as we issue the setup based on the working
4317 	   geometry the drive reported */
4318 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4319 		err_mask = 0;
4320 
4321 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4322 	return err_mask;
4323 }
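
/*
 * Callers typically pass the drive's default geometry from the
 * IDENTIFY data (word 3 = number of heads, word 6 = sectors per
 * track), e.g.:
 *
 *	ata_dev_init_params(dev, id[3], id[6]);
 */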
4324 
4325 /**
4326  *	ata_sg_clean - Unmap DMA memory associated with command
4327  *	@qc: Command containing DMA memory to be released
4328  *
4329  *	Unmap all mapped DMA memory associated with this command.
4330  *
4331  *	LOCKING:
4332  *	spin_lock_irqsave(host lock)
4333  */
4334 void ata_sg_clean(struct ata_queued_cmd *qc)
4335 {
4336 	struct ata_port *ap = qc->ap;
4337 	struct scatterlist *sg = qc->sg;
4338 	int dir = qc->dma_dir;
4339 
4340 	WARN_ON(sg == NULL);
4341 
4342 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4343 
4344 	if (qc->n_elem)
4345 		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4346 
4347 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4348 	qc->sg = NULL;
4349 }
4350 
4351 /**
4352  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4353  *	@qc: Metadata associated with taskfile to check
4354  *
4355  *	Allow low-level driver to filter ATA PACKET commands, returning
4356  *	a status indicating whether or not it is OK to use DMA for the
4357  *	supplied PACKET command.
4358  *
4359  *	LOCKING:
4360  *	spin_lock_irqsave(host lock)
4361  *
4362  *	RETURNS: 0 when ATAPI DMA can be used
4363  *               nonzero otherwise
4364  */
4365 int atapi_check_dma(struct ata_queued_cmd *qc)
4366 {
4367 	struct ata_port *ap = qc->ap;
4368 
4369 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4370 	 * few ATAPI devices choke on such DMA requests.
4371 	 */
4372 	if (unlikely(qc->nbytes & 15))
4373 		return 1;
4374 
4375 	if (ap->ops->check_atapi_dma)
4376 		return ap->ops->check_atapi_dma(qc);
4377 
4378 	return 0;
4379 }
4380 
4381 /**
4382  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4383  *	@qc: ATA command in question
4384  *
4385  *	Non-NCQ commands cannot run with any other command, NCQ or
4386  *	not.  As the upper layer only knows the queue depth, we are
4387  *	responsible for maintaining exclusion.  This function checks
4388  *	whether a new command @qc can be issued.
4389  *
4390  *	LOCKING:
4391  *	spin_lock_irqsave(host lock)
4392  *
4393  *	RETURNS:
4394  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4395  */
4396 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4397 {
4398 	struct ata_link *link = qc->dev->link;
4399 
4400 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4401 		if (!ata_tag_valid(link->active_tag))
4402 			return 0;
4403 	} else {
4404 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4405 			return 0;
4406 	}
4407 
4408 	return ATA_DEFER_LINK;
4409 }
4410 
4411 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4412 
4413 /**
4414  *	ata_sg_init - Associate command with scatter-gather table.
4415  *	@qc: Command to be associated
4416  *	@sg: Scatter-gather table.
4417  *	@n_elem: Number of elements in s/g table.
4418  *
4419  *	Initialize the data-related elements of queued_cmd @qc
4420  *	to point to a scatter-gather table @sg, containing @n_elem
4421  *	elements.
4422  *
4423  *	LOCKING:
4424  *	spin_lock_irqsave(host lock)
4425  */
4426 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4427 		 unsigned int n_elem)
4428 {
4429 	qc->sg = sg;
4430 	qc->n_elem = n_elem;
4431 	qc->cursg = qc->sg;
4432 }
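
/*
 * For example, the SCSI translation layer associates a qc with the
 * scatterlist of the originating SCSI command roughly as:
 *
 *	ata_sg_init(qc, scsi_sglist(scmd), scsi_sg_count(scmd));
 */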
4433 
4434 /**
4435  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4436  *	@qc: Command with scatter-gather table to be mapped.
4437  *
4438  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4439  *
4440  *	LOCKING:
4441  *	spin_lock_irqsave(host lock)
4442  *
4443  *	RETURNS:
4444  *	Zero on success, negative on error.
4445  *
4446  */
4447 static int ata_sg_setup(struct ata_queued_cmd *qc)
4448 {
4449 	struct ata_port *ap = qc->ap;
4450 	unsigned int n_elem;
4451 
4452 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4453 
4454 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4455 	if (n_elem < 1)
4456 		return -1;
4457 
4458 	DPRINTK("%d sg elements mapped\n", n_elem);
4459 
4460 	qc->n_elem = n_elem;
4461 	qc->flags |= ATA_QCFLAG_DMAMAP;
4462 
4463 	return 0;
4464 }
4465 
4466 /**
4467  *	swap_buf_le16 - swap halves of 16-bit words in place
4468  *	@buf:  Buffer to swap
4469  *	@buf_words:  Number of 16-bit words in buffer.
4470  *
4471  *	Swap halves of 16-bit words if needed to convert from
4472  *	little-endian byte order to native cpu byte order, or
4473  *	vice-versa.
4474  *
4475  *	LOCKING:
4476  *	Inherited from caller.
4477  */
4478 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4479 {
4480 #ifdef __BIG_ENDIAN
4481 	unsigned int i;
4482 
4483 	for (i = 0; i < buf_words; i++)
4484 		buf[i] = le16_to_cpu(buf[i]);
4485 #endif /* __BIG_ENDIAN */
4486 }
4487 
4488 /**
4489  *	ata_qc_new - Request an available ATA command, for queueing
4490  *	@ap: Port to request the command structure from
4492  *
4493  *	LOCKING:
4494  *	None.
4495  */
4496 
4497 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4498 {
4499 	struct ata_queued_cmd *qc = NULL;
4500 	unsigned int i;
4501 
4502 	/* no command while frozen */
4503 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4504 		return NULL;
4505 
4506 	/* the last tag is reserved for internal command. */
4507 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4508 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4509 			qc = __ata_qc_from_tag(ap, i);
4510 			break;
4511 		}
4512 
4513 	if (qc)
4514 		qc->tag = i;
4515 
4516 	return qc;
4517 }
4518 
4519 /**
4520  *	ata_qc_new_init - Request an available ATA command, and initialize it
4521  *	@dev: Device from whom we request an available command structure
4522  *
4523  *	LOCKING:
4524  *	None.
4525  */
4526 
4527 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4528 {
4529 	struct ata_port *ap = dev->link->ap;
4530 	struct ata_queued_cmd *qc;
4531 
4532 	qc = ata_qc_new(ap);
4533 	if (qc) {
4534 		qc->scsicmd = NULL;
4535 		qc->ap = ap;
4536 		qc->dev = dev;
4537 
4538 		ata_qc_reinit(qc);
4539 	}
4540 
4541 	return qc;
4542 }
4543 
4544 /**
4545  *	ata_qc_free - free unused ata_queued_cmd
4546  *	@qc: Command to free
4547  *
4548  *	Designed to free unused ata_queued_cmd object
4549  *	in case something prevents using it.
4550  *
4551  *	LOCKING:
4552  *	spin_lock_irqsave(host lock)
4553  */
4554 void ata_qc_free(struct ata_queued_cmd *qc)
4555 {
4556 	struct ata_port *ap = qc->ap;
4557 	unsigned int tag;
4558 
4559 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4560 
4561 	qc->flags = 0;
4562 	tag = qc->tag;
4563 	if (likely(ata_tag_valid(tag))) {
4564 		qc->tag = ATA_TAG_POISON;
4565 		clear_bit(tag, &ap->qc_allocated);
4566 	}
4567 }
4568 
4569 void __ata_qc_complete(struct ata_queued_cmd *qc)
4570 {
4571 	struct ata_port *ap = qc->ap;
4572 	struct ata_link *link = qc->dev->link;
4573 
4574 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4575 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4576 
4577 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4578 		ata_sg_clean(qc);
4579 
4580 	/* command should be marked inactive atomically with qc completion */
4581 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4582 		link->sactive &= ~(1 << qc->tag);
4583 		if (!link->sactive)
4584 			ap->nr_active_links--;
4585 	} else {
4586 		link->active_tag = ATA_TAG_POISON;
4587 		ap->nr_active_links--;
4588 	}
4589 
4590 	/* clear exclusive status */
4591 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4592 		     ap->excl_link == link))
4593 		ap->excl_link = NULL;
4594 
4595 	/* atapi: mark qc as inactive to prevent the interrupt handler
4596 	 * from completing the command twice later, before the error handler
4597 	 * is called. (when rc != 0 and atapi request sense is needed)
4598 	 */
4599 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4600 	ap->qc_active &= ~(1 << qc->tag);
4601 
4602 	/* call completion callback */
4603 	qc->complete_fn(qc);
4604 }
4605 
4606 static void fill_result_tf(struct ata_queued_cmd *qc)
4607 {
4608 	struct ata_port *ap = qc->ap;
4609 
4610 	qc->result_tf.flags = qc->tf.flags;
4611 	ap->ops->qc_fill_rtf(qc);
4612 }
4613 
4614 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4615 {
4616 	struct ata_device *dev = qc->dev;
4617 
4618 	if (ata_tag_internal(qc->tag))
4619 		return;
4620 
4621 	if (ata_is_nodata(qc->tf.protocol))
4622 		return;
4623 
4624 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4625 		return;
4626 
4627 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4628 }
4629 
4630 /**
4631  *	ata_qc_complete - Complete an active ATA command
4632  *	@qc: Command to complete
4634  *
4635  *	Indicate to the mid and upper layers that an ATA
4636  *	command has completed, with either an ok or not-ok status.
4637  *
4638  *	LOCKING:
4639  *	spin_lock_irqsave(host lock)
4640  */
4641 void ata_qc_complete(struct ata_queued_cmd *qc)
4642 {
4643 	struct ata_port *ap = qc->ap;
4644 
4645 	/* XXX: New EH and old EH use different mechanisms to
4646 	 * synchronize EH with regular execution path.
4647 	 *
4648 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4649 	 * Normal execution path is responsible for not accessing a
4650 	 * failed qc.  libata core enforces the rule by returning NULL
4651 	 * from ata_qc_from_tag() for failed qcs.
4652 	 *
4653 	 * Old EH depends on ata_qc_complete() nullifying completion
4654 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4655 	 * not synchronize with interrupt handler.  Only PIO task is
4656 	 * taken care of.
4657 	 */
4658 	if (ap->ops->error_handler) {
4659 		struct ata_device *dev = qc->dev;
4660 		struct ata_eh_info *ehi = &dev->link->eh_info;
4661 
4662 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4663 
4664 		if (unlikely(qc->err_mask))
4665 			qc->flags |= ATA_QCFLAG_FAILED;
4666 
4667 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4668 			if (!ata_tag_internal(qc->tag)) {
4669 				/* always fill result TF for failed qc */
4670 				fill_result_tf(qc);
4671 				ata_qc_schedule_eh(qc);
4672 				return;
4673 			}
4674 		}
4675 
4676 		/* read result TF if requested */
4677 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4678 			fill_result_tf(qc);
4679 
4680 		/* Some commands need post-processing after successful
4681 		 * completion.
4682 		 */
4683 		switch (qc->tf.command) {
4684 		case ATA_CMD_SET_FEATURES:
4685 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4686 			    qc->tf.feature != SETFEATURES_WC_OFF)
4687 				break;
4688 			/* fall through */
4689 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4690 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4691 			/* revalidate device */
4692 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4693 			ata_port_schedule_eh(ap);
4694 			break;
4695 
4696 		case ATA_CMD_SLEEP:
4697 			dev->flags |= ATA_DFLAG_SLEEPING;
4698 			break;
4699 		}
4700 
4701 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4702 			ata_verify_xfer(qc);
4703 
4704 		__ata_qc_complete(qc);
4705 	} else {
4706 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4707 			return;
4708 
4709 		/* read result TF if failed or requested */
4710 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4711 			fill_result_tf(qc);
4712 
4713 		__ata_qc_complete(qc);
4714 	}
4715 }
4716 
4717 /**
4718  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4719  *	@ap: port in question
4720  *	@qc_active: new qc_active mask
4721  *
4722  *	Complete in-flight commands.  This function is meant to be
4723  *	called from low-level driver's interrupt routine to complete
4724  *	requests normally.  ap->qc_active and @qc_active are compared
4725  *	and commands are completed accordingly.
4726  *
4727  *	LOCKING:
4728  *	spin_lock_irqsave(host lock)
4729  *
4730  *	RETURNS:
4731  *	Number of completed commands on success, -errno otherwise.
4732  */
4733 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4734 {
4735 	int nr_done = 0;
4736 	u32 done_mask;
4737 	int i;
4738 
4739 	done_mask = ap->qc_active ^ qc_active;
4740 
4741 	if (unlikely(done_mask & qc_active)) {
4742 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4743 				"(%08x->%08x)\n", ap->qc_active, qc_active);
4744 		return -EINVAL;
4745 	}
4746 
4747 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
4748 		struct ata_queued_cmd *qc;
4749 
4750 		if (!(done_mask & (1 << i)))
4751 			continue;
4752 
4753 		if ((qc = ata_qc_from_tag(ap, i))) {
4754 			ata_qc_complete(qc);
4755 			nr_done++;
4756 		}
4757 	}
4758 
4759 	return nr_done;
4760 }
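
/*
 * A sketch of typical use from an LLD interrupt handler
 * (PORT_ACTIVE_TAGS is a hypothetical controller register): the
 * driver reads the hardware's mask of still-active tags and libata
 * completes every tag that has dropped out of it:
 *
 *	u32 qc_active = readl(port_mmio + PORT_ACTIVE_TAGS);
 *	ata_qc_complete_multiple(ap, qc_active);
 */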
4761 
4762 /**
4763  *	ata_qc_issue - issue taskfile to device
4764  *	@qc: command to issue to device
4765  *
4766  *	Prepare an ATA command for submission to the device.
4767  *	This includes mapping the data into a DMA-able
4768  *	area, filling in the S/G table, and finally
4769  *	writing the taskfile to hardware, starting the command.
4770  *
4771  *	LOCKING:
4772  *	spin_lock_irqsave(host lock)
4773  */
4774 void ata_qc_issue(struct ata_queued_cmd *qc)
4775 {
4776 	struct ata_port *ap = qc->ap;
4777 	struct ata_link *link = qc->dev->link;
4778 	u8 prot = qc->tf.protocol;
4779 
4780 	/* Make sure only one non-NCQ command is outstanding.  The
4781 	 * check is skipped for old EH because it reuses active qc to
4782 	 * request ATAPI sense.
4783 	 */
4784 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4785 
4786 	if (ata_is_ncq(prot)) {
4787 		WARN_ON(link->sactive & (1 << qc->tag));
4788 
4789 		if (!link->sactive)
4790 			ap->nr_active_links++;
4791 		link->sactive |= 1 << qc->tag;
4792 	} else {
4793 		WARN_ON(link->sactive);
4794 
4795 		ap->nr_active_links++;
4796 		link->active_tag = qc->tag;
4797 	}
4798 
4799 	qc->flags |= ATA_QCFLAG_ACTIVE;
4800 	ap->qc_active |= 1 << qc->tag;
4801 
4802 	/* We guarantee to LLDs that they will have at least one
4803 	 * non-zero sg if the command is a data command.
4804 	 */
4805 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4806 
4807 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4808 				 (ap->flags & ATA_FLAG_PIO_DMA)))
4809 		if (ata_sg_setup(qc))
4810 			goto sg_err;
4811 
4812 	/* if device is sleeping, schedule reset and abort the link */
4813 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4814 		link->eh_info.action |= ATA_EH_RESET;
4815 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4816 		ata_link_abort(link);
4817 		return;
4818 	}
4819 
4820 	ap->ops->qc_prep(qc);
4821 
4822 	qc->err_mask |= ap->ops->qc_issue(qc);
4823 	if (unlikely(qc->err_mask))
4824 		goto err;
4825 	return;
4826 
4827 sg_err:
4828 	qc->err_mask |= AC_ERR_SYSTEM;
4829 err:
4830 	ata_qc_complete(qc);
4831 }
4832 
4833 /**
4834  *	sata_scr_valid - test whether SCRs are accessible
4835  *	@link: ATA link to test SCR accessibility for
4836  *
4837  *	Test whether SCRs are accessible for @link.
4838  *
4839  *	LOCKING:
4840  *	None.
4841  *
4842  *	RETURNS:
4843  *	1 if SCRs are accessible, 0 otherwise.
4844  */
4845 int sata_scr_valid(struct ata_link *link)
4846 {
4847 	struct ata_port *ap = link->ap;
4848 
4849 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4850 }
4851 
4852 /**
4853  *	sata_scr_read - read SCR register of the specified port
4854  *	@link: ATA link to read SCR for
4855  *	@reg: SCR to read
4856  *	@val: Place to store read value
4857  *
4858  *	Read SCR register @reg of @link into *@val.  This function is
4859  *	guaranteed to succeed if @link is ap->link, the cable type of
4860  *	the port is SATA and the port implements ->scr_read.
4861  *
4862  *	LOCKING:
4863  *	None if @link is ap->link.  Kernel thread context otherwise.
4864  *
4865  *	RETURNS:
4866  *	0 on success, negative errno on failure.
4867  */
4868 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4869 {
4870 	if (ata_is_host_link(link)) {
4871 		struct ata_port *ap = link->ap;
4872 
4873 		if (sata_scr_valid(link))
4874 			return ap->ops->scr_read(ap, reg, val);
4875 		return -EOPNOTSUPP;
4876 	}
4877 
4878 	return sata_pmp_scr_read(link, reg, val);
4879 }
4880 
4881 /**
4882  *	sata_scr_write - write SCR register of the specified port
4883  *	@link: ATA link to write SCR for
4884  *	@reg: SCR to write
4885  *	@val: value to write
4886  *
4887  *	Write @val to SCR register @reg of @link.  This function is
4888  *	guaranteed to succeed if @link is ap->link, the cable type of
4889  *	the port is SATA and the port implements ->scr_write.
4890  *
4891  *	LOCKING:
4892  *	None if @link is ap->link.  Kernel thread context otherwise.
4893  *
4894  *	RETURNS:
4895  *	0 on success, negative errno on failure.
4896  */
4897 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4898 {
4899 	if (ata_is_host_link(link)) {
4900 		struct ata_port *ap = link->ap;
4901 
4902 		if (sata_scr_valid(link))
4903 			return ap->ops->scr_write(ap, reg, val);
4904 		return -EOPNOTSUPP;
4905 	}
4906 
4907 	return sata_pmp_scr_write(link, reg, val);
4908 }
4909 
4910 /**
4911  *	sata_scr_write_flush - write SCR register of the specified port and flush
4912  *	@link: ATA link to write SCR for
4913  *	@reg: SCR to write
4914  *	@val: value to write
4915  *
4916  *	This function is identical to sata_scr_write() except that it
4917  *	performs a flush after writing to the register.
4918  *
4919  *	LOCKING:
4920  *	None if @link is ap->link.  Kernel thread context otherwise.
4921  *
4922  *	RETURNS:
4923  *	0 on success, negative errno on failure.
4924  */
4925 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4926 {
4927 	if (ata_is_host_link(link)) {
4928 		struct ata_port *ap = link->ap;
4929 		int rc;
4930 
4931 		if (sata_scr_valid(link)) {
4932 			rc = ap->ops->scr_write(ap, reg, val);
4933 			if (rc == 0)
4934 				rc = ap->ops->scr_read(ap, reg, &val);
4935 			return rc;
4936 		}
4937 		return -EOPNOTSUPP;
4938 	}
4939 
4940 	return sata_pmp_scr_write(link, reg, val);
4941 }
4942 
4943 /**
4944  *	ata_link_online - test whether the given link is online
4945  *	@link: ATA link to test
4946  *
4947  *	Test whether @link is online.  Note that this function returns
4948  *	0 if online status of @link cannot be obtained, so
4949  *	ata_link_online(link) != !ata_link_offline(link).
4950  *
4951  *	LOCKING:
4952  *	None.
4953  *
4954  *	RETURNS:
4955  *	1 if the port online status is available and online.
4956  */
4957 int ata_link_online(struct ata_link *link)
4958 {
4959 	u32 sstatus;
4960 
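	/* SStatus.DET == 0x3 means device presence detected and
	 * Phy communication established.
	 */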
4961 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4962 	    (sstatus & 0xf) == 0x3)
4963 		return 1;
4964 	return 0;
4965 }
4966 
4967 /**
4968  *	ata_link_offline - test whether the given link is offline
4969  *	@link: ATA link to test
4970  *
4971  *	Test whether @link is offline.  Note that this function
4972  *	returns 0 if offline status of @link cannot be obtained, so
4973  *	ata_link_online(link) != !ata_link_offline(link).
4974  *
4975  *	LOCKING:
4976  *	None.
4977  *
4978  *	RETURNS:
4979  *	1 if the port offline status is available and offline.
4980  */
4981 int ata_link_offline(struct ata_link *link)
4982 {
4983 	u32 sstatus;
4984 
4985 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4986 	    (sstatus & 0xf) != 0x3)
4987 		return 1;
4988 	return 0;
4989 }
4990 
4991 #ifdef CONFIG_PM
4992 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4993 			       unsigned int action, unsigned int ehi_flags,
4994 			       int wait)
4995 {
4996 	unsigned long flags;
4997 	int i, rc;
4998 
4999 	for (i = 0; i < host->n_ports; i++) {
5000 		struct ata_port *ap = host->ports[i];
5001 		struct ata_link *link;
5002 
5003 		/* Previous resume operation might still be in
5004 		 * progress.  Wait for PM_PENDING to clear.
5005 		 */
5006 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5007 			ata_port_wait_eh(ap);
5008 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5009 		}
5010 
5011 		/* request PM ops to EH */
5012 		spin_lock_irqsave(ap->lock, flags);
5013 
5014 		ap->pm_mesg = mesg;
5015 		if (wait) {
5016 			rc = 0;
5017 			ap->pm_result = &rc;
5018 		}
5019 
5020 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5021 		__ata_port_for_each_link(link, ap) {
5022 			link->eh_info.action |= action;
5023 			link->eh_info.flags |= ehi_flags;
5024 		}
5025 
5026 		ata_port_schedule_eh(ap);
5027 
5028 		spin_unlock_irqrestore(ap->lock, flags);
5029 
5030 		/* wait and check result */
5031 		if (wait) {
5032 			ata_port_wait_eh(ap);
5033 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5034 			if (rc)
5035 				return rc;
5036 		}
5037 	}
5038 
5039 	return 0;
5040 }
5041 
5042 /**
5043  *	ata_host_suspend - suspend host
5044  *	@host: host to suspend
5045  *	@mesg: PM message
5046  *
5047  *	Suspend @host.  Actual operation is performed by EH.  This
5048  *	function requests EH to perform PM operations and waits for EH
5049  *	to finish.
5050  *
5051  *	LOCKING:
5052  *	Kernel thread context (may sleep).
5053  *
5054  *	RETURNS:
5055  *	0 on success, -errno on failure.
5056  */
5057 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5058 {
5059 	int rc;
5060 
5061 	/*
5062 	 * disable link pm on all ports before requesting
5063 	 * any pm activity
5064 	 */
5065 	ata_lpm_enable(host);
5066 
5067 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5068 	if (rc == 0)
5069 		host->dev->power.power_state = mesg;
5070 	return rc;
5071 }
5072 
5073 /**
5074  *	ata_host_resume - resume host
5075  *	@host: host to resume
5076  *
5077  *	Resume @host.  Actual operation is performed by EH.  This
5078  *	function requests EH to perform PM operations and returns.
5079  *	Note that all resume operations are performed in parallel.
5080  *
5081  *	LOCKING:
5082  *	Kernel thread context (may sleep).
5083  */
5084 void ata_host_resume(struct ata_host *host)
5085 {
5086 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5087 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5088 	host->dev->power.power_state = PMSG_ON;
5089 
5090 	/* reenable link pm */
5091 	ata_lpm_disable(host);
5092 }
5093 #endif
5094 
5095 /**
5096  *	ata_port_start - Set port up for dma.
5097  *	@ap: Port to initialize
5098  *
5099  *	Called just after data structures for each port are
5100  *	initialized.  Allocates space for the PRD table.
5101  *
5102  *	May be used as the port_start() entry in ata_port_operations.
5103  *
5104  *	LOCKING:
5105  *	Inherited from caller.
5106  */
5107 int ata_port_start(struct ata_port *ap)
5108 {
5109 	struct device *dev = ap->dev;
5110 
5111 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5112 				      GFP_KERNEL);
5113 	if (!ap->prd)
5114 		return -ENOMEM;
5115 
5116 	return 0;
5117 }
5118 
5119 /**
5120  *	ata_dev_init - Initialize an ata_device structure
5121  *	@dev: Device structure to initialize
5122  *
5123  *	Initialize @dev in preparation for probing.
5124  *
5125  *	LOCKING:
5126  *	Inherited from caller.
5127  */
5128 void ata_dev_init(struct ata_device *dev)
5129 {
5130 	struct ata_link *link = dev->link;
5131 	struct ata_port *ap = link->ap;
5132 	unsigned long flags;
5133 
5134 	/* SATA spd limit is bound to the first device */
5135 	link->sata_spd_limit = link->hw_sata_spd_limit;
5136 	link->sata_spd = 0;
5137 
5138 	/* High bits of dev->flags are used to record warm plug
5139 	 * requests which occur asynchronously.  Synchronize using
5140 	 * host lock.
5141 	 */
5142 	spin_lock_irqsave(ap->lock, flags);
5143 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5144 	dev->horkage = 0;
5145 	spin_unlock_irqrestore(ap->lock, flags);
5146 
5147 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5148 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5149 	dev->pio_mask = UINT_MAX;
5150 	dev->mwdma_mask = UINT_MAX;
5151 	dev->udma_mask = UINT_MAX;
5152 }
5153 
5154 /**
5155  *	ata_link_init - Initialize an ata_link structure
5156  *	@ap: ATA port link is attached to
5157  *	@link: Link structure to initialize
5158  *	@pmp: Port multiplier port number
5159  *
5160  *	Initialize @link.
5161  *
5162  *	LOCKING:
5163  *	Kernel thread context (may sleep)
5164  */
5165 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5166 {
5167 	int i;
5168 
5169 	/* clear everything except for devices */
5170 	memset(link, 0, offsetof(struct ata_link, device[0]));
5171 
5172 	link->ap = ap;
5173 	link->pmp = pmp;
5174 	link->active_tag = ATA_TAG_POISON;
5175 	link->hw_sata_spd_limit = UINT_MAX;
5176 
5177 	/* can't use iterator, ap isn't initialized yet */
5178 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5179 		struct ata_device *dev = &link->device[i];
5180 
5181 		dev->link = link;
5182 		dev->devno = dev - link->device;
5183 		ata_dev_init(dev);
5184 	}
5185 }
5186 
5187 /**
5188  *	sata_link_init_spd - Initialize link->sata_spd_limit
5189  *	@link: Link to configure sata_spd_limit for
5190  *
5191  *	Initialize @link->[hw_]sata_spd_limit to the currently
5192  *	configured value.
5193  *
5194  *	LOCKING:
5195  *	Kernel thread context (may sleep).
5196  *
5197  *	RETURNS:
5198  *	0 on success, -errno on failure.
5199  */
5200 int sata_link_init_spd(struct ata_link *link)
5201 {
5202 	u8 spd;
5203 	int rc;
5204 
5205 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5206 	if (rc)
5207 		return rc;
5208 
5209 	spd = (link->saved_scontrol >> 4) & 0xf;
5210 	if (spd)
5211 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5212 
5213 	ata_force_link_limits(link);
5214 
5215 	link->sata_spd_limit = link->hw_sata_spd_limit;
5216 
5217 	return 0;
5218 }
5219 
5220 /**
5221  *	ata_port_alloc - allocate and initialize basic ATA port resources
5222  *	@host: ATA host this allocated port belongs to
5223  *
5224  *	Allocate and initialize basic ATA port resources.
5225  *
5226  *	RETURNS:
5227  *	Allocated ATA port on success, NULL on failure.
5228  *
5229  *	LOCKING:
5230  *	Inherited from calling layer (may sleep).
5231  */
5232 struct ata_port *ata_port_alloc(struct ata_host *host)
5233 {
5234 	struct ata_port *ap;
5235 
5236 	DPRINTK("ENTER\n");
5237 
5238 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5239 	if (!ap)
5240 		return NULL;
5241 
5242 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5243 	ap->lock = &host->lock;
5244 	ap->flags = ATA_FLAG_DISABLED;
5245 	ap->print_id = -1;
5246 	ap->ctl = ATA_DEVCTL_OBS;
5247 	ap->host = host;
5248 	ap->dev = host->dev;
5249 	ap->last_ctl = 0xFF;
5250 
5251 #if defined(ATA_VERBOSE_DEBUG)
5252 	/* turn on all debugging levels */
5253 	ap->msg_enable = 0x00FF;
5254 #elif defined(ATA_DEBUG)
5255 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5256 #else
5257 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5258 #endif
5259 
5260 #ifdef CONFIG_ATA_SFF
5261 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5262 #endif
5263 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5264 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5265 	INIT_LIST_HEAD(&ap->eh_done_q);
5266 	init_waitqueue_head(&ap->eh_wait_q);
5267 	init_timer_deferrable(&ap->fastdrain_timer);
5268 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5269 	ap->fastdrain_timer.data = (unsigned long)ap;
5270 
5271 	ap->cbl = ATA_CBL_NONE;
5272 
5273 	ata_link_init(ap, &ap->link, 0);
5274 
5275 #ifdef ATA_IRQ_TRAP
5276 	ap->stats.unhandled_irq = 1;
5277 	ap->stats.idle_irq = 1;
5278 #endif
5279 	return ap;
5280 }
5281 
5282 static void ata_host_release(struct device *gendev, void *res)
5283 {
5284 	struct ata_host *host = dev_get_drvdata(gendev);
5285 	int i;
5286 
5287 	for (i = 0; i < host->n_ports; i++) {
5288 		struct ata_port *ap = host->ports[i];
5289 
5290 		if (!ap)
5291 			continue;
5292 
5293 		if (ap->scsi_host)
5294 			scsi_host_put(ap->scsi_host);
5295 
5296 		kfree(ap->pmp_link);
5297 		kfree(ap);
5298 		host->ports[i] = NULL;
5299 	}
5300 
5301 	dev_set_drvdata(gendev, NULL);
5302 }
5303 
5304 /**
5305  *	ata_host_alloc - allocate and init basic ATA host resources
5306  *	@dev: generic device this host is associated with
5307  *	@max_ports: maximum number of ATA ports associated with this host
5308  *
5309  *	Allocate and initialize basic ATA host resources.  An LLD
5310  *	calls this function to allocate a host, then initializes it
5311  *	fully and attaches it using ata_host_register().
5312  *
5313  *	@max_ports ports are allocated and host->n_ports is
5314  *	initialized to @max_ports.  The caller is allowed to decrease
5315  *	host->n_ports before calling ata_host_register().  The unused
5316  *	ports will be automatically freed on registration.
5317  *
5318  *	RETURNS:
5319  *	Allocated ATA host on success, NULL on failure.
5320  *
5321  *	LOCKING:
5322  *	Inherited from calling layer (may sleep).
5323  */
5324 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5325 {
5326 	struct ata_host *host;
5327 	size_t sz;
5328 	int i;
5329 
5330 	DPRINTK("ENTER\n");
5331 
5332 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5333 		return NULL;
5334 
5335 	/* alloc a container for our list of ATA ports (buses) */
5336 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5338 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5339 	if (!host)
5340 		goto err_out;
5341 
5342 	devres_add(dev, host);
5343 	dev_set_drvdata(dev, host);
5344 
5345 	spin_lock_init(&host->lock);
5346 	host->dev = dev;
5347 	host->n_ports = max_ports;
5348 
5349 	/* allocate ports bound to this host */
5350 	for (i = 0; i < max_ports; i++) {
5351 		struct ata_port *ap;
5352 
5353 		ap = ata_port_alloc(host);
5354 		if (!ap)
5355 			goto err_out;
5356 
5357 		ap->port_no = i;
5358 		host->ports[i] = ap;
5359 	}
5360 
5361 	devres_remove_group(dev, NULL);
5362 	return host;
5363 
5364  err_out:
5365 	devres_release_group(dev, NULL);
5366 	return NULL;
5367 }
5368 
5369 /**
5370  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5371  *	@dev: generic device this host is associated with
5372  *	@ppi: array of ATA port_info to initialize host with
5373  *	@n_ports: number of ATA ports attached to this host
5374  *
5375  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5376  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5377  *	last entry will be used for the remaining ports.
5378  *
5379  *	RETURNS:
5380  *	Allocated ATA host on success, NULL on failure.
5381  *
5382  *	LOCKING:
5383  *	Inherited from calling layer (may sleep).
5384  */
5385 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5386 				      const struct ata_port_info * const * ppi,
5387 				      int n_ports)
5388 {
5389 	const struct ata_port_info *pi;
5390 	struct ata_host *host;
5391 	int i, j;
5392 
5393 	host = ata_host_alloc(dev, n_ports);
5394 	if (!host)
5395 		return NULL;
5396 
5397 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5398 		struct ata_port *ap = host->ports[i];
5399 
5400 		if (ppi[j])
5401 			pi = ppi[j++];
5402 
5403 		ap->pio_mask = pi->pio_mask;
5404 		ap->mwdma_mask = pi->mwdma_mask;
5405 		ap->udma_mask = pi->udma_mask;
5406 		ap->flags |= pi->flags;
5407 		ap->link.flags |= pi->link_flags;
5408 		ap->ops = pi->port_ops;
5409 
5410 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5411 			host->ops = pi->port_ops;
5412 	}
5413 
5414 	return host;
5415 }
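
/*
 * A sketch of a typical LLD probe sequence built on the helpers in
 * this file (my_port_info, my_interrupt and my_sht are placeholders):
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */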
5416 
5417 static void ata_host_stop(struct device *gendev, void *res)
5418 {
5419 	struct ata_host *host = dev_get_drvdata(gendev);
5420 	int i;
5421 
5422 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5423 
5424 	for (i = 0; i < host->n_ports; i++) {
5425 		struct ata_port *ap = host->ports[i];
5426 
5427 		if (ap->ops->port_stop)
5428 			ap->ops->port_stop(ap);
5429 	}
5430 
5431 	if (host->ops->host_stop)
5432 		host->ops->host_stop(host);
5433 }
5434 
5435 /**
5436  *	ata_finalize_port_ops - finalize ata_port_operations
5437  *	@ops: ata_port_operations to finalize
5438  *
5439  *	An ata_port_operations can inherit from another ops and that
5440  *	ops can again inherit from another.  This can go on as many
5441  *	times as necessary as long as there is no loop in the
5442  *	inheritance chain.
5443  *
5444  *	Ops tables are finalized when the host is started.  NULL or
5445  *	unspecified entries are inherited from the closest ancestor
5446  *	which has the method, and the entry is populated with it.
5447  *	After finalization, the ops table directly points to all the
5448  *	methods and ->inherits is no longer necessary and cleared.
5449  *
5450  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5451  *
5452  *	LOCKING:
5453  *	None.
5454  */
5455 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5456 {
5457 	static DEFINE_SPINLOCK(lock);
5458 	const struct ata_port_operations *cur;
5459 	void **begin = (void **)ops;
5460 	void **end = (void **)&ops->inherits;
5461 	void **pp;
5462 
5463 	if (!ops || !ops->inherits)
5464 		return;
5465 
5466 	spin_lock(&lock);
5467 
5468 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5469 		void **inherit = (void **)cur;
5470 
5471 		for (pp = begin; pp < end; pp++, inherit++)
5472 			if (!*pp)
5473 				*pp = *inherit;
5474 	}
5475 
5476 	for (pp = begin; pp < end; pp++)
5477 		if (IS_ERR(*pp))
5478 			*pp = NULL;
5479 
5480 	ops->inherits = NULL;
5481 
5482 	spin_unlock(&lock);
5483 }
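
/*
 * For example (hypothetical ops table), an LLD declares only the
 * methods it overrides and inherits everything else:
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *	};
 *
 * After finalization, my_ops carries every method resolved from the
 * sata_port_ops chain, with ATA_OP_NULL entries cleared to NULL.
 */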
5484 
5485 /**
5486  *	ata_host_start - start and freeze ports of an ATA host
5487  *	@host: ATA host to start ports for
5488  *
5489  *	Start and then freeze ports of @host.  Started status is
5490  *	recorded in host->flags, so this function can be called
5491  *	multiple times.  Ports are guaranteed to get started only
5492  *	once.  If host->ops isn't initialized yet, it's set to the
5493  *	first non-dummy port ops.
5494  *
5495  *	LOCKING:
5496  *	Inherited from calling layer (may sleep).
5497  *
5498  *	RETURNS:
5499  *	0 if all ports are started successfully, -errno otherwise.
5500  */
5501 int ata_host_start(struct ata_host *host)
5502 {
5503 	int have_stop = 0;
5504 	void *start_dr = NULL;
5505 	int i, rc;
5506 
5507 	if (host->flags & ATA_HOST_STARTED)
5508 		return 0;
5509 
5510 	ata_finalize_port_ops(host->ops);
5511 
5512 	for (i = 0; i < host->n_ports; i++) {
5513 		struct ata_port *ap = host->ports[i];
5514 
5515 		ata_finalize_port_ops(ap->ops);
5516 
5517 		if (!host->ops && !ata_port_is_dummy(ap))
5518 			host->ops = ap->ops;
5519 
5520 		if (ap->ops->port_stop)
5521 			have_stop = 1;
5522 	}
5523 
5524 	if (host->ops->host_stop)
5525 		have_stop = 1;
5526 
5527 	if (have_stop) {
5528 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5529 		if (!start_dr)
5530 			return -ENOMEM;
5531 	}
5532 
5533 	for (i = 0; i < host->n_ports; i++) {
5534 		struct ata_port *ap = host->ports[i];
5535 
5536 		if (ap->ops->port_start) {
5537 			rc = ap->ops->port_start(ap);
5538 			if (rc) {
5539 				if (rc != -ENODEV)
5540 					dev_printk(KERN_ERR, host->dev,
5541 						"failed to start port %d "
5542 						"(errno=%d)\n", i, rc);
5543 				goto err_out;
5544 			}
5545 		}
5546 		ata_eh_freeze_port(ap);
5547 	}
5548 
5549 	if (start_dr)
5550 		devres_add(host->dev, start_dr);
5551 	host->flags |= ATA_HOST_STARTED;
5552 	return 0;
5553 
5554  err_out:
5555 	while (--i >= 0) {
5556 		struct ata_port *ap = host->ports[i];
5557 
5558 		if (ap->ops->port_stop)
5559 			ap->ops->port_stop(ap);
5560 	}
5561 	devres_free(start_dr);
5562 	return rc;
5563 }
5564 
5565 /**
5566  *	ata_host_init - Initialize a host struct
5567  *	@host:	host to initialize
5568  *	@dev:	device host is attached to
5569  *	@flags:	host flags
5570  *	@ops:	port_ops
5571  *
5572  *	LOCKING:
5573  *	PCI/etc. bus probe sem.
5574  *
5575  */
5576 /* KILLME - the only user left is ipr */
5577 void ata_host_init(struct ata_host *host, struct device *dev,
5578 		   unsigned long flags, struct ata_port_operations *ops)
5579 {
5580 	spin_lock_init(&host->lock);
5581 	host->dev = dev;
5582 	host->flags = flags;
5583 	host->ops = ops;
5584 }
5585 
5586 /**
5587  *	ata_host_register - register initialized ATA host
5588  *	@host: ATA host to register
5589  *	@sht: template for SCSI host
5590  *
5591  *	Register initialized ATA host.  @host is allocated using
5592  *	ata_host_alloc() and fully initialized by LLD.  This function
5593  *	starts ports, registers @host with ATA and SCSI layers and
5594  *	probes registered devices.
5595  *
5596  *	LOCKING:
5597  *	Inherited from calling layer (may sleep).
5598  *
5599  *	RETURNS:
5600  *	0 on success, -errno otherwise.
5601  */
5602 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5603 {
5604 	int i, rc;
5605 
5606 	/* host must have been started */
5607 	if (!(host->flags & ATA_HOST_STARTED)) {
5608 		dev_printk(KERN_ERR, host->dev,
5609 			   "BUG: trying to register unstarted host\n");
5610 		WARN_ON(1);
5611 		return -EINVAL;
5612 	}
5613 
5614 	/* Blow away unused ports.  This happens when LLD can't
5615 	 * determine the exact number of ports to allocate at
5616 	 * allocation time.
5617 	 */
5618 	for (i = host->n_ports; host->ports[i]; i++)
5619 		kfree(host->ports[i]);
5620 
5621 	/* give ports names and add SCSI hosts */
5622 	for (i = 0; i < host->n_ports; i++)
5623 		host->ports[i]->print_id = ata_print_id++;
5624 
5625 	rc = ata_scsi_add_hosts(host, sht);
5626 	if (rc)
5627 		return rc;
5628 
5629 	/* associate with ACPI nodes */
5630 	ata_acpi_associate(host);
5631 
5632 	/* set cable, sata_spd_limit and report */
5633 	for (i = 0; i < host->n_ports; i++) {
5634 		struct ata_port *ap = host->ports[i];
5635 		unsigned long xfer_mask;
5636 
5637 		/* set SATA cable type if still unset */
5638 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5639 			ap->cbl = ATA_CBL_SATA;
5640 
5641 		/* init sata_spd_limit to the current value */
5642 		sata_link_init_spd(&ap->link);
5643 
5644 		/* print per-port info to dmesg */
5645 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5646 					      ap->udma_mask);
5647 
5648 		if (!ata_port_is_dummy(ap)) {
5649 			ata_port_printk(ap, KERN_INFO,
5650 					"%cATA max %s %s\n",
5651 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5652 					ata_mode_string(xfer_mask),
5653 					ap->link.eh_info.desc);
5654 			ata_ehi_clear_desc(&ap->link.eh_info);
5655 		} else
5656 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5657 	}
5658 
5659 	/* perform each probe synchronously */
5660 	DPRINTK("probe begin\n");
5661 	for (i = 0; i < host->n_ports; i++) {
5662 		struct ata_port *ap = host->ports[i];
5663 
5664 		/* probe */
5665 		if (ap->ops->error_handler) {
5666 			struct ata_eh_info *ehi = &ap->link.eh_info;
5667 			unsigned long flags;
5668 
5669 			ata_port_probe(ap);
5670 
5671 			/* kick EH for boot probing */
5672 			spin_lock_irqsave(ap->lock, flags);
5673 
5674 			ehi->probe_mask |= ATA_ALL_DEVICES;
5675 			ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
5676 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5677 
5678 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5679 			ap->pflags |= ATA_PFLAG_LOADING;
5680 			ata_port_schedule_eh(ap);
5681 
5682 			spin_unlock_irqrestore(ap->lock, flags);
5683 
5684 			/* wait for EH to finish */
5685 			ata_port_wait_eh(ap);
5686 		} else {
5687 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5688 			rc = ata_bus_probe(ap);
5689 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
5690 
5691 			if (rc) {
5692 				/* FIXME: do something useful here?
5693 				 * Current libata behavior will
5694 				 * tear down everything when
5695 				 * the module is removed
5696 				 * or the h/w is unplugged.
5697 				 */
5698 			}
5699 		}
5700 	}
5701 
5702 	/* probes are done, now scan each port's disk(s) */
5703 	DPRINTK("host probe begin\n");
5704 	for (i = 0; i < host->n_ports; i++) {
5705 		struct ata_port *ap = host->ports[i];
5706 
5707 		ata_scsi_scan_host(ap, 1);
5708 	}
5709 
5710 	return 0;
5711 }
5712 
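/*
 * Illustrative sketch, not compiled: the usual call sequence around
 * ata_host_register() for an LLD that cannot use ata_host_activate(),
 * e.g. one that needs several IRQs.  Names prefixed "foo_" are
 * hypothetical.
 *
 *	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	(LLD-specific port setup, iomapping, etc. goes here)
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	(per-port IRQs are requested here)
 *	return ata_host_register(host, &foo_sht);
 */
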
5713 /**
5714  *	ata_host_activate - start host, request IRQ and register it
5715  *	@host: target ATA host
5716  *	@irq: IRQ to request
5717  *	@irq_handler: irq_handler used when requesting IRQ
5718  *	@irq_flags: irq_flags used when requesting IRQ
5719  *	@sht: scsi_host_template to use when registering the host
5720  *
5721  *	After allocating an ATA host and initializing it, most libata
5722  *	LLDs perform three steps to activate the host - start host,
5723  *	request IRQ and register it.  This helper takes necessary
5724  *	arguments and performs the three steps in one go.
5725  *
5726  *	An invalid (zero) IRQ skips IRQ registration; the host is then
5727  *	expected to have its ports set up for polling mode, and
5728  *	@irq_handler should be NULL.
5729  *
5730  *	LOCKING:
5731  *	Inherited from calling layer (may sleep).
5732  *
5733  *	RETURNS:
5734  *	0 on success, -errno otherwise.
5735  */
5736 int ata_host_activate(struct ata_host *host, int irq,
5737 		      irq_handler_t irq_handler, unsigned long irq_flags,
5738 		      struct scsi_host_template *sht)
5739 {
5740 	int i, rc;
5741 
5742 	rc = ata_host_start(host);
5743 	if (rc)
5744 		return rc;
5745 
5746 	/* Special case for polling mode */
5747 	if (!irq) {
5748 		WARN_ON(irq_handler);
5749 		return ata_host_register(host, sht);
5750 	}
5751 
5752 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5753 			      dev_driver_string(host->dev), host);
5754 	if (rc)
5755 		return rc;
5756 
5757 	for (i = 0; i < host->n_ports; i++)
5758 		ata_port_desc(host->ports[i], "irq %d", irq);
5759 
5760 	rc = ata_host_register(host, sht);
5761 	/* if failed, just free the IRQ and leave ports alone */
5762 	if (rc)
5763 		devm_free_irq(host->dev, irq, host);
5764 
5765 	return rc;
5766 }
5767 
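/*
 * Illustrative sketch, not compiled: a typical PCI LLD finishes its
 * ->probe() with a single call to the helper above.  "foo_interrupt"
 * and "foo_sht" are hypothetical.
 *
 *	return ata_host_activate(host, pdev->irq, foo_interrupt,
 *				 IRQF_SHARED, &foo_sht);
 *
 * Polling-only hardware passes a zero IRQ and a NULL handler instead.
 */
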
5768 /**
5769  *	ata_port_detach - Detach ATA port in preparation for device removal
5770  *	@ap: ATA port to be detached
5771  *
5772  *	Detach all ATA devices and the associated SCSI devices of @ap;
5773  *	then, remove the associated SCSI host.  @ap is guaranteed to
5774  *	be quiescent on return from this function.
5775  *
5776  *	LOCKING:
5777  *	Kernel thread context (may sleep).
5778  */
5779 static void ata_port_detach(struct ata_port *ap)
5780 {
5781 	unsigned long flags;
5782 	struct ata_link *link;
5783 	struct ata_device *dev;
5784 
5785 	if (!ap->ops->error_handler)
5786 		goto skip_eh;
5787 
5788 	/* tell EH we're leaving & flush EH */
5789 	spin_lock_irqsave(ap->lock, flags);
5790 	ap->pflags |= ATA_PFLAG_UNLOADING;
5791 	spin_unlock_irqrestore(ap->lock, flags);
5792 
5793 	ata_port_wait_eh(ap);
5794 
5795 	/* EH is now guaranteed to see UNLOADING - EH context belongs
5796 	 * to us.  Restore SControl and disable all existing devices.
5797 	 */
5798 	__ata_port_for_each_link(link, ap) {
5799 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol);
5800 		ata_link_for_each_dev(dev, link)
5801 			ata_dev_disable(dev);
5802 	}
5803 
5804 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
5805 	 * will be skipped and retries will be terminated with bad
5806 	 * target.
5807 	 */
5808 	spin_lock_irqsave(ap->lock, flags);
5809 	ata_port_freeze(ap);	/* won't be thawed */
5810 	spin_unlock_irqrestore(ap->lock, flags);
5811 
5812 	ata_port_wait_eh(ap);
5813 	cancel_rearming_delayed_work(&ap->hotplug_task);
5814 
5815  skip_eh:
5816 	/* remove the associated SCSI host */
5817 	scsi_remove_host(ap->scsi_host);
5818 }
5819 
5820 /**
5821  *	ata_host_detach - Detach all ports of an ATA host
5822  *	@host: Host to detach
5823  *
5824  *	Detach all ports of @host.
5825  *
5826  *	LOCKING:
5827  *	Kernel thread context (may sleep).
5828  */
5829 void ata_host_detach(struct ata_host *host)
5830 {
5831 	int i;
5832 
5833 	for (i = 0; i < host->n_ports; i++)
5834 		ata_port_detach(host->ports[i]);
5835 
5836 	/* the host is dead now, dissociate ACPI */
5837 	ata_acpi_dissociate(host);
5838 }
5839 
5840 #ifdef CONFIG_PCI
5841 
5842 /**
5843  *	ata_pci_remove_one - PCI layer callback for device removal
5844  *	@pdev: PCI device that was removed
5845  *
5846  *	PCI layer indicates to libata via this hook that hot-unplug or
5847  *	module unload event has occurred.  Detach all ports.  Resource
5848  *	release is handled via devres.
5849  *
5850  *	LOCKING:
5851  *	Inherited from PCI layer (may sleep).
5852  */
5853 void ata_pci_remove_one(struct pci_dev *pdev)
5854 {
5855 	struct device *dev = &pdev->dev;
5856 	struct ata_host *host = dev_get_drvdata(dev);
5857 
5858 	ata_host_detach(host);
5859 }
5860 
5861 /* move to PCI subsystem */
5862 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5863 {
5864 	unsigned long tmp = 0;
5865 
5866 	switch (bits->width) {
5867 	case 1: {
5868 		u8 tmp8 = 0;
5869 		pci_read_config_byte(pdev, bits->reg, &tmp8);
5870 		tmp = tmp8;
5871 		break;
5872 	}
5873 	case 2: {
5874 		u16 tmp16 = 0;
5875 		pci_read_config_word(pdev, bits->reg, &tmp16);
5876 		tmp = tmp16;
5877 		break;
5878 	}
5879 	case 4: {
5880 		u32 tmp32 = 0;
5881 		pci_read_config_dword(pdev, bits->reg, &tmp32);
5882 		tmp = tmp32;
5883 		break;
5884 	}
5885 
5886 	default:
5887 		return -EINVAL;
5888 	}
5889 
5890 	tmp &= bits->mask;
5891 
5892 	return (tmp == bits->val) ? 1 : 0;
5893 }
5894 
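/*
 * Illustrative sketch, not compiled: LLDs typically use
 * pci_test_config_bits() to check whether a channel is enabled before
 * registering it.  The offsets and masks below are hypothetical; real
 * drivers take them from the controller datasheet.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	(primary channel)
 *		{ 0x43, 1, 0x80, 0x80 },	(secondary channel)
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[port]))
 *		ppi[port] = &ata_dummy_port_info;	(channel disabled)
 */
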
5895 #ifdef CONFIG_PM
5896 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5897 {
5898 	pci_save_state(pdev);
5899 	pci_disable_device(pdev);
5900 
5901 	if (mesg.event & PM_EVENT_SLEEP)
5902 		pci_set_power_state(pdev, PCI_D3hot);
5903 }
5904 
5905 int ata_pci_device_do_resume(struct pci_dev *pdev)
5906 {
5907 	int rc;
5908 
5909 	pci_set_power_state(pdev, PCI_D0);
5910 	pci_restore_state(pdev);
5911 
5912 	rc = pcim_enable_device(pdev);
5913 	if (rc) {
5914 		dev_printk(KERN_ERR, &pdev->dev,
5915 			   "failed to enable device after resume (%d)\n", rc);
5916 		return rc;
5917 	}
5918 
5919 	pci_set_master(pdev);
5920 	return 0;
5921 }
5922 
5923 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5924 {
5925 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
5926 	int rc = 0;
5927 
5928 	rc = ata_host_suspend(host, mesg);
5929 	if (rc)
5930 		return rc;
5931 
5932 	ata_pci_device_do_suspend(pdev, mesg);
5933 
5934 	return 0;
5935 }
5936 
5937 int ata_pci_device_resume(struct pci_dev *pdev)
5938 {
5939 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
5940 	int rc;
5941 
5942 	rc = ata_pci_device_do_resume(pdev);
5943 	if (rc == 0)
5944 		ata_host_resume(host);
5945 	return rc;
5946 }
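
/*
 * Illustrative sketch, not compiled: a PCI LLD with no extra power
 * management work can point its pci_driver hooks straight at the
 * helpers above.  The "foo" names and foo_pci_ids are hypothetical.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */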
5947 #endif /* CONFIG_PM */
5948 
5949 #endif /* CONFIG_PCI */
5950 
5951 static int __init ata_parse_force_one(char **cur,
5952 				      struct ata_force_ent *force_ent,
5953 				      const char **reason)
5954 {
5955 	/* FIXME: Currently, there's no way to tag init const data and
5956 	 * using __initdata causes build failure on some versions of
5957 	 * gcc.  Once __initdataconst is implemented, add const to the
5958 	 * following structure.
5959 	 */
5960 	static struct ata_force_param force_tbl[] __initdata = {
5961 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
5962 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
5963 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
5964 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
5965 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
5966 		{ "sata",	.cbl		= ATA_CBL_SATA },
5967 		{ "1.5Gbps",	.spd_limit	= 1 },
5968 		{ "3.0Gbps",	.spd_limit	= 2 },
5969 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
5970 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
5971 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
5972 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
5973 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
5974 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
5975 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
5976 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
5977 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
5978 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
5979 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
5980 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
5981 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
5982 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
5983 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5984 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5985 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5986 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5987 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5988 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5989 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5990 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5991 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5992 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5993 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5994 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5995 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5996 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5997 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5998 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5999 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6000 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6001 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6002 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6003 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6004 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6005 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6006 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6007 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6008 	};
6009 	char *start = *cur, *p = *cur;
6010 	char *id, *val, *endp;
6011 	const struct ata_force_param *match_fp = NULL;
6012 	int nr_matches = 0, i;
6013 
6014 	/* find where this param ends and update *cur */
6015 	while (*p != '\0' && *p != ',')
6016 		p++;
6017 
6018 	if (*p == '\0')
6019 		*cur = p;
6020 	else
6021 		*cur = p + 1;
6022 
6023 	*p = '\0';
6024 
6025 	/* parse */
6026 	p = strchr(start, ':');
6027 	if (!p) {
6028 		val = strstrip(start);
6029 		goto parse_val;
6030 	}
6031 	*p = '\0';
6032 
6033 	id = strstrip(start);
6034 	val = strstrip(p + 1);
6035 
6036 	/* parse id */
6037 	p = strchr(id, '.');
6038 	if (p) {
6039 		*p++ = '\0';
6040 		force_ent->device = simple_strtoul(p, &endp, 10);
6041 		if (p == endp || *endp != '\0') {
6042 			*reason = "invalid device";
6043 			return -EINVAL;
6044 		}
6045 	}
6046 
6047 	force_ent->port = simple_strtoul(id, &endp, 10);
6048 	if (id == endp || *endp != '\0') {
6049 		*reason = "invalid port/link";
6050 		return -EINVAL;
6051 	}
6052 
6053  parse_val:
6054 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6055 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6056 		const struct ata_force_param *fp = &force_tbl[i];
6057 
6058 		if (strncasecmp(val, fp->name, strlen(val)))
6059 			continue;
6060 
6061 		nr_matches++;
6062 		match_fp = fp;
6063 
6064 		if (strcasecmp(val, fp->name) == 0) {
6065 			nr_matches = 1;
6066 			break;
6067 		}
6068 	}
6069 
6070 	if (!nr_matches) {
6071 		*reason = "unknown value";
6072 		return -EINVAL;
6073 	}
6074 	if (nr_matches > 1) {
6075 		*reason = "ambiguous value";
6076 		return -EINVAL;
6077 	}
6078 
6079 	force_ent->param = *match_fp;
6080 
6081 	return 0;
6082 }
6083 
6084 static void __init ata_parse_force_param(void)
6085 {
6086 	int idx = 0, size = 1;
6087 	int last_port = -1, last_device = -1;
6088 	char *p, *cur, *next;
6089 
6090 	/* calculate maximum number of params and allocate force_tbl */
6091 	for (p = ata_force_param_buf; *p; p++)
6092 		if (*p == ',')
6093 			size++;
6094 
6095 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6096 	if (!ata_force_tbl) {
6097 		printk(KERN_WARNING "ata: failed to allocate force table, "
6098 		       "libata.force ignored\n");
6099 		return;
6100 	}
6101 
6102 	/* parse and populate the table */
6103 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6104 		const char *reason = "";
6105 		struct ata_force_ent te = { .port = -1, .device = -1 };
6106 
6107 		next = cur;
6108 		if (ata_parse_force_one(&next, &te, &reason)) {
6109 			printk(KERN_WARNING "ata: failed to parse force "
6110 			       "parameter \"%s\" (%s)\n",
6111 			       cur, reason);
6112 			continue;
6113 		}
6114 
6115 		if (te.port == -1) {
6116 			te.port = last_port;
6117 			te.device = last_device;
6118 		}
6119 
6120 		ata_force_tbl[idx++] = te;
6121 
6122 		last_port = te.port;
6123 		last_device = te.device;
6124 	}
6125 
6126 	ata_force_tbl_size = idx;
6127 }
6128 
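/*
 * Illustrative examples, assuming the keyword table above: strings the
 * libata.force parameter parsers accept.
 *
 *	libata.force=1.5Gbps		(limit all ports to 1.5Gbps)
 *	libata.force=2:noncq		(disable NCQ on port 2)
 *	libata.force=1.00:udma/33	(cap device 0 on port 1 to UDMA/33)
 *	libata.force=3:norst,noncq	(an entry without an ID reuses the
 *					 previous entry's port/device)
 */
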
6129 static int __init ata_init(void)
6130 {
6131 	ata_parse_force_param();
6132 
6133 	ata_wq = create_workqueue("ata");
6134 	if (!ata_wq)
6135 		goto free_force_tbl;
6136 
6137 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6138 	if (!ata_aux_wq)
6139 		goto free_wq;
6140 
6141 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6142 	return 0;
6143 
6144 free_wq:
6145 	destroy_workqueue(ata_wq);
6146 free_force_tbl:
6147 	kfree(ata_force_tbl);
6148 	return -ENOMEM;
6149 }
6150 
6151 static void __exit ata_exit(void)
6152 {
6153 	kfree(ata_force_tbl);
6154 	destroy_workqueue(ata_wq);
6155 	destroy_workqueue(ata_aux_wq);
6156 }
6157 
6158 subsys_initcall(ata_init);
6159 module_exit(ata_exit);
6160 
6161 static unsigned long ratelimit_time;
6162 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6163 
6164 int ata_ratelimit(void)
6165 {
6166 	int rc;
6167 	unsigned long flags;
6168 
6169 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6170 
6171 	if (time_after(jiffies, ratelimit_time)) {
6172 		rc = 1;
6173 		ratelimit_time = jiffies + (HZ/5);
6174 	} else
6175 		rc = 0;
6176 
6177 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6178 
6179 	return rc;
6180 }
6181 
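/*
 * Illustrative sketch, not compiled: ata_ratelimit() gates messages
 * that could otherwise storm from interrupt context, e.g.:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */
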
6182 /**
6183  *	ata_wait_register - wait until register value changes
6184  *	@reg: IO-mapped register
6185  *	@mask: Mask to apply to read register value
6186  *	@val: Wait condition
6187  *	@interval: polling interval in milliseconds
6188  *	@timeout: timeout in milliseconds
6189  *
6190  *	Waiting for some bits of register to change is a common
6191  *	operation for ATA controllers.  This function reads 32bit LE
6192  *	IO-mapped register @reg and tests for the following condition.
6193  *
6194  *	(*@reg & mask) != val
6195  *
6196  *	If the condition is met, it returns; otherwise, the process is
6197  *	repeated every @interval milliseconds until @timeout expires.
6198  *
6199  *	LOCKING:
6200  *	Kernel thread context (may sleep)
6201  *
6202  *	RETURNS:
6203  *	The final register value.
6204  */
6205 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6206 		      unsigned long interval, unsigned long timeout)
6207 {
6208 	unsigned long deadline;
6209 	u32 tmp;
6210 
6211 	tmp = ioread32(reg);
6212 
6213 	/* Calculate timeout _after_ the first read to make sure
6214 	 * preceding writes reach the controller before starting to
6215 	 * eat away the timeout.
6216 	 */
6217 	deadline = ata_deadline(jiffies, timeout);
6218 
6219 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6220 		msleep(interval);
6221 		tmp = ioread32(reg);
6222 	}
6223 
6224 	return tmp;
6225 }
6226 
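/*
 * Illustrative sketch, not compiled: polling until a busy bit clears.
 * "mmio", FOO_STATUS and FOO_BUSY are hypothetical.  Because the wait
 * continues while (reg & mask) == val, waiting for a bit to clear
 * passes the bit as both @mask and @val:
 *
 *	tmp = ata_wait_register(mmio + FOO_STATUS, FOO_BUSY, FOO_BUSY,
 *				10, 500);
 *	if (tmp & FOO_BUSY)
 *		return -EBUSY;	(still busy after 500ms)
 */
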
6227 /*
6228  * Dummy port_ops
6229  */
6230 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6231 {
6232 	return AC_ERR_SYSTEM;
6233 }
6234 
6235 static void ata_dummy_error_handler(struct ata_port *ap)
6236 {
6237 	/* truly dummy */
6238 }
6239 
6240 struct ata_port_operations ata_dummy_port_ops = {
6241 	.qc_prep		= ata_noop_qc_prep,
6242 	.qc_issue		= ata_dummy_qc_issue,
6243 	.error_handler		= ata_dummy_error_handler,
6244 };
6245 
6246 const struct ata_port_info ata_dummy_port_info = {
6247 	.port_ops		= &ata_dummy_port_ops,
6248 };
6249 
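/*
 * Illustrative sketch, not compiled: an LLD with a dead or disabled
 * channel plugs in the dummies above; "foo_port_info" is hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *
 * A dummy port fails any issued command with AC_ERR_SYSTEM and is
 * reported as "DUMMY" at registration time.
 */
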
6250 /*
6251  * libata is essentially a library of internal helper functions for
6252  * low-level ATA host controller drivers.  As such, the API/ABI is
6253  * likely to change as new drivers are added and updated.
6254  * Do not depend on ABI/API stability.
6255  */
6256 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6257 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6258 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6259 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6260 EXPORT_SYMBOL_GPL(sata_port_ops);
6261 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6262 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6263 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6264 EXPORT_SYMBOL_GPL(ata_host_init);
6265 EXPORT_SYMBOL_GPL(ata_host_alloc);
6266 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6267 EXPORT_SYMBOL_GPL(ata_host_start);
6268 EXPORT_SYMBOL_GPL(ata_host_register);
6269 EXPORT_SYMBOL_GPL(ata_host_activate);
6270 EXPORT_SYMBOL_GPL(ata_host_detach);
6271 EXPORT_SYMBOL_GPL(ata_sg_init);
6272 EXPORT_SYMBOL_GPL(ata_qc_complete);
6273 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6274 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6275 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6276 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6277 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6278 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6279 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6280 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6281 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6282 EXPORT_SYMBOL_GPL(ata_mode_string);
6283 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6284 EXPORT_SYMBOL_GPL(ata_port_start);
6285 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6286 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6287 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6288 EXPORT_SYMBOL_GPL(ata_port_probe);
6289 EXPORT_SYMBOL_GPL(ata_dev_disable);
6290 EXPORT_SYMBOL_GPL(sata_set_spd);
6291 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6292 EXPORT_SYMBOL_GPL(sata_link_debounce);
6293 EXPORT_SYMBOL_GPL(sata_link_resume);
6294 EXPORT_SYMBOL_GPL(ata_std_prereset);
6295 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6296 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6297 EXPORT_SYMBOL_GPL(ata_std_postreset);
6298 EXPORT_SYMBOL_GPL(ata_dev_classify);
6299 EXPORT_SYMBOL_GPL(ata_dev_pair);
6300 EXPORT_SYMBOL_GPL(ata_port_disable);
6301 EXPORT_SYMBOL_GPL(ata_ratelimit);
6302 EXPORT_SYMBOL_GPL(ata_wait_register);
6303 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6304 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6305 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6306 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6307 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6308 EXPORT_SYMBOL_GPL(sata_scr_valid);
6309 EXPORT_SYMBOL_GPL(sata_scr_read);
6310 EXPORT_SYMBOL_GPL(sata_scr_write);
6311 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6312 EXPORT_SYMBOL_GPL(ata_link_online);
6313 EXPORT_SYMBOL_GPL(ata_link_offline);
6314 #ifdef CONFIG_PM
6315 EXPORT_SYMBOL_GPL(ata_host_suspend);
6316 EXPORT_SYMBOL_GPL(ata_host_resume);
6317 #endif /* CONFIG_PM */
6318 EXPORT_SYMBOL_GPL(ata_id_string);
6319 EXPORT_SYMBOL_GPL(ata_id_c_string);
6320 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6321 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6322 
6323 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6324 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6325 EXPORT_SYMBOL_GPL(ata_timing_compute);
6326 EXPORT_SYMBOL_GPL(ata_timing_merge);
6327 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6328 
6329 #ifdef CONFIG_PCI
6330 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6331 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6332 #ifdef CONFIG_PM
6333 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6334 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6335 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6336 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6337 #endif /* CONFIG_PM */
6338 #endif /* CONFIG_PCI */
6339 
6340 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6341 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6342 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6343 EXPORT_SYMBOL_GPL(ata_port_desc);
6344 #ifdef CONFIG_PCI
6345 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6346 #endif /* CONFIG_PCI */
6347 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6348 EXPORT_SYMBOL_GPL(ata_link_abort);
6349 EXPORT_SYMBOL_GPL(ata_port_abort);
6350 EXPORT_SYMBOL_GPL(ata_port_freeze);
6351 EXPORT_SYMBOL_GPL(sata_async_notification);
6352 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6353 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6354 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6355 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6356 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6357 EXPORT_SYMBOL_GPL(ata_do_eh);
6358 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6359 
6360 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6361 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6362 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6363 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6364 EXPORT_SYMBOL_GPL(ata_cable_sata);
6365