xref: /linux/drivers/ata/libata-core.c (revision 843aef4930b9953c9ca624a990b201440304b56f)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <scsi/scsi.h>
61 #include <scsi/scsi_cmnd.h>
62 #include <scsi/scsi_host.h>
63 #include <linux/libata.h>
64 #include <asm/byteorder.h>
65 #include <linux/cdrom.h>
66 
67 #include "libata.h"
68 
69 
70 /* debounce timing parameters in msecs { interval, duration, timeout } */
71 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
72 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
73 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
74 
75 const struct ata_port_operations ata_base_port_ops = {
76 	.prereset		= ata_std_prereset,
77 	.postreset		= ata_std_postreset,
78 	.error_handler		= ata_std_error_handler,
79 };
80 
81 const struct ata_port_operations sata_port_ops = {
82 	.inherits		= &ata_base_port_ops,
83 
84 	.qc_defer		= ata_std_qc_defer,
85 	.hardreset		= sata_std_hardreset,
86 };
87 
88 static unsigned int ata_dev_init_params(struct ata_device *dev,
89 					u16 heads, u16 sectors);
90 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
91 static unsigned int ata_dev_set_feature(struct ata_device *dev,
92 					u8 enable, u8 feature);
93 static void ata_dev_xfermask(struct ata_device *dev);
94 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
95 
96 unsigned int ata_print_id = 1;
97 static struct workqueue_struct *ata_wq;
98 
99 struct workqueue_struct *ata_aux_wq;
100 
101 struct ata_force_param {
102 	const char	*name;
103 	unsigned int	cbl;
104 	int		spd_limit;
105 	unsigned long	xfer_mask;
106 	unsigned int	horkage_on;
107 	unsigned int	horkage_off;
108 	unsigned int	lflags;
109 };
110 
111 struct ata_force_ent {
112 	int			port;
113 	int			device;
114 	struct ata_force_param	param;
115 };
116 
117 static struct ata_force_ent *ata_force_tbl;
118 static int ata_force_tbl_size;
119 
120 static char ata_force_param_buf[PAGE_SIZE] __initdata;
121 /* param_buf is thrown away after initialization, disallow read */
122 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
123 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
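/*
 * Illustrative usage (a sketch, not a complete reference): when libata is
 * built in, the parameter goes on the kernel command line, e.g.
 *
 *	libata.force=1.00:40c,udma4
 *
 * which, per the force handling below (see ata_force_cbl() and
 * ata_force_xfermask()), would force a 40-conductor cable on port 1 and cap
 * device 1.00 at UDMA4.  Documentation/kernel-parameters.txt has the
 * authoritative syntax.
 */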
124 
125 static int atapi_enabled = 1;
126 module_param(atapi_enabled, int, 0444);
127 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
128 
129 static int atapi_dmadir = 0;
130 module_param(atapi_dmadir, int, 0444);
131 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
132 
133 int atapi_passthru16 = 1;
134 module_param(atapi_passthru16, int, 0444);
135 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
136 
137 int libata_fua = 0;
138 module_param_named(fua, libata_fua, int, 0444);
139 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
140 
141 static int ata_ignore_hpa;
142 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
143 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
144 
145 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
146 module_param_named(dma, libata_dma_mask, int, 0444);
147 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
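/*
 * Example (illustrative, based on the mask bits documented above): booting
 * with libata.dma=3 keeps DMA enabled for ATA disks and ATAPI devices but
 * drops the CF bit, forcing CompactFlash cards to PIO; libata.dma=0
 * disables DMA everywhere.
 */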
148 
149 static int ata_probe_timeout;
150 module_param(ata_probe_timeout, int, 0444);
151 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
152 
153 int libata_noacpi = 0;
154 module_param_named(noacpi, libata_noacpi, int, 0444);
155 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
156 
157 int libata_allow_tpm = 0;
158 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
159 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
160 
161 MODULE_AUTHOR("Jeff Garzik");
162 MODULE_DESCRIPTION("Library module for ATA devices");
163 MODULE_LICENSE("GPL");
164 MODULE_VERSION(DRV_VERSION);
165 
166 
167 /**
168  *	ata_link_next - link iteration helper
169  *	@link: the previous link, NULL to start
170  *	@ap: ATA port containing links to iterate
171  *	@mode: iteration mode, one of ATA_LITER_*
172  *
173  *	LOCKING:
174  *	Host lock or EH context.
175  *
176  *	RETURNS:
177  *	Pointer to the next link.
178  */
179 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
180 			       enum ata_link_iter_mode mode)
181 {
182 	BUG_ON(mode != ATA_LITER_EDGE &&
183 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
184 
185 	/* NULL link indicates start of iteration */
186 	if (!link)
187 		switch (mode) {
188 		case ATA_LITER_EDGE:
189 		case ATA_LITER_PMP_FIRST:
190 			if (sata_pmp_attached(ap))
191 				return ap->pmp_link;
192 			/* fall through */
193 		case ATA_LITER_HOST_FIRST:
194 			return &ap->link;
195 		}
196 
197 	/* we just iterated over the host link, what's next? */
198 	if (link == &ap->link)
199 		switch (mode) {
200 		case ATA_LITER_HOST_FIRST:
201 			if (sata_pmp_attached(ap))
202 				return ap->pmp_link;
203 			/* fall through */
204 		case ATA_LITER_PMP_FIRST:
205 			if (unlikely(ap->slave_link))
206 				return ap->slave_link;
207 			/* fall through */
208 		case ATA_LITER_EDGE:
209 			return NULL;
210 		}
211 
212 	/* slave_link excludes PMP */
213 	if (unlikely(link == ap->slave_link))
214 		return NULL;
215 
216 	/* we were over a PMP link */
217 	if (++link < ap->pmp_link + ap->nr_pmp_links)
218 		return link;
219 
220 	if (mode == ATA_LITER_PMP_FIRST)
221 		return &ap->link;
222 
223 	return NULL;
224 }
225 
226 /**
227  *	ata_dev_next - device iteration helper
228  *	@dev: the previous device, NULL to start
229  *	@link: ATA link containing devices to iterate
230  *	@mode: iteration mode, one of ATA_DITER_*
231  *
232  *	LOCKING:
233  *	Host lock or EH context.
234  *
235  *	RETURNS:
236  *	Pointer to the next device.
237  */
238 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
239 				enum ata_dev_iter_mode mode)
240 {
241 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
242 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
243 
244 	/* NULL dev indicates start of iteration */
245 	if (!dev)
246 		switch (mode) {
247 		case ATA_DITER_ENABLED:
248 		case ATA_DITER_ALL:
249 			dev = link->device;
250 			goto check;
251 		case ATA_DITER_ENABLED_REVERSE:
252 		case ATA_DITER_ALL_REVERSE:
253 			dev = link->device + ata_link_max_devices(link) - 1;
254 			goto check;
255 		}
256 
257  next:
258 	/* move to the next one */
259 	switch (mode) {
260 	case ATA_DITER_ENABLED:
261 	case ATA_DITER_ALL:
262 		if (++dev < link->device + ata_link_max_devices(link))
263 			goto check;
264 		return NULL;
265 	case ATA_DITER_ENABLED_REVERSE:
266 	case ATA_DITER_ALL_REVERSE:
267 		if (--dev >= link->device)
268 			goto check;
269 		return NULL;
270 	}
271 
272  check:
273 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
274 	    !ata_dev_enabled(dev))
275 		goto next;
276 	return dev;
277 }
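/*
 * Both iterators above are normally used through the ata_for_each_link()
 * and ata_for_each_dev() wrappers from <linux/libata.h> (see
 * ata_lpm_enable() later in this file).  A minimal sketch, given a
 * struct ata_port *ap, that visits every enabled device on every link of
 * the port:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_printk(dev, KERN_INFO, "device is enabled\n");
 */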
278 
279 /**
280  *	ata_dev_phys_link - find physical link for a device
281  *	@dev: ATA device to look up physical link for
282  *
283  *	Look up physical link which @dev is attached to.  Note that
284  *	this is different from @dev->link only when @dev is on the slave
285  *	link.  For all other cases, it's the same as @dev->link.
286  *
287  *	LOCKING:
288  *	Don't care.
289  *
290  *	RETURNS:
291  *	Pointer to the found physical link.
292  */
293 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
294 {
295 	struct ata_port *ap = dev->link->ap;
296 
297 	if (!ap->slave_link)
298 		return dev->link;
299 	if (!dev->devno)
300 		return &ap->link;
301 	return ap->slave_link;
302 }
303 
304 /**
305  *	ata_force_cbl - force cable type according to libata.force
306  *	@ap: ATA port of interest
307  *
308  *	Force cable type according to libata.force and whine about it.
309  *	The last entry which has matching port number is used, so it
310  *	can be specified as part of device force parameters.  For
311  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
312  *	same effect.
313  *
314  *	LOCKING:
315  *	EH context.
316  */
317 void ata_force_cbl(struct ata_port *ap)
318 {
319 	int i;
320 
321 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
322 		const struct ata_force_ent *fe = &ata_force_tbl[i];
323 
324 		if (fe->port != -1 && fe->port != ap->print_id)
325 			continue;
326 
327 		if (fe->param.cbl == ATA_CBL_NONE)
328 			continue;
329 
330 		ap->cbl = fe->param.cbl;
331 		ata_port_printk(ap, KERN_NOTICE,
332 				"FORCE: cable set to %s\n", fe->param.name);
333 		return;
334 	}
335 }
336 
337 /**
338  *	ata_force_link_limits - force link limits according to libata.force
339  *	@link: ATA link of interest
340  *
341  *	Force link flags and SATA spd limit according to libata.force
342  *	and whine about it.  When only the port part is specified
343  *	(e.g. 1:), the limit applies to all links connected to both
344  *	the host link and all fan-out ports connected via PMP.  If the
345  *	device part is specified as 0 (e.g. 1.00:), it specifies the
346  *	first fan-out link not the host link.  Device number 15 always
347  *	first fan-out link, not the host link.  Device number 15 always
348  *	points to the host link whether PMP is attached or not.  If the
349  *	controller has a slave link, device number 16 points to it.
350  *	LOCKING:
351  *	EH context.
352  */
353 static void ata_force_link_limits(struct ata_link *link)
354 {
355 	bool did_spd = false;
356 	int linkno = link->pmp;
357 	int i;
358 
359 	if (ata_is_host_link(link))
360 		linkno += 15;
361 
362 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
363 		const struct ata_force_ent *fe = &ata_force_tbl[i];
364 
365 		if (fe->port != -1 && fe->port != link->ap->print_id)
366 			continue;
367 
368 		if (fe->device != -1 && fe->device != linkno)
369 			continue;
370 
371 		/* only honor the first spd limit */
372 		if (!did_spd && fe->param.spd_limit) {
373 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
374 			ata_link_printk(link, KERN_NOTICE,
375 					"FORCE: PHY spd limit set to %s\n",
376 					fe->param.name);
377 			did_spd = true;
378 		}
379 
380 		/* let lflags stack */
381 		if (fe->param.lflags) {
382 			link->flags |= fe->param.lflags;
383 			ata_link_printk(link, KERN_NOTICE,
384 					"FORCE: link flag 0x%x forced -> 0x%x\n",
385 					fe->param.lflags, link->flags);
386 		}
387 	}
388 }
389 
390 /**
391  *	ata_force_xfermask - force xfermask according to libata.force
392  *	@dev: ATA device of interest
393  *
394  *	Force xfer_mask according to libata.force and whine about it.
395  *	For consistency with link selection, device number 15 selects
396  *	the first device connected to the host link.
397  *
398  *	LOCKING:
399  *	EH context.
400  */
401 static void ata_force_xfermask(struct ata_device *dev)
402 {
403 	int devno = dev->link->pmp + dev->devno;
404 	int alt_devno = devno;
405 	int i;
406 
407 	/* allow n.15/16 for devices attached to host port */
408 	if (ata_is_host_link(dev->link))
409 		alt_devno += 15;
410 
411 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
412 		const struct ata_force_ent *fe = &ata_force_tbl[i];
413 		unsigned long pio_mask, mwdma_mask, udma_mask;
414 
415 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
416 			continue;
417 
418 		if (fe->device != -1 && fe->device != devno &&
419 		    fe->device != alt_devno)
420 			continue;
421 
422 		if (!fe->param.xfer_mask)
423 			continue;
424 
425 		ata_unpack_xfermask(fe->param.xfer_mask,
426 				    &pio_mask, &mwdma_mask, &udma_mask);
427 		if (udma_mask)
428 			dev->udma_mask = udma_mask;
429 		else if (mwdma_mask) {
430 			dev->udma_mask = 0;
431 			dev->mwdma_mask = mwdma_mask;
432 		} else {
433 			dev->udma_mask = 0;
434 			dev->mwdma_mask = 0;
435 			dev->pio_mask = pio_mask;
436 		}
437 
438 		ata_dev_printk(dev, KERN_NOTICE,
439 			"FORCE: xfer_mask set to %s\n", fe->param.name);
440 		return;
441 	}
442 }
443 
444 /**
445  *	ata_force_horkage - force horkage according to libata.force
446  *	@dev: ATA device of interest
447  *
448  *	Force horkage according to libata.force and whine about it.
449  *	For consistency with link selection, device number 15 selects
450  *	the first device connected to the host link.
451  *
452  *	LOCKING:
453  *	EH context.
454  */
455 static void ata_force_horkage(struct ata_device *dev)
456 {
457 	int devno = dev->link->pmp + dev->devno;
458 	int alt_devno = devno;
459 	int i;
460 
461 	/* allow n.15/16 for devices attached to host port */
462 	if (ata_is_host_link(dev->link))
463 		alt_devno += 15;
464 
465 	for (i = 0; i < ata_force_tbl_size; i++) {
466 		const struct ata_force_ent *fe = &ata_force_tbl[i];
467 
468 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
469 			continue;
470 
471 		if (fe->device != -1 && fe->device != devno &&
472 		    fe->device != alt_devno)
473 			continue;
474 
475 		if (!(~dev->horkage & fe->param.horkage_on) &&
476 		    !(dev->horkage & fe->param.horkage_off))
477 			continue;
478 
479 		dev->horkage |= fe->param.horkage_on;
480 		dev->horkage &= ~fe->param.horkage_off;
481 
482 		ata_dev_printk(dev, KERN_NOTICE,
483 			"FORCE: horkage modified (%s)\n", fe->param.name);
484 	}
485 }
486 
487 /**
488  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
489  *	@opcode: SCSI opcode
490  *
491  *	Determine ATAPI command type from @opcode.
492  *
493  *	LOCKING:
494  *	None.
495  *
496  *	RETURNS:
497  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
498  */
499 int atapi_cmd_type(u8 opcode)
500 {
501 	switch (opcode) {
502 	case GPCMD_READ_10:
503 	case GPCMD_READ_12:
504 		return ATAPI_READ;
505 
506 	case GPCMD_WRITE_10:
507 	case GPCMD_WRITE_12:
508 	case GPCMD_WRITE_AND_VERIFY_10:
509 		return ATAPI_WRITE;
510 
511 	case GPCMD_READ_CD:
512 	case GPCMD_READ_CD_MSF:
513 		return ATAPI_READ_CD;
514 
515 	case ATA_16:
516 	case ATA_12:
517 		if (atapi_passthru16)
518 			return ATAPI_PASS_THRU;
519 		/* fall through */
520 	default:
521 		return ATAPI_MISC;
522 	}
523 }
524 
525 /**
526  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
527  *	@tf: Taskfile to convert
528  *	@pmp: Port multiplier port
529  *	@is_cmd: This FIS is for command
530  *	@fis: Buffer into which data will be output
531  *
532  *	Converts a standard ATA taskfile to a Serial ATA
533  *	FIS structure (Register - Host to Device).
534  *
535  *	LOCKING:
536  *	Inherited from caller.
537  */
538 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
539 {
540 	fis[0] = 0x27;			/* Register - Host to Device FIS */
541 	fis[1] = pmp & 0xf;		/* Port multiplier number*/
542 	if (is_cmd)
543 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
544 
545 	fis[2] = tf->command;
546 	fis[3] = tf->feature;
547 
548 	fis[4] = tf->lbal;
549 	fis[5] = tf->lbam;
550 	fis[6] = tf->lbah;
551 	fis[7] = tf->device;
552 
553 	fis[8] = tf->hob_lbal;
554 	fis[9] = tf->hob_lbam;
555 	fis[10] = tf->hob_lbah;
556 	fis[11] = tf->hob_feature;
557 
558 	fis[12] = tf->nsect;
559 	fis[13] = tf->hob_nsect;
560 	fis[14] = 0;
561 	fis[15] = tf->ctl;
562 
563 	fis[16] = 0;
564 	fis[17] = 0;
565 	fis[18] = 0;
566 	fis[19] = 0;
567 }
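/*
 * Minimal usage sketch (assuming an LLD context with a prepared qc, as in
 * the existing SATA drivers): the output buffer must hold the 20 bytes of
 * the Register - Host to Device FIS written above.
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 */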
568 
569 /**
570  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
571  *	@fis: Buffer from which data will be input
572  *	@tf: Taskfile to output
573  *
574  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
575  *
576  *	LOCKING:
577  *	Inherited from caller.
578  */
579 
580 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
581 {
582 	tf->command	= fis[2];	/* status */
583 	tf->feature	= fis[3];	/* error */
584 
585 	tf->lbal	= fis[4];
586 	tf->lbam	= fis[5];
587 	tf->lbah	= fis[6];
588 	tf->device	= fis[7];
589 
590 	tf->hob_lbal	= fis[8];
591 	tf->hob_lbam	= fis[9];
592 	tf->hob_lbah	= fis[10];
593 
594 	tf->nsect	= fis[12];
595 	tf->hob_nsect	= fis[13];
596 }
597 
598 static const u8 ata_rw_cmds[] = {
599 	/* pio multi */
600 	ATA_CMD_READ_MULTI,
601 	ATA_CMD_WRITE_MULTI,
602 	ATA_CMD_READ_MULTI_EXT,
603 	ATA_CMD_WRITE_MULTI_EXT,
604 	0,
605 	0,
606 	0,
607 	ATA_CMD_WRITE_MULTI_FUA_EXT,
608 	/* pio */
609 	ATA_CMD_PIO_READ,
610 	ATA_CMD_PIO_WRITE,
611 	ATA_CMD_PIO_READ_EXT,
612 	ATA_CMD_PIO_WRITE_EXT,
613 	0,
614 	0,
615 	0,
616 	0,
617 	/* dma */
618 	ATA_CMD_READ,
619 	ATA_CMD_WRITE,
620 	ATA_CMD_READ_EXT,
621 	ATA_CMD_WRITE_EXT,
622 	0,
623 	0,
624 	0,
625 	ATA_CMD_WRITE_FUA_EXT
626 };
627 
628 /**
629  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
630  *	@tf: command to examine and configure
631  *	@dev: device tf belongs to
632  *
633  *	Examine the device configuration and tf->flags to calculate
634  *	the proper read/write commands and protocol to use.
635  *
636  *	LOCKING:
637  *	caller.
638  */
639 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
640 {
641 	u8 cmd;
642 
643 	int index, fua, lba48, write;
644 
645 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
646 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
647 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
648 
649 	if (dev->flags & ATA_DFLAG_PIO) {
650 		tf->protocol = ATA_PROT_PIO;
651 		index = dev->multi_count ? 0 : 8;
652 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
653 		/* Unable to use DMA due to host limitation */
654 		tf->protocol = ATA_PROT_PIO;
655 		index = dev->multi_count ? 0 : 8;
656 	} else {
657 		tf->protocol = ATA_PROT_DMA;
658 		index = 16;
659 	}
660 
661 	cmd = ata_rw_cmds[index + fua + lba48 + write];
662 	if (cmd) {
663 		tf->command = cmd;
664 		return 0;
665 	}
666 	return -1;
667 }
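/*
 * Worked example of the ata_rw_cmds[] indexing above: a DMA LBA48 FUA write
 * selects index 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT, while a plain LBA28 DMA read selects index 16,
 * i.e. ATA_CMD_READ.  The zero slots in the table are unsupported
 * combinations (e.g. FUA without LBA48) and make ata_rwcmd_protocol()
 * return -1.
 */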
668 
669 /**
670  *	ata_tf_read_block - Read block address from ATA taskfile
671  *	@tf: ATA taskfile of interest
672  *	@dev: ATA device @tf belongs to
673  *
674  *	LOCKING:
675  *	None.
676  *
677  *	Read block address from @tf.  This function can handle all
678  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
679  *	flags select the address format to use.
680  *
681  *	RETURNS:
682  *	Block address read from @tf.
683  */
684 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
685 {
686 	u64 block = 0;
687 
688 	if (tf->flags & ATA_TFLAG_LBA) {
689 		if (tf->flags & ATA_TFLAG_LBA48) {
690 			block |= (u64)tf->hob_lbah << 40;
691 			block |= (u64)tf->hob_lbam << 32;
692 			block |= (u64)tf->hob_lbal << 24;
693 		} else
694 			block |= (tf->device & 0xf) << 24;
695 
696 		block |= tf->lbah << 16;
697 		block |= tf->lbam << 8;
698 		block |= tf->lbal;
699 	} else {
700 		u32 cyl, head, sect;
701 
702 		cyl = tf->lbam | (tf->lbah << 8);
703 		head = tf->device & 0xf;
704 		sect = tf->lbal;
705 
706 		block = (cyl * dev->heads + head) * dev->sectors + sect;
707 	}
708 
709 	return block;
710 }
711 
712 /**
713  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
714  *	@tf: Target ATA taskfile
715  *	@dev: ATA device @tf belongs to
716  *	@block: Block address
717  *	@n_block: Number of blocks
718  *	@tf_flags: RW/FUA etc...
719  *	@tag: tag
720  *
721  *	LOCKING:
722  *	None.
723  *
724  *	Build ATA taskfile @tf for read/write request described by
725  *	@block, @n_block, @tf_flags and @tag on @dev.
726  *
727  *	RETURNS:
728  *
729  *	0 on success, -ERANGE if the request is too large for @dev,
730  *	-EINVAL if the request is invalid.
731  */
732 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
733 		    u64 block, u32 n_block, unsigned int tf_flags,
734 		    unsigned int tag)
735 {
736 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
737 	tf->flags |= tf_flags;
738 
739 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
740 		/* yay, NCQ */
741 		if (!lba_48_ok(block, n_block))
742 			return -ERANGE;
743 
744 		tf->protocol = ATA_PROT_NCQ;
745 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
746 
747 		if (tf->flags & ATA_TFLAG_WRITE)
748 			tf->command = ATA_CMD_FPDMA_WRITE;
749 		else
750 			tf->command = ATA_CMD_FPDMA_READ;
751 
752 		tf->nsect = tag << 3;
753 		tf->hob_feature = (n_block >> 8) & 0xff;
754 		tf->feature = n_block & 0xff;
755 
756 		tf->hob_lbah = (block >> 40) & 0xff;
757 		tf->hob_lbam = (block >> 32) & 0xff;
758 		tf->hob_lbal = (block >> 24) & 0xff;
759 		tf->lbah = (block >> 16) & 0xff;
760 		tf->lbam = (block >> 8) & 0xff;
761 		tf->lbal = block & 0xff;
762 
763 		tf->device = 1 << 6;
764 		if (tf->flags & ATA_TFLAG_FUA)
765 			tf->device |= 1 << 7;
766 	} else if (dev->flags & ATA_DFLAG_LBA) {
767 		tf->flags |= ATA_TFLAG_LBA;
768 
769 		if (lba_28_ok(block, n_block)) {
770 			/* use LBA28 */
771 			tf->device |= (block >> 24) & 0xf;
772 		} else if (lba_48_ok(block, n_block)) {
773 			if (!(dev->flags & ATA_DFLAG_LBA48))
774 				return -ERANGE;
775 
776 			/* use LBA48 */
777 			tf->flags |= ATA_TFLAG_LBA48;
778 
779 			tf->hob_nsect = (n_block >> 8) & 0xff;
780 
781 			tf->hob_lbah = (block >> 40) & 0xff;
782 			tf->hob_lbam = (block >> 32) & 0xff;
783 			tf->hob_lbal = (block >> 24) & 0xff;
784 		} else
785 			/* request too large even for LBA48 */
786 			return -ERANGE;
787 
788 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
789 			return -EINVAL;
790 
791 		tf->nsect = n_block & 0xff;
792 
793 		tf->lbah = (block >> 16) & 0xff;
794 		tf->lbam = (block >> 8) & 0xff;
795 		tf->lbal = block & 0xff;
796 
797 		tf->device |= ATA_LBA;
798 	} else {
799 		/* CHS */
800 		u32 sect, head, cyl, track;
801 
802 		/* The request -may- be too large for CHS addressing. */
803 		if (!lba_28_ok(block, n_block))
804 			return -ERANGE;
805 
806 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
807 			return -EINVAL;
808 
809 		/* Convert LBA to CHS */
810 		track = (u32)block / dev->sectors;
811 		cyl   = track / dev->heads;
812 		head  = track % dev->heads;
813 		sect  = (u32)block % dev->sectors + 1;
814 
815 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
816 			(u32)block, track, cyl, head, sect);
817 
818 		/* Check whether the converted CHS can fit.
819 		   Cylinder: 0-65535
820 		   Head: 0-15
821 		   Sector: 1-255*/
822 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
823 			return -ERANGE;
824 
825 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
826 		tf->lbal = sect;
827 		tf->lbam = cyl;
828 		tf->lbah = cyl >> 8;
829 		tf->device |= head;
830 	}
831 
832 	return 0;
833 }
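/*
 * Worked example of the CHS conversion above (hypothetical geometry): with
 * dev->heads == 16 and dev->sectors == 63, block 10000 gives
 * track = 10000 / 63 = 158, cyl = 158 / 16 = 9, head = 158 % 16 = 14 and
 * sect = 10000 % 63 + 1 = 47, all within the 16-bit/4-bit/8-bit limits
 * checked above.
 */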
834 
835 /**
836  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
837  *	@pio_mask: pio_mask
838  *	@mwdma_mask: mwdma_mask
839  *	@udma_mask: udma_mask
840  *
841  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
842  *	unsigned int xfer_mask.
843  *
844  *	LOCKING:
845  *	None.
846  *
847  *	RETURNS:
848  *	Packed xfer_mask.
849  */
850 unsigned long ata_pack_xfermask(unsigned long pio_mask,
851 				unsigned long mwdma_mask,
852 				unsigned long udma_mask)
853 {
854 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
855 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
856 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
857 }
858 
859 /**
860  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
861  *	@xfer_mask: xfer_mask to unpack
862  *	@pio_mask: resulting pio_mask
863  *	@mwdma_mask: resulting mwdma_mask
864  *	@udma_mask: resulting udma_mask
865  *
866  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
867  *	Any NULL destination masks will be ignored.
868  */
869 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
870 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
871 {
872 	if (pio_mask)
873 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
874 	if (mwdma_mask)
875 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
876 	if (udma_mask)
877 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
878 }
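/*
 * Illustrative round trip (assuming the standard unshifted mode masks from
 * <linux/ata.h>): pack a PIO0-4 / MWDMA0-2 / UDMA0-5 capability set and
 * pull the UDMA part back out; udma_mask ends up equal to ATA_UDMA5 again.
 *
 *	unsigned long xfer_mask, udma_mask;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);
 *	ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
 */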
879 
880 static const struct ata_xfer_ent {
881 	int shift, bits;
882 	u8 base;
883 } ata_xfer_tbl[] = {
884 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
885 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
886 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
887 	{ -1, },
888 };
889 
890 /**
891  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
892  *	@xfer_mask: xfer_mask of interest
893  *
894  *	Return matching XFER_* value for @xfer_mask.  Only the highest
895  *	bit of @xfer_mask is considered.
896  *
897  *	LOCKING:
898  *	None.
899  *
900  *	RETURNS:
901  *	Matching XFER_* value, 0xff if no match found.
902  */
903 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
904 {
905 	int highbit = fls(xfer_mask) - 1;
906 	const struct ata_xfer_ent *ent;
907 
908 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
909 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
910 			return ent->base + highbit - ent->shift;
911 	return 0xff;
912 }
913 
914 /**
915  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
916  *	@xfer_mode: XFER_* of interest
917  *
918  *	Return matching xfer_mask for @xfer_mode.
919  *
920  *	LOCKING:
921  *	None.
922  *
923  *	RETURNS:
924  *	Matching xfer_mask, 0 if no match found.
925  */
926 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
927 {
928 	const struct ata_xfer_ent *ent;
929 
930 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
931 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
932 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
933 				& ~((1 << ent->shift) - 1);
934 	return 0;
935 }
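/*
 * For example (following the table above): ata_xfer_mask2mode() of a mask
 * whose highest set bit is UDMA5 returns XFER_UDMA_5, and
 * ata_xfer_mode2mask(XFER_UDMA_5) gives back the mask of UDMA modes 0-5
 * (the PIO and MWDMA bits are not included).
 */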
936 
937 /**
938  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
939  *	@xfer_mode: XFER_* of interest
940  *
941  *	Return matching xfer_shift for @xfer_mode.
942  *
943  *	LOCKING:
944  *	None.
945  *
946  *	RETURNS:
947  *	Matching xfer_shift, -1 if no match found.
948  */
949 int ata_xfer_mode2shift(unsigned long xfer_mode)
950 {
951 	const struct ata_xfer_ent *ent;
952 
953 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
954 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
955 			return ent->shift;
956 	return -1;
957 }
958 
959 /**
960  *	ata_mode_string - convert xfer_mask to string
961  *	@xfer_mask: mask of bits supported; only highest bit counts.
962  *
963  *	Determine string which represents the highest speed
964  *	(highest bit in @xfer_mask).
965  *
966  *	LOCKING:
967  *	None.
968  *
969  *	RETURNS:
970  *	Constant C string representing highest speed listed in
971  *	@xfer_mask, or the constant C string "<n/a>".
972  */
973 const char *ata_mode_string(unsigned long xfer_mask)
974 {
975 	static const char * const xfer_mode_str[] = {
976 		"PIO0",
977 		"PIO1",
978 		"PIO2",
979 		"PIO3",
980 		"PIO4",
981 		"PIO5",
982 		"PIO6",
983 		"MWDMA0",
984 		"MWDMA1",
985 		"MWDMA2",
986 		"MWDMA3",
987 		"MWDMA4",
988 		"UDMA/16",
989 		"UDMA/25",
990 		"UDMA/33",
991 		"UDMA/44",
992 		"UDMA/66",
993 		"UDMA/100",
994 		"UDMA/133",
995 		"UDMA7",
996 	};
997 	int highbit;
998 
999 	highbit = fls(xfer_mask) - 1;
1000 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1001 		return xfer_mode_str[highbit];
1002 	return "<n/a>";
1003 }
1004 
1005 static const char *sata_spd_string(unsigned int spd)
1006 {
1007 	static const char * const spd_str[] = {
1008 		"1.5 Gbps",
1009 		"3.0 Gbps",
1010 		"6.0 Gbps",
1011 	};
1012 
1013 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1014 		return "<unknown>";
1015 	return spd_str[spd - 1];
1016 }
1017 
1018 void ata_dev_disable(struct ata_device *dev)
1019 {
1020 	if (ata_dev_enabled(dev)) {
1021 		if (ata_msg_drv(dev->link->ap))
1022 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
1023 		ata_acpi_on_disable(dev);
1024 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
1025 					     ATA_DNXFER_QUIET);
1026 		dev->class++;
1027 	}
1028 }
1029 
1030 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
1031 {
1032 	struct ata_link *link = dev->link;
1033 	struct ata_port *ap = link->ap;
1034 	u32 scontrol;
1035 	unsigned int err_mask;
1036 	int rc;
1037 
1038 	/*
1039 	 * disallow DIPM for drivers which haven't set
1040 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
1041 	 * phy ready will be set in the interrupt status on
1042 	 * state changes, which will cause some drivers to
1043 	 * think there are errors - additionally drivers will
1044 	 * need to disable hot plug.
1045 	 */
1046 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
1047 		ap->pm_policy = NOT_AVAILABLE;
1048 		return -EINVAL;
1049 	}
1050 
1051 	/*
1052 	 * For DIPM, we will only enable it for the
1053 	 * min_power setting.
1054 	 *
1055 	 * Why?  Because disks are too stupid to know that
1056 	 * if the host rejects a request to go to SLUMBER
1057 	 * they should retry at PARTIAL; instead they
1058 	 * just give up.  So, for medium_power to
1059 	 * work at all, we need to only allow HIPM.
1060 	 */
1061 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
1062 	if (rc)
1063 		return rc;
1064 
1065 	switch (policy) {
1066 	case MIN_POWER:
1067 		/* no restrictions on IPM transitions */
1068 		scontrol &= ~(0x3 << 8);
1069 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1070 		if (rc)
1071 			return rc;
1072 
1073 		/* enable DIPM */
1074 		if (dev->flags & ATA_DFLAG_DIPM)
1075 			err_mask = ata_dev_set_feature(dev,
1076 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
1077 		break;
1078 	case MEDIUM_POWER:
1079 		/* allow IPM to PARTIAL */
1080 		scontrol &= ~(0x1 << 8);
1081 		scontrol |= (0x2 << 8);
1082 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1083 		if (rc)
1084 			return rc;
1085 
1086 		/*
1087 		 * we don't have to disable DIPM since IPM flags
1088 		 * disallow transitions to SLUMBER, which effectively
1089 		 * disable DIPM if it does not support PARTIAL
1090 		 */
1091 		break;
1092 	case NOT_AVAILABLE:
1093 	case MAX_PERFORMANCE:
1094 		/* disable all IPM transitions */
1095 		scontrol |= (0x3 << 8);
1096 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1097 		if (rc)
1098 			return rc;
1099 
1100 		/*
1101 		 * we don't have to disable DIPM since IPM flags
1102 		 * disallow all transitions which effectively
1103 		 * disable DIPM anyway.
1104 		 */
1105 		break;
1106 	}
1107 
1108 	/* FIXME: handle SET FEATURES failure */
1109 	(void) err_mask;
1110 
1111 	return 0;
1112 }
1113 
1114 /**
1115  *	ata_dev_enable_pm - enable SATA interface power management
1116  *	@dev:  device to enable power management
1117  *	@policy: the link power management policy
1118  *
1119  *	Enable SATA Interface power management.  This will enable
1120  *	Device Interface Power Management (DIPM) for min_power
1121  * 	policy, and then call driver specific callbacks for
1122  *	enabling Host Initiated Power management.
1123  *
1124  *	Locking: Caller.
1125  *	Returns: void.  On failure the port's pm_policy falls back to MAX_PERFORMANCE.
1126  */
1127 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1128 {
1129 	int rc = 0;
1130 	struct ata_port *ap = dev->link->ap;
1131 
1132 	/* set HIPM first, then DIPM */
1133 	if (ap->ops->enable_pm)
1134 		rc = ap->ops->enable_pm(ap, policy);
1135 	if (rc)
1136 		goto enable_pm_out;
1137 	rc = ata_dev_set_dipm(dev, policy);
1138 
1139 enable_pm_out:
1140 	if (rc)
1141 		ap->pm_policy = MAX_PERFORMANCE;
1142 	else
1143 		ap->pm_policy = policy;
1144 	return /* rc */;	/* hopefully we can use 'rc' eventually */
1145 }
1146 
1147 #ifdef CONFIG_PM
1148 /**
1149  *	ata_dev_disable_pm - disable SATA interface power management
1150  *	@dev: device to disable power management
1151  *
1152  *	Disable SATA Interface power management.  This will disable
1153  *	Device Interface Power Management (DIPM) without changing
1154  * 	policy,  call driver specific callbacks for disabling Host
1155  * 	policy, and call driver specific callbacks for disabling Host
1156  *
1157  *	Locking: Caller.
1158  *	Returns: void
1159  */
1160 static void ata_dev_disable_pm(struct ata_device *dev)
1161 {
1162 	struct ata_port *ap = dev->link->ap;
1163 
1164 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1165 	if (ap->ops->disable_pm)
1166 		ap->ops->disable_pm(ap);
1167 }
1168 #endif	/* CONFIG_PM */
1169 
1170 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1171 {
1172 	ap->pm_policy = policy;
1173 	ap->link.eh_info.action |= ATA_EH_LPM;
1174 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1175 	ata_port_schedule_eh(ap);
1176 }
1177 
1178 #ifdef CONFIG_PM
1179 static void ata_lpm_enable(struct ata_host *host)
1180 {
1181 	struct ata_link *link;
1182 	struct ata_port *ap;
1183 	struct ata_device *dev;
1184 	int i;
1185 
1186 	for (i = 0; i < host->n_ports; i++) {
1187 		ap = host->ports[i];
1188 		ata_for_each_link(link, ap, EDGE) {
1189 			ata_for_each_dev(dev, link, ALL)
1190 				ata_dev_disable_pm(dev);
1191 		}
1192 	}
1193 }
1194 
1195 static void ata_lpm_disable(struct ata_host *host)
1196 {
1197 	int i;
1198 
1199 	for (i = 0; i < host->n_ports; i++) {
1200 		struct ata_port *ap = host->ports[i];
1201 		ata_lpm_schedule(ap, ap->pm_policy);
1202 	}
1203 }
1204 #endif	/* CONFIG_PM */
1205 
1206 /**
1207  *	ata_dev_classify - determine device type based on ATA-spec signature
1208  *	@tf: ATA taskfile register set for device to be identified
1209  *
1210  *	Determine from taskfile register contents whether a device is
1211  *	ATA or ATAPI, as per "Signature and persistence" section
1212  *	of ATA/PI spec (volume 1, sect 5.14).
1213  *
1214  *	LOCKING:
1215  *	None.
1216  *
1217  *	RETURNS:
1218  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1219  *	%ATA_DEV_UNKNOWN in the event of failure.
1220  */
1221 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1222 {
1223 	/* Apple's open source Darwin code hints that some devices only
1224 	 * put a proper signature into the LBA mid/high registers,
1225 	 * so we only check those.  It's sufficient for uniqueness.
1226 	 *
1227 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1228 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1229 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1230 	 * spec has never mentioned using different signatures
1231 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1232 	 * Multiplier specification began to use 0x69/0x96 to identify
1233 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1234 	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1235 	 * 0x69/0x96 shortly and described them as reserved for
1236 	 * SerialATA.
1237 	 *
1238 	 * We follow the current spec and consider that 0x69/0x96
1239 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1240 	 */
1241 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1242 		DPRINTK("found ATA device by sig\n");
1243 		return ATA_DEV_ATA;
1244 	}
1245 
1246 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1247 		DPRINTK("found ATAPI device by sig\n");
1248 		return ATA_DEV_ATAPI;
1249 	}
1250 
1251 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1252 		DPRINTK("found PMP device by sig\n");
1253 		return ATA_DEV_PMP;
1254 	}
1255 
1256 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1257 		printk(KERN_INFO "ata: SEMB device ignored\n");
1258 		return ATA_DEV_SEMB_UNSUP; /* not yet */
1259 	}
1260 
1261 	DPRINTK("unknown device\n");
1262 	return ATA_DEV_UNKNOWN;
1263 }
1264 
1265 /**
1266  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1267  *	@id: IDENTIFY DEVICE results we will examine
1268  *	@s: string into which data is output
1269  *	@ofs: offset into identify device page
1270  *	@len: length of string to return. must be an even number.
1271  *
1272  *	The strings in the IDENTIFY DEVICE page are broken up into
1273  *	16-bit chunks.  Run through the string, and output each
1274  *	8-bit chunk linearly, regardless of platform.
1275  *
1276  *	LOCKING:
1277  *	caller.
1278  */
1279 
1280 void ata_id_string(const u16 *id, unsigned char *s,
1281 		   unsigned int ofs, unsigned int len)
1282 {
1283 	unsigned int c;
1284 
1285 	BUG_ON(len & 1);
1286 
1287 	while (len > 0) {
1288 		c = id[ofs] >> 8;
1289 		*s = c;
1290 		s++;
1291 
1292 		c = id[ofs] & 0xff;
1293 		*s = c;
1294 		s++;
1295 
1296 		ofs++;
1297 		len -= 2;
1298 	}
1299 }
1300 
1301 /**
1302  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1303  *	@id: IDENTIFY DEVICE results we will examine
1304  *	@s: string into which data is output
1305  *	@ofs: offset into identify device page
1306  *	@len: length of string to return. must be an odd number.
1307  *
1308  *	This function is identical to ata_id_string except that it
1309  *	trims trailing spaces and terminates the resulting string with
1310  *	null.  @len must be actual maximum length (even number) + 1.
1311  *
1312  *	LOCKING:
1313  *	caller.
1314  */
1315 void ata_id_c_string(const u16 *id, unsigned char *s,
1316 		     unsigned int ofs, unsigned int len)
1317 {
1318 	unsigned char *p;
1319 
1320 	ata_id_string(id, s, ofs, len - 1);
1321 
1322 	p = s + strnlen(s, len - 1);
1323 	while (p > s && p[-1] == ' ')
1324 		p--;
1325 	*p = '\0';
1326 }
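/*
 * Typical use (a sketch mirroring how libata itself pulls the model string
 * out of IDENTIFY data): note that @len includes the terminating NUL, hence
 * the "+ 1".
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */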
1327 
1328 static u64 ata_id_n_sectors(const u16 *id)
1329 {
1330 	if (ata_id_has_lba(id)) {
1331 		if (ata_id_has_lba48(id))
1332 			return ata_id_u64(id, 100);
1333 		else
1334 			return ata_id_u32(id, 60);
1335 	} else {
1336 		if (ata_id_current_chs_valid(id))
1337 			return ata_id_u32(id, 57);
1338 		else
1339 			return id[1] * id[3] * id[6];
1340 	}
1341 }
1342 
1343 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1344 {
1345 	u64 sectors = 0;
1346 
1347 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1348 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1349 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1350 	sectors |= (tf->lbah & 0xff) << 16;
1351 	sectors |= (tf->lbam & 0xff) << 8;
1352 	sectors |= (tf->lbal & 0xff);
1353 
1354 	return sectors;
1355 }
1356 
1357 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1358 {
1359 	u64 sectors = 0;
1360 
1361 	sectors |= (tf->device & 0x0f) << 24;
1362 	sectors |= (tf->lbah & 0xff) << 16;
1363 	sectors |= (tf->lbam & 0xff) << 8;
1364 	sectors |= (tf->lbal & 0xff);
1365 
1366 	return sectors;
1367 }
1368 
1369 /**
1370  *	ata_read_native_max_address - Read native max address
1371  *	@dev: target device
1372  *	@max_sectors: out parameter for the result native max address
1373  *
1374  *	Perform an LBA48 or LBA28 native size query upon the device in
1375  *	question.
1376  *
1377  *	RETURNS:
1378  *	0 on success, -EACCES if command is aborted by the drive.
1379  *	-EIO on other errors.
1380  */
1381 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1382 {
1383 	unsigned int err_mask;
1384 	struct ata_taskfile tf;
1385 	int lba48 = ata_id_has_lba48(dev->id);
1386 
1387 	ata_tf_init(dev, &tf);
1388 
1389 	/* always clear all address registers */
1390 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1391 
1392 	if (lba48) {
1393 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1394 		tf.flags |= ATA_TFLAG_LBA48;
1395 	} else
1396 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1397 
1398 	tf.protocol |= ATA_PROT_NODATA;
1399 	tf.device |= ATA_LBA;
1400 
1401 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1402 	if (err_mask) {
1403 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1404 			       "max address (err_mask=0x%x)\n", err_mask);
1405 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1406 			return -EACCES;
1407 		return -EIO;
1408 	}
1409 
1410 	if (lba48)
1411 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1412 	else
1413 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1414 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1415 		(*max_sectors)--;
1416 	return 0;
1417 }
1418 
1419 /**
1420  *	ata_set_max_sectors - Set max sectors
1421  *	@dev: target device
1422  *	@new_sectors: new max sectors value to set for the device
1423  *
1424  *	Set max sectors of @dev to @new_sectors.
1425  *
1426  *	RETURNS:
1427  *	0 on success, -EACCES if command is aborted or denied (due to
1428  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1429  *	errors.
1430  */
1431 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1432 {
1433 	unsigned int err_mask;
1434 	struct ata_taskfile tf;
1435 	int lba48 = ata_id_has_lba48(dev->id);
1436 
1437 	new_sectors--;
1438 
1439 	ata_tf_init(dev, &tf);
1440 
1441 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1442 
1443 	if (lba48) {
1444 		tf.command = ATA_CMD_SET_MAX_EXT;
1445 		tf.flags |= ATA_TFLAG_LBA48;
1446 
1447 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1448 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1449 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1450 	} else {
1451 		tf.command = ATA_CMD_SET_MAX;
1452 
1453 		tf.device |= (new_sectors >> 24) & 0xf;
1454 	}
1455 
1456 	tf.protocol |= ATA_PROT_NODATA;
1457 	tf.device |= ATA_LBA;
1458 
1459 	tf.lbal = (new_sectors >> 0) & 0xff;
1460 	tf.lbam = (new_sectors >> 8) & 0xff;
1461 	tf.lbah = (new_sectors >> 16) & 0xff;
1462 
1463 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1464 	if (err_mask) {
1465 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1466 			       "max address (err_mask=0x%x)\n", err_mask);
1467 		if (err_mask == AC_ERR_DEV &&
1468 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1469 			return -EACCES;
1470 		return -EIO;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
1476 /**
1477  *	ata_hpa_resize		-	Resize a device with an HPA set
1478  *	@dev: Device to resize
1479  *
1480  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1481  *	it if required to the full size of the media. The caller must check
1482  *	the drive has the HPA feature set enabled.
1483  *
1484  *	RETURNS:
1485  *	0 on success, -errno on failure.
1486  */
1487 static int ata_hpa_resize(struct ata_device *dev)
1488 {
1489 	struct ata_eh_context *ehc = &dev->link->eh_context;
1490 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1491 	u64 sectors = ata_id_n_sectors(dev->id);
1492 	u64 native_sectors;
1493 	int rc;
1494 
1495 	/* do we need to do it? */
1496 	if (dev->class != ATA_DEV_ATA ||
1497 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1498 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1499 		return 0;
1500 
1501 	/* read native max address */
1502 	rc = ata_read_native_max_address(dev, &native_sectors);
1503 	if (rc) {
1504 		/* If device aborted the command or HPA isn't going to
1505 		 * be unlocked, skip HPA resizing.
1506 		 */
1507 		if (rc == -EACCES || !ata_ignore_hpa) {
1508 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1509 				       "broken, skipping HPA handling\n");
1510 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1511 
1512 			/* we can continue if device aborted the command */
1513 			if (rc == -EACCES)
1514 				rc = 0;
1515 		}
1516 
1517 		return rc;
1518 	}
1519 
1520 	/* nothing to do? */
1521 	if (native_sectors <= sectors || !ata_ignore_hpa) {
1522 		if (!print_info || native_sectors == sectors)
1523 			return 0;
1524 
1525 		if (native_sectors > sectors)
1526 			ata_dev_printk(dev, KERN_INFO,
1527 				"HPA detected: current %llu, native %llu\n",
1528 				(unsigned long long)sectors,
1529 				(unsigned long long)native_sectors);
1530 		else if (native_sectors < sectors)
1531 			ata_dev_printk(dev, KERN_WARNING,
1532 				"native sectors (%llu) is smaller than "
1533 				"sectors (%llu)\n",
1534 				(unsigned long long)native_sectors,
1535 				(unsigned long long)sectors);
1536 		return 0;
1537 	}
1538 
1539 	/* let's unlock HPA */
1540 	rc = ata_set_max_sectors(dev, native_sectors);
1541 	if (rc == -EACCES) {
1542 		/* if device aborted the command, skip HPA resizing */
1543 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1544 			       "(%llu -> %llu), skipping HPA handling\n",
1545 			       (unsigned long long)sectors,
1546 			       (unsigned long long)native_sectors);
1547 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1548 		return 0;
1549 	} else if (rc)
1550 		return rc;
1551 
1552 	/* re-read IDENTIFY data */
1553 	rc = ata_dev_reread_id(dev, 0);
1554 	if (rc) {
1555 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1556 			       "data after HPA resizing\n");
1557 		return rc;
1558 	}
1559 
1560 	if (print_info) {
1561 		u64 new_sectors = ata_id_n_sectors(dev->id);
1562 		ata_dev_printk(dev, KERN_INFO,
1563 			"HPA unlocked: %llu -> %llu, native %llu\n",
1564 			(unsigned long long)sectors,
1565 			(unsigned long long)new_sectors,
1566 			(unsigned long long)native_sectors);
1567 	}
1568 
1569 	return 0;
1570 }
1571 
1572 /**
1573  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1574  *	@id: IDENTIFY DEVICE page to dump
1575  *
1576  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1577  *	page.
1578  *
1579  *	LOCKING:
1580  *	caller.
1581  */
1582 
1583 static inline void ata_dump_id(const u16 *id)
1584 {
1585 	DPRINTK("49==0x%04x  "
1586 		"53==0x%04x  "
1587 		"63==0x%04x  "
1588 		"64==0x%04x  "
1589 		"75==0x%04x  \n",
1590 		id[49],
1591 		id[53],
1592 		id[63],
1593 		id[64],
1594 		id[75]);
1595 	DPRINTK("80==0x%04x  "
1596 		"81==0x%04x  "
1597 		"82==0x%04x  "
1598 		"83==0x%04x  "
1599 		"84==0x%04x  \n",
1600 		id[80],
1601 		id[81],
1602 		id[82],
1603 		id[83],
1604 		id[84]);
1605 	DPRINTK("88==0x%04x  "
1606 		"93==0x%04x\n",
1607 		id[88],
1608 		id[93]);
1609 }
1610 
1611 /**
1612  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1613  *	@id: IDENTIFY data to compute xfer mask from
1614  *
1615  *	Compute the xfermask for this device. This is not as trivial
1616  *	as it seems if we must consider early devices correctly.
1617  *
1618  *	FIXME: pre IDE drive timing (do we care ?).
1619  *
1620  *	LOCKING:
1621  *	None.
1622  *
1623  *	RETURNS:
1624  *	Computed xfermask
1625  */
1626 unsigned long ata_id_xfermask(const u16 *id)
1627 {
1628 	unsigned long pio_mask, mwdma_mask, udma_mask;
1629 
1630 	/* Usual case. Word 53 indicates word 64 is valid */
1631 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1632 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1633 		pio_mask <<= 3;
1634 		pio_mask |= 0x7;
1635 	} else {
1636 		/* If word 64 isn't valid then Word 51 high byte holds
1637 		 * the PIO timing number for the maximum. Turn it into
1638 		 * a mask.
1639 		 */
1640 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1641 		if (mode < 5)	/* Valid PIO range */
1642 			pio_mask = (2 << mode) - 1;
1643 		else
1644 			pio_mask = 1;
1645 
1646 		/* But wait.. there's more. Design your standards by
1647 		 * committee and you too can get a free iordy field to
1648 		 * process.  However it's the speeds, not the modes, that
1649 		 * are supported... Note drivers using the timing API
1650 		 * will get this right anyway
1651 		 */
1652 	}
1653 
1654 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1655 
1656 	if (ata_id_is_cfa(id)) {
1657 		/*
1658 		 *	Process compact flash extended modes
1659 		 */
1660 		int pio = id[163] & 0x7;
1661 		int dma = (id[163] >> 3) & 7;
1662 
1663 		if (pio)
1664 			pio_mask |= (1 << 5);
1665 		if (pio > 1)
1666 			pio_mask |= (1 << 6);
1667 		if (dma)
1668 			mwdma_mask |= (1 << 3);
1669 		if (dma > 1)
1670 			mwdma_mask |= (1 << 4);
1671 	}
1672 
1673 	udma_mask = 0;
1674 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1675 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1676 
1677 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1678 }
1679 
1680 /**
1681  *	ata_pio_queue_task - Queue port_task
1682  *	@ap: The ata_port to queue port_task for
1683  *	@data: data for the port_task function to use
1684  *	@delay: delay time in msecs for workqueue function
1685  *
1686  *	Schedule the port_task function to run with @data after @delay
1687  *	msecs.  There is one port_task per port and it's the
1688  *	user's (i.e. the low level driver's) responsibility to make sure that only
1689  *	one task is active at any given time.
1690  *
1691  *	libata core layer takes care of synchronization between
1692  *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
1693  *	synchronization.
1694  *
1695  *	LOCKING:
1696  *	Inherited from caller.
1697  */
1698 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1699 {
1700 	ap->port_task_data = data;
1701 
1702 	/* may fail if ata_port_flush_task() in progress */
1703 	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1704 }
1705 
1706 /**
1707  *	ata_port_flush_task - Flush port_task
1708  *	@ap: The ata_port to flush port_task for
1709  *
1710  *	After this function completes, port_task is guaranteed not to
1711  *	be running or scheduled.
1712  *
1713  *	LOCKING:
1714  *	Kernel thread context (may sleep)
1715  */
1716 void ata_port_flush_task(struct ata_port *ap)
1717 {
1718 	DPRINTK("ENTER\n");
1719 
1720 	cancel_rearming_delayed_work(&ap->port_task);
1721 
1722 	if (ata_msg_ctl(ap))
1723 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1724 }
1725 
1726 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1727 {
1728 	struct completion *waiting = qc->private_data;
1729 
1730 	complete(waiting);
1731 }
1732 
1733 /**
1734  *	ata_exec_internal_sg - execute libata internal command
1735  *	@dev: Device to which the command is sent
1736  *	@tf: Taskfile registers for the command and the result
1737  *	@cdb: CDB for packet command
1738  *	@dma_dir: Data transfer direction of the command
1739  *	@sgl: sg list for the data buffer of the command
1740  *	@n_elem: Number of sg entries
1741  *	@timeout: Timeout in msecs (0 for default)
1742  *
1743  *	Executes libata internal command with timeout.  @tf contains
1744  *	command on entry and result on return.  Timeout and error
1745  *	conditions are reported via return value.  No recovery action
1746  *	is taken after a command times out.  It's caller's duty to
1747  *	is taken after a command times out.  It's the caller's duty to
1748  *
1749  *	LOCKING:
1750  *	None.  Should be called with kernel context, might sleep.
1751  *
1752  *	RETURNS:
1753  *	Zero on success, AC_ERR_* mask on failure
1754  */
1755 unsigned ata_exec_internal_sg(struct ata_device *dev,
1756 			      struct ata_taskfile *tf, const u8 *cdb,
1757 			      int dma_dir, struct scatterlist *sgl,
1758 			      unsigned int n_elem, unsigned long timeout)
1759 {
1760 	struct ata_link *link = dev->link;
1761 	struct ata_port *ap = link->ap;
1762 	u8 command = tf->command;
1763 	int auto_timeout = 0;
1764 	struct ata_queued_cmd *qc;
1765 	unsigned int tag, preempted_tag;
1766 	u32 preempted_sactive, preempted_qc_active;
1767 	int preempted_nr_active_links;
1768 	DECLARE_COMPLETION_ONSTACK(wait);
1769 	unsigned long flags;
1770 	unsigned int err_mask;
1771 	int rc;
1772 
1773 	spin_lock_irqsave(ap->lock, flags);
1774 
1775 	/* no internal command while frozen */
1776 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1777 		spin_unlock_irqrestore(ap->lock, flags);
1778 		return AC_ERR_SYSTEM;
1779 	}
1780 
1781 	/* initialize internal qc */
1782 
1783 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1784 	 * drivers choke if any other tag is given.  This breaks
1785 	 * ata_tag_internal() test for those drivers.  Don't use new
1786 	 * EH stuff without converting to it.
1787 	 */
1788 	if (ap->ops->error_handler)
1789 		tag = ATA_TAG_INTERNAL;
1790 	else
1791 		tag = 0;
1792 
1793 	if (test_and_set_bit(tag, &ap->qc_allocated))
1794 		BUG();
1795 	qc = __ata_qc_from_tag(ap, tag);
1796 
1797 	qc->tag = tag;
1798 	qc->scsicmd = NULL;
1799 	qc->ap = ap;
1800 	qc->dev = dev;
1801 	ata_qc_reinit(qc);
1802 
1803 	preempted_tag = link->active_tag;
1804 	preempted_sactive = link->sactive;
1805 	preempted_qc_active = ap->qc_active;
1806 	preempted_nr_active_links = ap->nr_active_links;
1807 	link->active_tag = ATA_TAG_POISON;
1808 	link->sactive = 0;
1809 	ap->qc_active = 0;
1810 	ap->nr_active_links = 0;
1811 
1812 	/* prepare & issue qc */
1813 	qc->tf = *tf;
1814 	if (cdb)
1815 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1816 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1817 	qc->dma_dir = dma_dir;
1818 	if (dma_dir != DMA_NONE) {
1819 		unsigned int i, buflen = 0;
1820 		struct scatterlist *sg;
1821 
1822 		for_each_sg(sgl, sg, n_elem, i)
1823 			buflen += sg->length;
1824 
1825 		ata_sg_init(qc, sgl, n_elem);
1826 		qc->nbytes = buflen;
1827 	}
1828 
1829 	qc->private_data = &wait;
1830 	qc->complete_fn = ata_qc_complete_internal;
1831 
1832 	ata_qc_issue(qc);
1833 
1834 	spin_unlock_irqrestore(ap->lock, flags);
1835 
1836 	if (!timeout) {
1837 		if (ata_probe_timeout)
1838 			timeout = ata_probe_timeout * 1000;
1839 		else {
1840 			timeout = ata_internal_cmd_timeout(dev, command);
1841 			auto_timeout = 1;
1842 		}
1843 	}
1844 
1845 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1846 
1847 	ata_port_flush_task(ap);
1848 
1849 	if (!rc) {
1850 		spin_lock_irqsave(ap->lock, flags);
1851 
1852 		/* We're racing with irq here.  If we lose, the
1853 		 * following test prevents us from completing the qc
1854 		 * twice.  If we win, the port is frozen and will be
1855 		 * cleaned up by ->post_internal_cmd().
1856 		 */
1857 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1858 			qc->err_mask |= AC_ERR_TIMEOUT;
1859 
1860 			if (ap->ops->error_handler)
1861 				ata_port_freeze(ap);
1862 			else
1863 				ata_qc_complete(qc);
1864 
1865 			if (ata_msg_warn(ap))
1866 				ata_dev_printk(dev, KERN_WARNING,
1867 					"qc timeout (cmd 0x%x)\n", command);
1868 		}
1869 
1870 		spin_unlock_irqrestore(ap->lock, flags);
1871 	}
1872 
1873 	/* do post_internal_cmd */
1874 	if (ap->ops->post_internal_cmd)
1875 		ap->ops->post_internal_cmd(qc);
1876 
1877 	/* perform minimal error analysis */
1878 	if (qc->flags & ATA_QCFLAG_FAILED) {
1879 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1880 			qc->err_mask |= AC_ERR_DEV;
1881 
1882 		if (!qc->err_mask)
1883 			qc->err_mask |= AC_ERR_OTHER;
1884 
1885 		if (qc->err_mask & ~AC_ERR_OTHER)
1886 			qc->err_mask &= ~AC_ERR_OTHER;
1887 	}
1888 
1889 	/* finish up */
1890 	spin_lock_irqsave(ap->lock, flags);
1891 
1892 	*tf = qc->result_tf;
1893 	err_mask = qc->err_mask;
1894 
1895 	ata_qc_free(qc);
1896 	link->active_tag = preempted_tag;
1897 	link->sactive = preempted_sactive;
1898 	ap->qc_active = preempted_qc_active;
1899 	ap->nr_active_links = preempted_nr_active_links;
1900 
1901 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1902 	 * Until those drivers are fixed, we detect the condition
1903 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1904 	 * port.
1905 	 *
1906 	 * Note that this doesn't change any behavior as internal
1907 	 * command failure results in disabling the device in the
1908 	 * higher layer for LLDDs without new reset/EH callbacks.
1909 	 *
1910 	 * Kill the following code as soon as those drivers are fixed.
1911 	 */
1912 	if (ap->flags & ATA_FLAG_DISABLED) {
1913 		err_mask |= AC_ERR_SYSTEM;
1914 		ata_port_probe(ap);
1915 	}
1916 
1917 	spin_unlock_irqrestore(ap->lock, flags);
1918 
1919 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1920 		ata_internal_cmd_timed_out(dev, command);
1921 
1922 	return err_mask;
1923 }
1924 
1925 /**
1926  *	ata_exec_internal - execute libata internal command
1927  *	@dev: Device to which the command is sent
1928  *	@tf: Taskfile registers for the command and the result
1929  *	@cdb: CDB for packet command
1930  *	@dma_dir: Data transfer direction of the command
1931  *	@buf: Data buffer of the command
1932  *	@buflen: Length of data buffer
1933  *	@timeout: Timeout in msecs (0 for default)
1934  *
1935  *	Wrapper around ata_exec_internal_sg() which takes simple
1936  *	buffer instead of sg list.
1937  *
1938  *	LOCKING:
1939  *	None.  Should be called with kernel context, might sleep.
1940  *
1941  *	RETURNS:
1942  *	Zero on success, AC_ERR_* mask on failure
1943  */
1944 unsigned ata_exec_internal(struct ata_device *dev,
1945 			   struct ata_taskfile *tf, const u8 *cdb,
1946 			   int dma_dir, void *buf, unsigned int buflen,
1947 			   unsigned long timeout)
1948 {
1949 	struct scatterlist *psg = NULL, sg;
1950 	unsigned int n_elem = 0;
1951 
1952 	if (dma_dir != DMA_NONE) {
1953 		WARN_ON(!buf);
1954 		sg_init_one(&sg, buf, buflen);
1955 		psg = &sg;
1956 		n_elem++;
1957 	}
1958 
1959 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1960 				    timeout);
1961 }
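
/*
 * Illustrative sketch (not part of the driver): an internal PIO data-in
 * command is built much as ata_do_dev_read_id()/ata_dev_read_id() below
 * build their IDENTIFY; "id_buf" and the surrounding error handling are
 * assumptions made for the example.
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_PIO;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     id_buf, ATA_ID_WORDS * sizeof(u16), 0);
 */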
1962 
1963 /**
1964  *	ata_do_simple_cmd - execute simple internal command
1965  *	@dev: Device to which the command is sent
1966  *	@cmd: Opcode to execute
1967  *
1968  *	Execute a 'simple' command, that only consists of the opcode
1969  *	'cmd' itself, without filling any other registers
1970  *
1971  *	LOCKING:
1972  *	Kernel thread context (may sleep).
1973  *
1974  *	RETURNS:
1975  *	Zero on success, AC_ERR_* mask on failure
1976  */
1977 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1978 {
1979 	struct ata_taskfile tf;
1980 
1981 	ata_tf_init(dev, &tf);
1982 
1983 	tf.command = cmd;
1984 	tf.flags |= ATA_TFLAG_DEVICE;
1985 	tf.protocol = ATA_PROT_NODATA;
1986 
1987 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1988 }
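
/*
 * Illustrative sketch (not part of the driver): the calling convention
 * for a single-opcode command, e.g. a cache flush.  Whether ATA_CMD_FLUSH
 * or ATA_CMD_FLUSH_EXT is the right opcode depends on the device; the
 * choice here is only an example.
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR,
 *			       "flush failed (err_mask=0x%x)\n", err_mask);
 */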
1989 
1990 /**
1991  *	ata_pio_need_iordy	-	check if iordy needed
1992  *	@adev: ATA device
1993  *
1994  *	Check if the current speed of the device requires IORDY. Used
1995  *	by various controllers for chip configuration.
1996  */
1997 
1998 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1999 {
2000 	/* Controller doesn't support IORDY. Probably a pointless check
2001 	   as the caller should know this */
2002 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
2003 		return 0;
2004 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
2005 	if (ata_id_is_cfa(adev->id)
2006 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
2007 		return 0;
2008 	/* For PIO3 and higher, IORDY is mandatory */
2009 	if (adev->pio_mode > XFER_PIO_2)
2010 		return 1;
2011 	/* We turn it on when possible */
2012 	if (ata_id_has_iordy(adev->id))
2013 		return 1;
2014 	return 0;
2015 }
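
/*
 * Illustrative sketch (not part of the driver): a PATA LLD would
 * typically consult ata_pio_need_iordy() from its ->set_piomode() hook
 * when programming timing registers.  All "example_*" names and the
 * IORDY enable bit are hypothetical.
 *
 *	static void example_set_piomode(struct ata_port *ap,
 *					struct ata_device *adev)
 *	{
 *		u32 timing = example_pio_timing(adev->pio_mode);
 *
 *		if (ata_pio_need_iordy(adev))
 *			timing |= EXAMPLE_IORDY_EN;
 *		example_write_timing(ap, adev->devno, timing);
 *	}
 */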
2016 
2017 /**
2018  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
2019  *	@adev: ATA device
2020  *
2021  *	Compute the highest mode possible if we are not using iordy. Return
2022  *	-1 if no iordy mode is available.
2023  */
2024 
2025 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2026 {
2027 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
2028 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
2029 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
2030 		/* Is the speed faster than the drive allows non IORDY ? */
2031 		if (pio) {
2032 			/* These are cycle times, not frequencies - watch the logic! */
2033 			if (pio > 240)	/* PIO2 is 240nS per cycle */
2034 				return 3 << ATA_SHIFT_PIO;
2035 			return 7 << ATA_SHIFT_PIO;
2036 		}
2037 	}
2038 	return 3 << ATA_SHIFT_PIO;
2039 }
2040 
2041 /**
2042  *	ata_do_dev_read_id		-	default ID read method
2043  *	@dev: device
2044  *	@tf: proposed taskfile
2045  *	@id: data buffer
2046  *
2047  *	Issue the identify taskfile and hand back the buffer containing
2048  *	identify data. For some RAID controllers and for pre-ATA devices
2049  *	this function is wrapped or replaced by the driver.
2050  */
2051 unsigned int ata_do_dev_read_id(struct ata_device *dev,
2052 					struct ata_taskfile *tf, u16 *id)
2053 {
2054 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
2055 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
2056 }
2057 
2058 /**
2059  *	ata_dev_read_id - Read ID data from the specified device
2060  *	@dev: target device
2061  *	@p_class: pointer to class of the target device (may be changed)
2062  *	@flags: ATA_READID_* flags
2063  *	@id: buffer to read IDENTIFY data into
2064  *
2065  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
2066  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
2067  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
2068  *	for pre-ATA4 drives.
2069  *
2070  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2071  *	now we abort if we hit that case.
2072  *
2073  *	LOCKING:
2074  *	Kernel thread context (may sleep)
2075  *
2076  *	RETURNS:
2077  *	0 on success, -errno otherwise.
2078  */
2079 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2080 		    unsigned int flags, u16 *id)
2081 {
2082 	struct ata_port *ap = dev->link->ap;
2083 	unsigned int class = *p_class;
2084 	struct ata_taskfile tf;
2085 	unsigned int err_mask = 0;
2086 	const char *reason;
2087 	int may_fallback = 1, tried_spinup = 0;
2088 	int rc;
2089 
2090 	if (ata_msg_ctl(ap))
2091 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2092 
2093 retry:
2094 	ata_tf_init(dev, &tf);
2095 
2096 	switch (class) {
2097 	case ATA_DEV_ATA:
2098 		tf.command = ATA_CMD_ID_ATA;
2099 		break;
2100 	case ATA_DEV_ATAPI:
2101 		tf.command = ATA_CMD_ID_ATAPI;
2102 		break;
2103 	default:
2104 		rc = -ENODEV;
2105 		reason = "unsupported class";
2106 		goto err_out;
2107 	}
2108 
2109 	tf.protocol = ATA_PROT_PIO;
2110 
2111 	/* Some devices choke if TF registers contain garbage.  Make
2112 	 * sure those are properly initialized.
2113 	 */
2114 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2115 
2116 	/* Device presence detection is unreliable on some
2117 	 * controllers.  Always poll IDENTIFY if available.
2118 	 */
2119 	tf.flags |= ATA_TFLAG_POLLING;
2120 
2121 	if (ap->ops->read_id)
2122 		err_mask = ap->ops->read_id(dev, &tf, id);
2123 	else
2124 		err_mask = ata_do_dev_read_id(dev, &tf, id);
2125 
2126 	if (err_mask) {
2127 		if (err_mask & AC_ERR_NODEV_HINT) {
2128 			ata_dev_printk(dev, KERN_DEBUG,
2129 				       "NODEV after polling detection\n");
2130 			return -ENOENT;
2131 		}
2132 
2133 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2134 			/* Device or controller might have reported
2135 			 * the wrong device class.  Give a shot at the
2136 			 * other IDENTIFY if the current one is
2137 			 * aborted by the device.
2138 			 */
2139 			if (may_fallback) {
2140 				may_fallback = 0;
2141 
2142 				if (class == ATA_DEV_ATA)
2143 					class = ATA_DEV_ATAPI;
2144 				else
2145 					class = ATA_DEV_ATA;
2146 				goto retry;
2147 			}
2148 
2149 			/* Control reaches here iff the device aborted
2150 			 * both flavors of IDENTIFYs which happens
2151 			 * sometimes with phantom devices.
2152 			 */
2153 			ata_dev_printk(dev, KERN_DEBUG,
2154 				       "both IDENTIFYs aborted, assuming NODEV\n");
2155 			return -ENOENT;
2156 		}
2157 
2158 		rc = -EIO;
2159 		reason = "I/O error";
2160 		goto err_out;
2161 	}
2162 
2163 	/* Falling back doesn't make sense if ID data was read
2164 	 * successfully at least once.
2165 	 */
2166 	may_fallback = 0;
2167 
2168 	swap_buf_le16(id, ATA_ID_WORDS);
2169 
2170 	/* sanity check */
2171 	rc = -EINVAL;
2172 	reason = "device reports invalid type";
2173 
2174 	if (class == ATA_DEV_ATA) {
2175 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2176 			goto err_out;
2177 	} else {
2178 		if (ata_id_is_ata(id))
2179 			goto err_out;
2180 	}
2181 
2182 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2183 		tried_spinup = 1;
2184 		/*
2185 		 * Drive powered-up in standby mode, and requires a specific
2186 		 * SET_FEATURES spin-up subcommand before it will accept
2187 		 * anything other than the original IDENTIFY command.
2188 		 */
2189 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2190 		if (err_mask && id[2] != 0x738c) {
2191 			rc = -EIO;
2192 			reason = "SPINUP failed";
2193 			goto err_out;
2194 		}
2195 		/*
2196 		 * If the drive initially returned incomplete IDENTIFY info,
2197 		 * we now must reissue the IDENTIFY command.
2198 		 */
2199 		if (id[2] == 0x37c8)
2200 			goto retry;
2201 	}
2202 
2203 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2204 		/*
2205 		 * The exact sequence expected by certain pre-ATA4 drives is:
2206 		 * SRST RESET
2207 		 * IDENTIFY (optional in early ATA)
2208 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2209 		 * anything else..
2210 		 * Some drives were very specific about that exact sequence.
2211 		 *
2212 		 * Note that ATA4 says LBA is mandatory so the second check
2213 		 * should never trigger.
2214 		 */
2215 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2216 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2217 			if (err_mask) {
2218 				rc = -EIO;
2219 				reason = "INIT_DEV_PARAMS failed";
2220 				goto err_out;
2221 			}
2222 
2223 			/* current CHS translation info (id[53-58]) might be
2224 			 * changed. reread the identify device info.
2225 			 */
2226 			flags &= ~ATA_READID_POSTRESET;
2227 			goto retry;
2228 		}
2229 	}
2230 
2231 	*p_class = class;
2232 
2233 	return 0;
2234 
2235  err_out:
2236 	if (ata_msg_warn(ap))
2237 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2238 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2239 	return rc;
2240 }
2241 
2242 static inline u8 ata_dev_knobble(struct ata_device *dev)
2243 {
2244 	struct ata_port *ap = dev->link->ap;
2245 
2246 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2247 		return 0;
2248 
2249 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2250 }
2251 
2252 static void ata_dev_config_ncq(struct ata_device *dev,
2253 			       char *desc, size_t desc_sz)
2254 {
2255 	struct ata_port *ap = dev->link->ap;
2256 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2257 
2258 	if (!ata_id_has_ncq(dev->id)) {
2259 		desc[0] = '\0';
2260 		return;
2261 	}
2262 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2263 		snprintf(desc, desc_sz, "NCQ (not used)");
2264 		return;
2265 	}
2266 	if (ap->flags & ATA_FLAG_NCQ) {
2267 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2268 		dev->flags |= ATA_DFLAG_NCQ;
2269 	}
2270 
2271 	if (hdepth >= ddepth)
2272 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2273 	else
2274 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2275 }
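
/*
 * Worked example of the description built above (values illustrative
 * only): with scsi_host->can_queue clamped to 31 and a drive reporting
 * a queue depth of 32 in its IDENTIFY data, hdepth = 31 and ddepth = 32,
 * so the string reads "NCQ (depth 31/32)".  When the host depth covers
 * the drive's (hdepth >= ddepth) only the drive depth is printed, e.g.
 * "NCQ (depth 31)".
 */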
2276 
2277 /**
2278  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2279  *	@dev: Target device to configure
2280  *
2281  *	Configure @dev according to @dev->id.  Generic and low-level
2282  *	driver specific fixups are also applied.
2283  *
2284  *	LOCKING:
2285  *	Kernel thread context (may sleep)
2286  *
2287  *	RETURNS:
2288  *	0 on success, -errno otherwise
2289  */
2290 int ata_dev_configure(struct ata_device *dev)
2291 {
2292 	struct ata_port *ap = dev->link->ap;
2293 	struct ata_eh_context *ehc = &dev->link->eh_context;
2294 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2295 	const u16 *id = dev->id;
2296 	unsigned long xfer_mask;
2297 	char revbuf[7];		/* XYZ-99\0 */
2298 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2299 	char modelbuf[ATA_ID_PROD_LEN+1];
2300 	int rc;
2301 
2302 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2303 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2304 			       __func__);
2305 		return 0;
2306 	}
2307 
2308 	if (ata_msg_probe(ap))
2309 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2310 
2311 	/* set horkage */
2312 	dev->horkage |= ata_dev_blacklisted(dev);
2313 	ata_force_horkage(dev);
2314 
2315 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2316 		ata_dev_printk(dev, KERN_INFO,
2317 			       "unsupported device, disabling\n");
2318 		ata_dev_disable(dev);
2319 		return 0;
2320 	}
2321 
2322 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2323 	    dev->class == ATA_DEV_ATAPI) {
2324 		ata_dev_printk(dev, KERN_WARNING,
2325 			"WARNING: ATAPI is %s, device ignored.\n",
2326 			atapi_enabled ? "not supported with this driver"
2327 				      : "disabled");
2328 		ata_dev_disable(dev);
2329 		return 0;
2330 	}
2331 
2332 	/* let ACPI work its magic */
2333 	rc = ata_acpi_on_devcfg(dev);
2334 	if (rc)
2335 		return rc;
2336 
2337 	/* massage HPA, do it early as it might change IDENTIFY data */
2338 	rc = ata_hpa_resize(dev);
2339 	if (rc)
2340 		return rc;
2341 
2342 	/* print device capabilities */
2343 	if (ata_msg_probe(ap))
2344 		ata_dev_printk(dev, KERN_DEBUG,
2345 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2346 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2347 			       __func__,
2348 			       id[49], id[82], id[83], id[84],
2349 			       id[85], id[86], id[87], id[88]);
2350 
2351 	/* initialize to-be-configured parameters */
2352 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2353 	dev->max_sectors = 0;
2354 	dev->cdb_len = 0;
2355 	dev->n_sectors = 0;
2356 	dev->cylinders = 0;
2357 	dev->heads = 0;
2358 	dev->sectors = 0;
2359 
2360 	/*
2361 	 * common ATA, ATAPI feature tests
2362 	 */
2363 
2364 	/* find max transfer mode; for printk only */
2365 	xfer_mask = ata_id_xfermask(id);
2366 
2367 	if (ata_msg_probe(ap))
2368 		ata_dump_id(id);
2369 
2370 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2371 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2372 			sizeof(fwrevbuf));
2373 
2374 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2375 			sizeof(modelbuf));
2376 
2377 	/* ATA-specific feature tests */
2378 	if (dev->class == ATA_DEV_ATA) {
2379 		if (ata_id_is_cfa(id)) {
2380 			if (id[162] & 1) /* CPRM may make this media unusable */
2381 				ata_dev_printk(dev, KERN_WARNING,
2382 					       "supports DRM functions and may "
2383 					       "not be fully accessable.\n");
2384 			snprintf(revbuf, 7, "CFA");
2385 		} else {
2386 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2387 			/* Warn the user if the device has TPM extensions */
2388 			if (ata_id_has_tpm(id))
2389 				ata_dev_printk(dev, KERN_WARNING,
2390 					       "supports DRM functions and may "
2391 					       "not be fully accessable.\n");
2392 		}
2393 
2394 		dev->n_sectors = ata_id_n_sectors(id);
2395 
2396 		if (dev->id[59] & 0x100)
2397 			dev->multi_count = dev->id[59] & 0xff;
2398 
2399 		if (ata_id_has_lba(id)) {
2400 			const char *lba_desc;
2401 			char ncq_desc[20];
2402 
2403 			lba_desc = "LBA";
2404 			dev->flags |= ATA_DFLAG_LBA;
2405 			if (ata_id_has_lba48(id)) {
2406 				dev->flags |= ATA_DFLAG_LBA48;
2407 				lba_desc = "LBA48";
2408 
2409 				if (dev->n_sectors >= (1UL << 28) &&
2410 				    ata_id_has_flush_ext(id))
2411 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2412 			}
2413 
2414 			/* config NCQ */
2415 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2416 
2417 			/* print device info to dmesg */
2418 			if (ata_msg_drv(ap) && print_info) {
2419 				ata_dev_printk(dev, KERN_INFO,
2420 					"%s: %s, %s, max %s\n",
2421 					revbuf, modelbuf, fwrevbuf,
2422 					ata_mode_string(xfer_mask));
2423 				ata_dev_printk(dev, KERN_INFO,
2424 					"%Lu sectors, multi %u: %s %s\n",
2425 					(unsigned long long)dev->n_sectors,
2426 					dev->multi_count, lba_desc, ncq_desc);
2427 			}
2428 		} else {
2429 			/* CHS */
2430 
2431 			/* Default translation */
2432 			dev->cylinders	= id[1];
2433 			dev->heads	= id[3];
2434 			dev->sectors	= id[6];
2435 
2436 			if (ata_id_current_chs_valid(id)) {
2437 				/* Current CHS translation is valid. */
2438 				dev->cylinders = id[54];
2439 				dev->heads     = id[55];
2440 				dev->sectors   = id[56];
2441 			}
2442 
2443 			/* print device info to dmesg */
2444 			if (ata_msg_drv(ap) && print_info) {
2445 				ata_dev_printk(dev, KERN_INFO,
2446 					"%s: %s, %s, max %s\n",
2447 					revbuf,	modelbuf, fwrevbuf,
2448 					ata_mode_string(xfer_mask));
2449 				ata_dev_printk(dev, KERN_INFO,
2450 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2451 					(unsigned long long)dev->n_sectors,
2452 					dev->multi_count, dev->cylinders,
2453 					dev->heads, dev->sectors);
2454 			}
2455 		}
2456 
2457 		dev->cdb_len = 16;
2458 	}
2459 
2460 	/* ATAPI-specific feature tests */
2461 	else if (dev->class == ATA_DEV_ATAPI) {
2462 		const char *cdb_intr_string = "";
2463 		const char *atapi_an_string = "";
2464 		const char *dma_dir_string = "";
2465 		u32 sntf;
2466 
2467 		rc = atapi_cdb_len(id);
2468 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2469 			if (ata_msg_warn(ap))
2470 				ata_dev_printk(dev, KERN_WARNING,
2471 					       "unsupported CDB len\n");
2472 			rc = -EINVAL;
2473 			goto err_out_nosup;
2474 		}
2475 		dev->cdb_len = (unsigned int) rc;
2476 
2477 		/* Enable ATAPI AN if both the host and device support
2478 		 * it.  If PMP is attached, SNTF is required
2479 		 * to enable ATAPI AN to discern between PHY status
2480 		 * changed notifications and ATAPI ANs.
2481 		 */
2482 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2483 		    (!sata_pmp_attached(ap) ||
2484 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2485 			unsigned int err_mask;
2486 
2487 			/* issue SET feature command to turn this on */
2488 			err_mask = ata_dev_set_feature(dev,
2489 					SETFEATURES_SATA_ENABLE, SATA_AN);
2490 			if (err_mask)
2491 				ata_dev_printk(dev, KERN_ERR,
2492 					"failed to enable ATAPI AN "
2493 					"(err_mask=0x%x)\n", err_mask);
2494 			else {
2495 				dev->flags |= ATA_DFLAG_AN;
2496 				atapi_an_string = ", ATAPI AN";
2497 			}
2498 		}
2499 
2500 		if (ata_id_cdb_intr(dev->id)) {
2501 			dev->flags |= ATA_DFLAG_CDB_INTR;
2502 			cdb_intr_string = ", CDB intr";
2503 		}
2504 
2505 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2506 			dev->flags |= ATA_DFLAG_DMADIR;
2507 			dma_dir_string = ", DMADIR";
2508 		}
2509 
2510 		/* print device info to dmesg */
2511 		if (ata_msg_drv(ap) && print_info)
2512 			ata_dev_printk(dev, KERN_INFO,
2513 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2514 				       modelbuf, fwrevbuf,
2515 				       ata_mode_string(xfer_mask),
2516 				       cdb_intr_string, atapi_an_string,
2517 				       dma_dir_string);
2518 	}
2519 
2520 	/* determine max_sectors */
2521 	dev->max_sectors = ATA_MAX_SECTORS;
2522 	if (dev->flags & ATA_DFLAG_LBA48)
2523 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2524 
2525 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2526 		if (ata_id_has_hipm(dev->id))
2527 			dev->flags |= ATA_DFLAG_HIPM;
2528 		if (ata_id_has_dipm(dev->id))
2529 			dev->flags |= ATA_DFLAG_DIPM;
2530 	}
2531 
2532 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2533 	   200 sectors */
2534 	if (ata_dev_knobble(dev)) {
2535 		if (ata_msg_drv(ap) && print_info)
2536 			ata_dev_printk(dev, KERN_INFO,
2537 				       "applying bridge limits\n");
2538 		dev->udma_mask &= ATA_UDMA5;
2539 		dev->max_sectors = ATA_MAX_SECTORS;
2540 	}
2541 
2542 	if ((dev->class == ATA_DEV_ATAPI) &&
2543 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2544 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2545 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2546 	}
2547 
2548 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2549 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2550 					 dev->max_sectors);
2551 
2552 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2553 		dev->horkage |= ATA_HORKAGE_IPM;
2554 
2555 		/* reset link pm_policy for this port to no pm */
2556 		ap->pm_policy = MAX_PERFORMANCE;
2557 	}
2558 
2559 	if (ap->ops->dev_config)
2560 		ap->ops->dev_config(dev);
2561 
2562 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2563 		/* Let the user know. We don't want to disallow opens for
2564 		   rescue purposes, or in case the vendor is just a blithering
2565 		   idiot. Do this after the dev_config call as some controllers
2566 		   with buggy firmware may want to avoid reporting false device
2567 		   bugs */
2568 
2569 		if (print_info) {
2570 			ata_dev_printk(dev, KERN_WARNING,
2571 "Drive reports diagnostics failure. This may indicate a drive\n");
2572 			ata_dev_printk(dev, KERN_WARNING,
2573 "fault or invalid emulation. Contact drive vendor for information.\n");
2574 		}
2575 	}
2576 
2577 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2578 		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2579 			       "firmware update to be fully functional.\n");
2580 		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
2581 			       "or visit http://ata.wiki.kernel.org.\n");
2582 	}
2583 
2584 	return 0;
2585 
2586 err_out_nosup:
2587 	if (ata_msg_probe(ap))
2588 		ata_dev_printk(dev, KERN_DEBUG,
2589 			       "%s: EXIT, err\n", __func__);
2590 	return rc;
2591 }
2592 
2593 /**
2594  *	ata_cable_40wire	-	return 40 wire cable type
2595  *	@ap: port
2596  *
2597  *	Helper method for drivers which want to hardwire 40 wire cable
2598  *	detection.
2599  */
2600 
2601 int ata_cable_40wire(struct ata_port *ap)
2602 {
2603 	return ATA_CBL_PATA40;
2604 }
2605 
2606 /**
2607  *	ata_cable_80wire	-	return 80 wire cable type
2608  *	@ap: port
2609  *
2610  *	Helper method for drivers which want to hardwire 80 wire cable
2611  *	detection.
2612  */
2613 
2614 int ata_cable_80wire(struct ata_port *ap)
2615 {
2616 	return ATA_CBL_PATA80;
2617 }
2618 
2619 /**
2620  *	ata_cable_unknown	-	return unknown PATA cable.
2621  *	@ap: port
2622  *
2623  *	Helper method for drivers which have no PATA cable detection.
2624  */
2625 
2626 int ata_cable_unknown(struct ata_port *ap)
2627 {
2628 	return ATA_CBL_PATA_UNK;
2629 }
2630 
2631 /**
2632  *	ata_cable_ignore	-	return ignored PATA cable.
2633  *	@ap: port
2634  *
2635  *	Helper method for drivers which don't use cable type to limit
2636  *	transfer mode.
2637  */
2638 int ata_cable_ignore(struct ata_port *ap)
2639 {
2640 	return ATA_CBL_PATA_IGN;
2641 }
2642 
2643 /**
2644  *	ata_cable_sata	-	return SATA cable type
2645  *	@ap: port
2646  *
2647  *	Helper method for drivers which have SATA cables
2648  */
2649 
2650 int ata_cable_sata(struct ata_port *ap)
2651 {
2652 	return ATA_CBL_SATA;
2653 }
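
/*
 * Illustrative sketch (not part of the driver): LLDs without real cable
 * detection logic simply plug one of the helpers above into their port
 * operations.  The struct below is a hypothetical example; only
 * .cable_detect refers to a helper defined here.
 *
 *	static struct ata_port_operations example_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= example_set_piomode,
 *	};
 */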
2654 
2655 /**
2656  *	ata_bus_probe - Reset and probe ATA bus
2657  *	@ap: Bus to probe
2658  *
2659  *	Master ATA bus probing function.  Initiates a hardware-dependent
2660  *	bus reset, then attempts to identify any devices found on
2661  *	the bus.
2662  *
2663  *	LOCKING:
2664  *	PCI/etc. bus probe sem.
2665  *
2666  *	RETURNS:
2667  *	Zero on success, negative errno otherwise.
2668  */
2669 
2670 int ata_bus_probe(struct ata_port *ap)
2671 {
2672 	unsigned int classes[ATA_MAX_DEVICES];
2673 	int tries[ATA_MAX_DEVICES];
2674 	int rc;
2675 	struct ata_device *dev;
2676 
2677 	ata_port_probe(ap);
2678 
2679 	ata_for_each_dev(dev, &ap->link, ALL)
2680 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2681 
2682  retry:
2683 	ata_for_each_dev(dev, &ap->link, ALL) {
2684 		/* If we issue an SRST then an ATA drive (not ATAPI)
2685 		 * may change configuration and be in PIO0 timing. If
2686 		 * we do a hard reset (or are coming from power on)
2687 		 * this is true for ATA or ATAPI. Until we've set a
2688 		 * suitable controller mode we should not touch the
2689 		 * bus as we may be talking too fast.
2690 		 */
2691 		dev->pio_mode = XFER_PIO_0;
2692 
2693 		/* If the controller has a pio mode setup function
2694 		 * then use it to set the chipset to rights. Don't
2695 		 * touch the DMA setup as that will be dealt with when
2696 		 * configuring devices.
2697 		 */
2698 		if (ap->ops->set_piomode)
2699 			ap->ops->set_piomode(ap, dev);
2700 	}
2701 
2702 	/* reset and determine device classes */
2703 	ap->ops->phy_reset(ap);
2704 
2705 	ata_for_each_dev(dev, &ap->link, ALL) {
2706 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2707 		    dev->class != ATA_DEV_UNKNOWN)
2708 			classes[dev->devno] = dev->class;
2709 		else
2710 			classes[dev->devno] = ATA_DEV_NONE;
2711 
2712 		dev->class = ATA_DEV_UNKNOWN;
2713 	}
2714 
2715 	ata_port_probe(ap);
2716 
2717 	/* read IDENTIFY page and configure devices. We have to do the identify
2718 	   specific sequence bass-ackwards so that PDIAG- is released by
2719 	   the slave device */
2720 
2721 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2722 		if (tries[dev->devno])
2723 			dev->class = classes[dev->devno];
2724 
2725 		if (!ata_dev_enabled(dev))
2726 			continue;
2727 
2728 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2729 				     dev->id);
2730 		if (rc)
2731 			goto fail;
2732 	}
2733 
2734 	/* Now ask for the cable type as PDIAG- should have been released */
2735 	if (ap->ops->cable_detect)
2736 		ap->cbl = ap->ops->cable_detect(ap);
2737 
2738 	/* We may have SATA bridge glue hiding here irrespective of
2739 	 * the reported cable types and sensed types.  When SATA
2740 	 * drives indicate we have a bridge, we don't know which end
2741 	 * of the link the bridge is on, which is a problem.
2742 	 */
2743 	ata_for_each_dev(dev, &ap->link, ENABLED)
2744 		if (ata_id_is_sata(dev->id))
2745 			ap->cbl = ATA_CBL_SATA;
2746 
2747 	/* After the identify sequence we can now set up the devices. We do
2748 	   this in the normal order so that the user doesn't get confused */
2749 
2750 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2751 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2752 		rc = ata_dev_configure(dev);
2753 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2754 		if (rc)
2755 			goto fail;
2756 	}
2757 
2758 	/* configure transfer mode */
2759 	rc = ata_set_mode(&ap->link, &dev);
2760 	if (rc)
2761 		goto fail;
2762 
2763 	ata_for_each_dev(dev, &ap->link, ENABLED)
2764 		return 0;
2765 
2766 	/* no device present, disable port */
2767 	ata_port_disable(ap);
2768 	return -ENODEV;
2769 
2770  fail:
2771 	tries[dev->devno]--;
2772 
2773 	switch (rc) {
2774 	case -EINVAL:
2775 		/* eeek, something went very wrong, give up */
2776 		tries[dev->devno] = 0;
2777 		break;
2778 
2779 	case -ENODEV:
2780 		/* give it just one more chance */
2781 		tries[dev->devno] = min(tries[dev->devno], 1);
2782 	case -EIO:
2783 		if (tries[dev->devno] == 1) {
2784 			/* This is the last chance, better to slow
2785 			 * down than lose it.
2786 			 */
2787 			sata_down_spd_limit(&ap->link);
2788 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2789 		}
2790 	}
2791 
2792 	if (!tries[dev->devno])
2793 		ata_dev_disable(dev);
2794 
2795 	goto retry;
2796 }
2797 
2798 /**
2799  *	ata_port_probe - Mark port as enabled
2800  *	@ap: Port for which we indicate enablement
2801  *
2802  *	Modify @ap data structure such that the system
2803  *	thinks that the entire port is enabled.
2804  *
2805  *	LOCKING: host lock, or some other form of
2806  *	serialization.
2807  */
2808 
2809 void ata_port_probe(struct ata_port *ap)
2810 {
2811 	ap->flags &= ~ATA_FLAG_DISABLED;
2812 }
2813 
2814 /**
2815  *	sata_print_link_status - Print SATA link status
2816  *	@link: SATA link to printk link status about
2817  *
2818  *	This function prints link speed and status of a SATA link.
2819  *
2820  *	LOCKING:
2821  *	None.
2822  */
2823 static void sata_print_link_status(struct ata_link *link)
2824 {
2825 	u32 sstatus, scontrol, tmp;
2826 
2827 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2828 		return;
2829 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2830 
2831 	if (ata_phys_link_online(link)) {
2832 		tmp = (sstatus >> 4) & 0xf;
2833 		ata_link_printk(link, KERN_INFO,
2834 				"SATA link up %s (SStatus %X SControl %X)\n",
2835 				sata_spd_string(tmp), sstatus, scontrol);
2836 	} else {
2837 		ata_link_printk(link, KERN_INFO,
2838 				"SATA link down (SStatus %X SControl %X)\n",
2839 				sstatus, scontrol);
2840 	}
2841 }
2842 
2843 /**
2844  *	ata_dev_pair		-	return other device on cable
2845  *	@adev: device
2846  *
2847  *	Obtain the other device on the same cable, or if none is
2848  *	present NULL is returned
2849  */
2850 
2851 struct ata_device *ata_dev_pair(struct ata_device *adev)
2852 {
2853 	struct ata_link *link = adev->link;
2854 	struct ata_device *pair = &link->device[1 - adev->devno];
2855 	if (!ata_dev_enabled(pair))
2856 		return NULL;
2857 	return pair;
2858 }
2859 
2860 /**
2861  *	ata_port_disable - Disable port.
2862  *	@ap: Port to be disabled.
2863  *
2864  *	Modify @ap data structure such that the system
2865  *	thinks that the entire port is disabled, and should
2866  *	never attempt to probe or communicate with devices
2867  *	on this port.
2868  *
2869  *	LOCKING: host lock, or some other form of
2870  *	serialization.
2871  */
2872 
2873 void ata_port_disable(struct ata_port *ap)
2874 {
2875 	ap->link.device[0].class = ATA_DEV_NONE;
2876 	ap->link.device[1].class = ATA_DEV_NONE;
2877 	ap->flags |= ATA_FLAG_DISABLED;
2878 }
2879 
2880 /**
2881  *	sata_down_spd_limit - adjust SATA spd limit downward
2882  *	@link: Link to adjust SATA spd limit for
2883  *
2884  *	Adjust SATA spd limit of @link downward.  Note that this
2885  *	function only adjusts the limit.  The change must be applied
2886  *	using sata_set_spd().
2887  *
2888  *	LOCKING:
2889  *	Inherited from caller.
2890  *
2891  *	RETURNS:
2892  *	0 on success, negative errno on failure
2893  */
2894 int sata_down_spd_limit(struct ata_link *link)
2895 {
2896 	u32 sstatus, spd, mask;
2897 	int rc, highbit;
2898 
2899 	if (!sata_scr_valid(link))
2900 		return -EOPNOTSUPP;
2901 
2902 	/* If SCR can be read, use it to determine the current SPD.
2903 	 * If not, use cached value in link->sata_spd.
2904 	 */
2905 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2906 	if (rc == 0)
2907 		spd = (sstatus >> 4) & 0xf;
2908 	else
2909 		spd = link->sata_spd;
2910 
2911 	mask = link->sata_spd_limit;
2912 	if (mask <= 1)
2913 		return -EINVAL;
2914 
2915 	/* unconditionally mask off the highest bit */
2916 	highbit = fls(mask) - 1;
2917 	mask &= ~(1 << highbit);
2918 
2919 	/* Mask off all speeds higher than or equal to the current
2920 	 * one.  Force 1.5Gbps if current SPD is not available.
2921 	 */
2922 	if (spd > 1)
2923 		mask &= (1 << (spd - 1)) - 1;
2924 	else
2925 		mask &= 1;
2926 
2927 	/* were we already at the bottom? */
2928 	if (!mask)
2929 		return -EINVAL;
2930 
2931 	link->sata_spd_limit = mask;
2932 
2933 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2934 			sata_spd_string(fls(mask)));
2935 
2936 	return 0;
2937 }
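
/*
 * Worked example of the mask manipulation above (illustrative only):
 * with sata_spd_limit = 0x3 (1.5 and 3.0 Gbps allowed) and the link
 * currently at spd = 2 (3.0 Gbps), clearing the highest bit gives 0x1
 * and masking off speeds at or above the current one also leaves 0x1,
 * so the new limit permits only 1.5 Gbps.
 */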
2938 
2939 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2940 {
2941 	struct ata_link *host_link = &link->ap->link;
2942 	u32 limit, target, spd;
2943 
2944 	limit = link->sata_spd_limit;
2945 
2946 	/* Don't configure downstream link faster than upstream link.
2947 	 * It doesn't speed up anything and some PMPs choke on such
2948 	 * configuration.
2949 	 */
2950 	if (!ata_is_host_link(link) && host_link->sata_spd)
2951 		limit &= (1 << host_link->sata_spd) - 1;
2952 
2953 	if (limit == UINT_MAX)
2954 		target = 0;
2955 	else
2956 		target = fls(limit);
2957 
2958 	spd = (*scontrol >> 4) & 0xf;
2959 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2960 
2961 	return spd != target;
2962 }
2963 
2964 /**
2965  *	sata_set_spd_needed - is SATA spd configuration needed
2966  *	@link: Link in question
2967  *
2968  *	Test whether the spd limit in SControl matches
2969  *	@link->sata_spd_limit.  This function is used to determine
2970  *	whether hardreset is necessary to apply SATA spd
2971  *	configuration.
2972  *
2973  *	LOCKING:
2974  *	Inherited from caller.
2975  *
2976  *	RETURNS:
2977  *	1 if SATA spd configuration is needed, 0 otherwise.
2978  */
2979 static int sata_set_spd_needed(struct ata_link *link)
2980 {
2981 	u32 scontrol;
2982 
2983 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2984 		return 1;
2985 
2986 	return __sata_set_spd_needed(link, &scontrol);
2987 }
2988 
2989 /**
2990  *	sata_set_spd - set SATA spd according to spd limit
2991  *	@link: Link to set SATA spd for
2992  *
2993  *	Set SATA spd of @link according to sata_spd_limit.
2994  *
2995  *	LOCKING:
2996  *	Inherited from caller.
2997  *
2998  *	RETURNS:
2999  *	0 if spd doesn't need to be changed, 1 if spd has been
3000  *	changed.  Negative errno if SCR registers are inaccessible.
3001  */
3002 int sata_set_spd(struct ata_link *link)
3003 {
3004 	u32 scontrol;
3005 	int rc;
3006 
3007 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3008 		return rc;
3009 
3010 	if (!__sata_set_spd_needed(link, &scontrol))
3011 		return 0;
3012 
3013 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3014 		return rc;
3015 
3016 	return 1;
3017 }
3018 
3019 /*
3020  * This mode timing computation functionality is ported over from
3021  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3022  */
3023 /*
3024  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3025  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3026  * for UDMA6, which is currently supported only by Maxtor drives.
3027  *
3028  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3029  */
3030 
3031 static const struct ata_timing ata_timing[] = {
3032 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
3033 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
3034 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
3035 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
3036 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
3037 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
3038 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
3039 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
3040 
3041 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
3042 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
3043 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
3044 
3045 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
3046 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
3047 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
3048 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
3049 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
3050 
3051 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
3052 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
3053 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
3054 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
3055 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
3056 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
3057 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
3058 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
3059 
3060 	{ 0xFF }
3061 };
3062 
3063 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3064 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
3065 
3066 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3067 {
3068 	q->setup   = EZ(t->setup   * 1000,  T);
3069 	q->act8b   = EZ(t->act8b   * 1000,  T);
3070 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
3071 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
3072 	q->active  = EZ(t->active  * 1000,  T);
3073 	q->recover = EZ(t->recover * 1000,  T);
3074 	q->cycle   = EZ(t->cycle   * 1000,  T);
3075 	q->udma    = EZ(t->udma    * 1000, UT);
3076 }
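
/*
 * Worked example of the quantisation above (illustrative only; the
 * clock figure is an assumption): a caller deriving T from a 33 MHz
 * (33333 kHz) bus clock typically passes T = 1000000000 / 33333 = 30000,
 * so a 70 ns PIO0 setup time becomes EZ(70 * 1000, 30000) =
 * ENOUGH(70000, 30000) = 3 clocks, i.e. nanosecond figures are scaled
 * and rounded up to whole clock periods.
 */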
3077 
3078 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3079 		      struct ata_timing *m, unsigned int what)
3080 {
3081 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3082 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3083 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3084 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3085 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3086 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3087 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3088 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3089 }
3090 
3091 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3092 {
3093 	const struct ata_timing *t = ata_timing;
3094 
3095 	while (xfer_mode > t->mode)
3096 		t++;
3097 
3098 	if (xfer_mode == t->mode)
3099 		return t;
3100 	return NULL;
3101 }
3102 
3103 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3104 		       struct ata_timing *t, int T, int UT)
3105 {
3106 	const struct ata_timing *s;
3107 	struct ata_timing p;
3108 
3109 	/*
3110 	 * Find the mode.
3111 	 */
3112 
3113 	if (!(s = ata_timing_find_mode(speed)))
3114 		return -EINVAL;
3115 
3116 	memcpy(t, s, sizeof(*s));
3117 
3118 	/*
3119 	 * If the drive is an EIDE drive, it can tell us it needs extended
3120 	 * PIO/MW_DMA cycle timing.
3121 	 */
3122 
3123 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3124 		memset(&p, 0, sizeof(p));
3125 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3126 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3127 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
3128 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
3129 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3130 		}
3131 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3132 	}
3133 
3134 	/*
3135 	 * Convert the timing to bus clock counts.
3136 	 */
3137 
3138 	ata_timing_quantize(t, t, T, UT);
3139 
3140 	/*
3141 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3142 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3143 	 * DMA cycle timing is no faster than the fastest PIO timing.
3144 	 */
3145 
3146 	if (speed > XFER_PIO_6) {
3147 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3148 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3149 	}
3150 
3151 	/*
3152 	 * Lengthen active & recovery time so that cycle time is correct.
3153 	 */
3154 
3155 	if (t->act8b + t->rec8b < t->cyc8b) {
3156 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3157 		t->rec8b = t->cyc8b - t->act8b;
3158 	}
3159 
3160 	if (t->active + t->recover < t->cycle) {
3161 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3162 		t->recover = t->cycle - t->active;
3163 	}
3164 
3165 	/* In a few cases quantisation may produce enough errors to
3166 	   leave t->cycle too low for the sum of active and recovery;
3167 	   if so, we must correct this */
3168 	if (t->active + t->recover > t->cycle)
3169 		t->cycle = t->active + t->recover;
3170 
3171 	return 0;
3172 }
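
/*
 * Illustrative sketch (not part of the driver): a PATA LLD would call
 * the helper above from its ->set_dmamode() hook roughly as below, with
 * T derived from its bus clock as in the quantisation example earlier
 * (a 33 MHz clock is assumed, and UT is passed as T for simplicity).
 * Register programming is omitted and the "example_*" helper is
 * hypothetical.
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;
 *
 *	if (ata_timing_compute(adev, adev->dma_mode, &t, T, T) == 0)
 *		example_program_dma_timing(ap, adev, t.active, t.recover);
 */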
3173 
3174 /**
3175  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3176  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3177  *	@cycle: cycle duration in ns
3178  *
3179  *	Return matching xfer mode for @cycle.  The returned mode is of
3180  *	the transfer type specified by @xfer_shift.  If @cycle is too
3181  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3182  *	than the fastest known mode, the fastest mode is returned.
3183  *
3184  *	LOCKING:
3185  *	None.
3186  *
3187  *	RETURNS:
3188  *	Matching xfer_mode, 0xff if no match found.
3189  */
3190 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3191 {
3192 	u8 base_mode = 0xff, last_mode = 0xff;
3193 	const struct ata_xfer_ent *ent;
3194 	const struct ata_timing *t;
3195 
3196 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3197 		if (ent->shift == xfer_shift)
3198 			base_mode = ent->base;
3199 
3200 	for (t = ata_timing_find_mode(base_mode);
3201 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3202 		unsigned short this_cycle;
3203 
3204 		switch (xfer_shift) {
3205 		case ATA_SHIFT_PIO:
3206 		case ATA_SHIFT_MWDMA:
3207 			this_cycle = t->cycle;
3208 			break;
3209 		case ATA_SHIFT_UDMA:
3210 			this_cycle = t->udma;
3211 			break;
3212 		default:
3213 			return 0xff;
3214 		}
3215 
3216 		if (cycle > this_cycle)
3217 			break;
3218 
3219 		last_mode = t->mode;
3220 	}
3221 
3222 	return last_mode;
3223 }
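
/*
 * Worked example (illustrative only): looking up a UDMA mode for a
 * 30 ns cycle walks the table above until the first entry whose udma
 * cycle is shorter than the requested one, so
 *
 *	ata_timing_cycle2mode(ATA_SHIFT_UDMA, 30)
 *
 * returns XFER_UDMA_4 (30 ns), i.e. the fastest mode that is not faster
 * than the requested cycle.
 */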
3224 
3225 /**
3226  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3227  *	@dev: Device to adjust xfer masks
3228  *	@sel: ATA_DNXFER_* selector
3229  *
3230  *	Adjust xfer masks of @dev downward.  Note that this function
3231  *	does not apply the change.  Invoking ata_set_mode() afterwards
3232  *	will apply the limit.
3233  *
3234  *	LOCKING:
3235  *	Inherited from caller.
3236  *
3237  *	RETURNS:
3238  *	0 on success, negative errno on failure
3239  */
3240 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3241 {
3242 	char buf[32];
3243 	unsigned long orig_mask, xfer_mask;
3244 	unsigned long pio_mask, mwdma_mask, udma_mask;
3245 	int quiet, highbit;
3246 
3247 	quiet = !!(sel & ATA_DNXFER_QUIET);
3248 	sel &= ~ATA_DNXFER_QUIET;
3249 
3250 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3251 						  dev->mwdma_mask,
3252 						  dev->udma_mask);
3253 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3254 
3255 	switch (sel) {
3256 	case ATA_DNXFER_PIO:
3257 		highbit = fls(pio_mask) - 1;
3258 		pio_mask &= ~(1 << highbit);
3259 		break;
3260 
3261 	case ATA_DNXFER_DMA:
3262 		if (udma_mask) {
3263 			highbit = fls(udma_mask) - 1;
3264 			udma_mask &= ~(1 << highbit);
3265 			if (!udma_mask)
3266 				return -ENOENT;
3267 		} else if (mwdma_mask) {
3268 			highbit = fls(mwdma_mask) - 1;
3269 			mwdma_mask &= ~(1 << highbit);
3270 			if (!mwdma_mask)
3271 				return -ENOENT;
3272 		}
3273 		break;
3274 
3275 	case ATA_DNXFER_40C:
3276 		udma_mask &= ATA_UDMA_MASK_40C;
3277 		break;
3278 
3279 	case ATA_DNXFER_FORCE_PIO0:
3280 		pio_mask &= 1;
3281 	case ATA_DNXFER_FORCE_PIO:
3282 		mwdma_mask = 0;
3283 		udma_mask = 0;
3284 		break;
3285 
3286 	default:
3287 		BUG();
3288 	}
3289 
3290 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3291 
3292 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3293 		return -ENOENT;
3294 
3295 	if (!quiet) {
3296 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3297 			snprintf(buf, sizeof(buf), "%s:%s",
3298 				 ata_mode_string(xfer_mask),
3299 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3300 		else
3301 			snprintf(buf, sizeof(buf), "%s",
3302 				 ata_mode_string(xfer_mask));
3303 
3304 		ata_dev_printk(dev, KERN_WARNING,
3305 			       "limiting speed to %s\n", buf);
3306 	}
3307 
3308 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3309 			    &dev->udma_mask);
3310 
3311 	return 0;
3312 }
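
/*
 * Worked example of ATA_DNXFER_PIO above (illustrative only): a device
 * currently allowing PIO0-4 has pio_mask = 0x1f; clearing the highest
 * bit leaves 0x0f, so once ata_set_mode() is invoked the device is
 * limited to PIO3.  ata_bus_probe() above uses exactly this selector on
 * a device's last retry.
 */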
3313 
3314 static int ata_dev_set_mode(struct ata_device *dev)
3315 {
3316 	struct ata_eh_context *ehc = &dev->link->eh_context;
3317 	const char *dev_err_whine = "";
3318 	int ign_dev_err = 0;
3319 	unsigned int err_mask;
3320 	int rc;
3321 
3322 	dev->flags &= ~ATA_DFLAG_PIO;
3323 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3324 		dev->flags |= ATA_DFLAG_PIO;
3325 
3326 	err_mask = ata_dev_set_xfermode(dev);
3327 
3328 	if (err_mask & ~AC_ERR_DEV)
3329 		goto fail;
3330 
3331 	/* revalidate */
3332 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3333 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3334 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3335 	if (rc)
3336 		return rc;
3337 
3338 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3339 		/* Old CFA may refuse this command, which is just fine */
3340 		if (ata_id_is_cfa(dev->id))
3341 			ign_dev_err = 1;
3342 		/* Catch several broken garbage emulations plus some pre
3343 		   ATA devices */
3344 		if (ata_id_major_version(dev->id) == 0 &&
3345 					dev->pio_mode <= XFER_PIO_2)
3346 			ign_dev_err = 1;
3347 		/* Some very old devices and some bad newer ones fail
3348 		   any kind of SET_XFERMODE request but support PIO0-2
3349 		   timings and no IORDY */
3350 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3351 			ign_dev_err = 1;
3352 	}
3353 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3354 	   Don't fail an MWDMA0 set if the device indicates it is already in MWDMA0 */
3355 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3356 	    dev->dma_mode == XFER_MW_DMA_0 &&
3357 	    (dev->id[63] >> 8) & 1)
3358 		ign_dev_err = 1;
3359 
3360 	/* if the device is actually configured correctly, ignore dev err */
3361 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3362 		ign_dev_err = 1;
3363 
3364 	if (err_mask & AC_ERR_DEV) {
3365 		if (!ign_dev_err)
3366 			goto fail;
3367 		else
3368 			dev_err_whine = " (device error ignored)";
3369 	}
3370 
3371 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3372 		dev->xfer_shift, (int)dev->xfer_mode);
3373 
3374 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3375 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3376 		       dev_err_whine);
3377 
3378 	return 0;
3379 
3380  fail:
3381 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3382 		       "(err_mask=0x%x)\n", err_mask);
3383 	return -EIO;
3384 }
3385 
3386 /**
3387  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3388  *	@link: link on which timings will be programmed
3389  *	@r_failed_dev: out parameter for failed device
3390  *
3391  *	Standard implementation of the function used to tune and set
3392  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3393  *	ata_dev_set_mode() fails, pointer to the failing device is
3394  *	returned in @r_failed_dev.
3395  *
3396  *	LOCKING:
3397  *	PCI/etc. bus probe sem.
3398  *
3399  *	RETURNS:
3400  *	0 on success, negative errno otherwise
3401  */
3402 
3403 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3404 {
3405 	struct ata_port *ap = link->ap;
3406 	struct ata_device *dev;
3407 	int rc = 0, used_dma = 0, found = 0;
3408 
3409 	/* step 1: calculate xfer_mask */
3410 	ata_for_each_dev(dev, link, ENABLED) {
3411 		unsigned long pio_mask, dma_mask;
3412 		unsigned int mode_mask;
3413 
3414 		mode_mask = ATA_DMA_MASK_ATA;
3415 		if (dev->class == ATA_DEV_ATAPI)
3416 			mode_mask = ATA_DMA_MASK_ATAPI;
3417 		else if (ata_id_is_cfa(dev->id))
3418 			mode_mask = ATA_DMA_MASK_CFA;
3419 
3420 		ata_dev_xfermask(dev);
3421 		ata_force_xfermask(dev);
3422 
3423 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3424 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3425 
3426 		if (libata_dma_mask & mode_mask)
3427 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3428 		else
3429 			dma_mask = 0;
3430 
3431 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3432 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3433 
3434 		found = 1;
3435 		if (ata_dma_enabled(dev))
3436 			used_dma = 1;
3437 	}
3438 	if (!found)
3439 		goto out;
3440 
3441 	/* step 2: always set host PIO timings */
3442 	ata_for_each_dev(dev, link, ENABLED) {
3443 		if (dev->pio_mode == 0xff) {
3444 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3445 			rc = -EINVAL;
3446 			goto out;
3447 		}
3448 
3449 		dev->xfer_mode = dev->pio_mode;
3450 		dev->xfer_shift = ATA_SHIFT_PIO;
3451 		if (ap->ops->set_piomode)
3452 			ap->ops->set_piomode(ap, dev);
3453 	}
3454 
3455 	/* step 3: set host DMA timings */
3456 	ata_for_each_dev(dev, link, ENABLED) {
3457 		if (!ata_dma_enabled(dev))
3458 			continue;
3459 
3460 		dev->xfer_mode = dev->dma_mode;
3461 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3462 		if (ap->ops->set_dmamode)
3463 			ap->ops->set_dmamode(ap, dev);
3464 	}
3465 
3466 	/* step 4: update devices' xfer mode */
3467 	ata_for_each_dev(dev, link, ENABLED) {
3468 		rc = ata_dev_set_mode(dev);
3469 		if (rc)
3470 			goto out;
3471 	}
3472 
3473 	/* Record simplex status. If we selected DMA then the other
3474 	 * host channels are not permitted to do so.
3475 	 */
3476 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3477 		ap->host->simplex_claimed = ap;
3478 
3479  out:
3480 	if (rc)
3481 		*r_failed_dev = dev;
3482 	return rc;
3483 }
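
/*
 * Illustrative sketch (not part of the driver): most LLDs rely on this
 * default directly, but a driver needing extra work around mode setting
 * can wrap it from its own ->set_mode() hook.  The "example_*" names
 * are hypothetical.
 *
 *	static int example_set_mode(struct ata_link *link,
 *				    struct ata_device **r_failed)
 *	{
 *		example_tune_chipset(link->ap);
 *		return ata_do_set_mode(link, r_failed);
 *	}
 */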
3484 
3485 /**
3486  *	ata_wait_ready - wait for link to become ready
3487  *	@link: link to be waited on
3488  *	@deadline: deadline jiffies for the operation
3489  *	@check_ready: callback to check link readiness
3490  *
3491  *	Wait for @link to become ready.  @check_ready should return
3492  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3493  *	link doesn't seem to be occupied, other errno for other error
3494  *	conditions.
3495  *
3496  *	Transient -ENODEV conditions are allowed for
3497  *	ATA_TMOUT_FF_WAIT.
3498  *
3499  *	LOCKING:
3500  *	EH context.
3501  *
3502  *	RETURNS:
3503  *	0 if @link is ready before @deadline; otherwise, -errno.
3504  */
3505 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3506 		   int (*check_ready)(struct ata_link *link))
3507 {
3508 	unsigned long start = jiffies;
3509 	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3510 	int warned = 0;
3511 
3512 	/* Slave readiness can't be tested separately from master.  On
3513 	 * M/S emulation configuration, this function should be called
3514 	 * only on the master and it will handle both master and slave.
3515 	 */
3516 	WARN_ON(link == link->ap->slave_link);
3517 
3518 	if (time_after(nodev_deadline, deadline))
3519 		nodev_deadline = deadline;
3520 
3521 	while (1) {
3522 		unsigned long now = jiffies;
3523 		int ready, tmp;
3524 
3525 		ready = tmp = check_ready(link);
3526 		if (ready > 0)
3527 			return 0;
3528 
3529 		/* -ENODEV could be transient.  Ignore -ENODEV if link
3530 		 * is online.  Also, some SATA devices take a long
3531 		 * time to clear 0xff after reset.  For example,
3532 		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3533 		 * GoVault needs even more than that.  Wait for
3534 		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3535 		 *
3536 		 * Note that some PATA controllers (pata_ali) explode
3537 		 * if status register is read more than once when
3538 		 * there's no device attached.
3539 		 */
3540 		if (ready == -ENODEV) {
3541 			if (ata_link_online(link))
3542 				ready = 0;
3543 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3544 				 !ata_link_offline(link) &&
3545 				 time_before(now, nodev_deadline))
3546 				ready = 0;
3547 		}
3548 
3549 		if (ready)
3550 			return ready;
3551 		if (time_after(now, deadline))
3552 			return -EBUSY;
3553 
3554 		if (!warned && time_after(now, start + 5 * HZ) &&
3555 		    (deadline - now > 3 * HZ)) {
3556 			ata_link_printk(link, KERN_WARNING,
3557 				"link is slow to respond, please be patient "
3558 				"(ready=%d)\n", tmp);
3559 			warned = 1;
3560 		}
3561 
3562 		msleep(50);
3563 	}
3564 }
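
/*
 * Illustrative sketch (not part of the driver): reset paths supply a
 * small readiness predicate to ata_wait_ready().  The callback below is
 * hypothetical, standing in for whatever status check suits the
 * hardware.
 *
 *	static int example_check_ready(struct ata_link *link)
 *	{
 *		return example_device_ready(link->ap) ? 1 : 0;
 *	}
 *
 *	rc = ata_wait_ready(link, deadline, example_check_ready);
 */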
3565 
3566 /**
3567  *	ata_wait_after_reset - wait for link to become ready after reset
3568  *	@link: link to be waited on
3569  *	@deadline: deadline jiffies for the operation
3570  *	@check_ready: callback to check link readiness
3571  *
3572  *	Wait for @link to become ready after reset.
3573  *
3574  *	LOCKING:
3575  *	EH context.
3576  *
3577  *	RETURNS:
3578  *	0 if @link is ready before @deadline; otherwise, -errno.
3579  */
3580 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3581 				int (*check_ready)(struct ata_link *link))
3582 {
3583 	msleep(ATA_WAIT_AFTER_RESET);
3584 
3585 	return ata_wait_ready(link, deadline, check_ready);
3586 }
3587 
3588 /**
3589  *	sata_link_debounce - debounce SATA phy status
3590  *	@link: ATA link to debounce SATA phy status for
3591  *	@params: timing parameters { interval, duration, timeout } in msec
3592  *	@deadline: deadline jiffies for the operation
3593  *
3594  *	Make sure SStatus of @link reaches a stable state, determined by
3595  *	holding the same value where DET is not 1 for @duration polled
3596  *	every @interval, before @timeout.  Timeout constrains the
3597  *	beginning of the stable state.  Because DET gets stuck at 1 on
3598  *	some controllers after hot unplugging, this function waits
3599  *	until timeout then returns 0 if DET is stable at 1.
3600  *
3601  *	@timeout is further limited by @deadline.  The sooner of the
3602  *	two is used.
3603  *
3604  *	LOCKING:
3605  *	Kernel thread context (may sleep)
3606  *
3607  *	RETURNS:
3608  *	0 on success, -errno on failure.
3609  */
3610 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3611 		       unsigned long deadline)
3612 {
3613 	unsigned long interval = params[0];
3614 	unsigned long duration = params[1];
3615 	unsigned long last_jiffies, t;
3616 	u32 last, cur;
3617 	int rc;
3618 
3619 	t = ata_deadline(jiffies, params[2]);
3620 	if (time_before(t, deadline))
3621 		deadline = t;
3622 
3623 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3624 		return rc;
3625 	cur &= 0xf;
3626 
3627 	last = cur;
3628 	last_jiffies = jiffies;
3629 
3630 	while (1) {
3631 		msleep(interval);
3632 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3633 			return rc;
3634 		cur &= 0xf;
3635 
3636 		/* DET stable? */
3637 		if (cur == last) {
3638 			if (cur == 1 && time_before(jiffies, deadline))
3639 				continue;
3640 			if (time_after(jiffies,
3641 				       ata_deadline(last_jiffies, duration)))
3642 				return 0;
3643 			continue;
3644 		}
3645 
3646 		/* unstable, start over */
3647 		last = cur;
3648 		last_jiffies = jiffies;
3649 
3650 		/* Check deadline.  If debouncing failed, return
3651 		 * -EPIPE to tell upper layer to lower link speed.
3652 		 */
3653 		if (time_after(jiffies, deadline))
3654 			return -EPIPE;
3655 	}
3656 }
3657 
3658 /**
3659  *	sata_link_resume - resume SATA link
3660  *	@link: ATA link to resume SATA
3661  *	@params: timing parameters { interval, duration, timeout } in msec
3662  *	@deadline: deadline jiffies for the operation
3663  *
3664  *	Resume SATA phy @link and debounce it.
3665  *
3666  *	LOCKING:
3667  *	Kernel thread context (may sleep)
3668  *
3669  *	RETURNS:
3670  *	0 on success, -errno on failure.
3671  */
3672 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3673 		     unsigned long deadline)
3674 {
3675 	u32 scontrol, serror;
3676 	int rc;
3677 
3678 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3679 		return rc;
3680 
3681 	scontrol = (scontrol & 0x0f0) | 0x300;
3682 
3683 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3684 		return rc;
3685 
3686 	/* Some PHYs react badly if SStatus is pounded immediately
3687 	 * after resuming.  Delay 200ms before debouncing.
3688 	 */
3689 	msleep(200);
3690 
3691 	if ((rc = sata_link_debounce(link, params, deadline)))
3692 		return rc;
3693 
3694 	/* clear SError, some PHYs require this even for SRST to work */
3695 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3696 		rc = sata_scr_write(link, SCR_ERROR, serror);
3697 
3698 	return rc != -EINVAL ? rc : 0;
3699 }
3700 
3701 /**
3702  *	ata_std_prereset - prepare for reset
3703  *	@link: ATA link to be reset
3704  *	@deadline: deadline jiffies for the operation
3705  *
3706  *	@link is about to be reset.  Initialize it.  Failure from
3707  *	prereset makes libata abort whole reset sequence and give up
3708  *	that port, so prereset should be best-effort.  It does its
3709  *	best to prepare for reset sequence but if things go wrong, it
3710  *	should just whine, not fail.
3711  *
3712  *	LOCKING:
3713  *	Kernel thread context (may sleep)
3714  *
3715  *	RETURNS:
3716  *	0 on success, -errno otherwise.
3717  */
3718 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3719 {
3720 	struct ata_port *ap = link->ap;
3721 	struct ata_eh_context *ehc = &link->eh_context;
3722 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3723 	int rc;
3724 
3725 	/* if we're about to do hardreset, nothing more to do */
3726 	if (ehc->i.action & ATA_EH_HARDRESET)
3727 		return 0;
3728 
3729 	/* if SATA, resume link */
3730 	if (ap->flags & ATA_FLAG_SATA) {
3731 		rc = sata_link_resume(link, timing, deadline);
3732 		/* whine about phy resume failure but proceed */
3733 		if (rc && rc != -EOPNOTSUPP)
3734 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3735 					"link for reset (errno=%d)\n", rc);
3736 	}
3737 
3738 	/* no point in trying softreset on offline link */
3739 	if (ata_phys_link_offline(link))
3740 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3741 
3742 	return 0;
3743 }
3744 
3745 /**
3746  *	sata_link_hardreset - reset link via SATA phy reset
3747  *	@link: link to reset
3748  *	@timing: timing parameters { interval, duration, timeout } in msec
3749  *	@deadline: deadline jiffies for the operation
3750  *	@online: optional out parameter indicating link onlineness
3751  *	@check_ready: optional callback to check link readiness
3752  *
3753  *	SATA phy-reset @link using DET bits of SControl register.
3754  *	After hardreset, link readiness is waited upon using
3755  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3756  *	allowed to not specify @check_ready and wait itself after this
3757  *	function returns.  Device classification is LLD's
3758  *	responsibility.
3759  *
3760  *	*@online is set to one iff reset succeeded and @link is online
3761  *	after reset.
3762  *
3763  *	LOCKING:
3764  *	Kernel thread context (may sleep)
3765  *
3766  *	RETURNS:
3767  *	0 on success, -errno otherwise.
3768  */
3769 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3770 			unsigned long deadline,
3771 			bool *online, int (*check_ready)(struct ata_link *))
3772 {
3773 	u32 scontrol;
3774 	int rc;
3775 
3776 	DPRINTK("ENTER\n");
3777 
3778 	if (online)
3779 		*online = false;
3780 
3781 	if (sata_set_spd_needed(link)) {
3782 		/* SATA spec says nothing about how to reconfigure
3783 		 * spd.  To be on the safe side, turn off phy during
3784 		 * reconfiguration.  This works for at least ICH7 AHCI
3785 		 * and Sil3124.
3786 		 */
3787 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3788 			goto out;
3789 
3790 		scontrol = (scontrol & 0x0f0) | 0x304;
3791 
3792 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3793 			goto out;
3794 
3795 		sata_set_spd(link);
3796 	}
3797 
3798 	/* issue phy wake/reset */
3799 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3800 		goto out;
3801 
3802 	scontrol = (scontrol & 0x0f0) | 0x301;
3803 
3804 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3805 		goto out;
3806 
3807 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3808 	 * 10.4.2 says at least 1 ms.
3809 	 */
3810 	msleep(1);
3811 
3812 	/* bring link back */
3813 	rc = sata_link_resume(link, timing, deadline);
3814 	if (rc)
3815 		goto out;
3816 	/* if link is offline nothing more to do */
3817 	if (ata_phys_link_offline(link))
3818 		goto out;
3819 
3820 	/* Link is online.  From this point, -ENODEV too is an error. */
3821 	if (online)
3822 		*online = true;
3823 
3824 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3825 		/* If PMP is supported, we have to do follow-up SRST.
3826 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3827 		 * the first port is empty.  Wait only for
3828 		 * ATA_TMOUT_PMP_SRST_WAIT.
3829 		 */
3830 		if (check_ready) {
3831 			unsigned long pmp_deadline;
3832 
3833 			pmp_deadline = ata_deadline(jiffies,
3834 						    ATA_TMOUT_PMP_SRST_WAIT);
3835 			if (time_after(pmp_deadline, deadline))
3836 				pmp_deadline = deadline;
3837 			ata_wait_ready(link, pmp_deadline, check_ready);
3838 		}
3839 		rc = -EAGAIN;
3840 		goto out;
3841 	}
3842 
3843 	rc = 0;
3844 	if (check_ready)
3845 		rc = ata_wait_ready(link, deadline, check_ready);
3846  out:
3847 	if (rc && rc != -EAGAIN) {
3848 		/* online is set iff link is online && reset succeeded */
3849 		if (online)
3850 			*online = false;
3851 		ata_link_printk(link, KERN_ERR,
3852 				"COMRESET failed (errno=%d)\n", rc);
3853 	}
3854 	DPRINTK("EXIT, rc=%d\n", rc);
3855 	return rc;
3856 }
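
/*
 * Illustrative sketch only (not part of the original file): an LLD that can
 * read its taskfile status register may pass a @check_ready callback so
 * that sata_link_hardreset() also waits for device readiness, roughly as in
 * the hypothetical driver below.  my_read_tf_status() stands in for
 * whatever register access the real hardware needs (compare ahci or
 * sata_sil24 for real implementations).
 */
#if 0	/* example sketch, never compiled */
static int my_check_ready(struct ata_link *link)
{
	u8 status = my_read_tf_status(link->ap);	/* hypothetical MMIO read */

	return ata_check_ready(status);
}

static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;

	/* COMRESET, debounce and wait for the device to become ready */
	return sata_link_hardreset(link, timing, deadline, &online,
				   my_check_ready);
}
#endif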
3857 
3858 /**
3859  *	sata_std_hardreset - COMRESET w/o waiting or classification
3860  *	@link: link to reset
3861  *	@class: resulting class of attached device
3862  *	@deadline: deadline jiffies for the operation
3863  *
3864  *	Standard SATA COMRESET w/o waiting or classification.
3865  *
3866  *	LOCKING:
3867  *	Kernel thread context (may sleep)
3868  *
3869  *	RETURNS:
3870  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3871  */
3872 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3873 		       unsigned long deadline)
3874 {
3875 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3876 	bool online;
3877 	int rc;
3878 
3879 	/* do hardreset */
3880 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3881 	return online ? -EAGAIN : rc;
3882 }
3883 
3884 /**
3885  *	ata_std_postreset - standard postreset callback
3886  *	@link: the target ata_link
3887  *	@classes: classes of attached devices
3888  *
3889  *	This function is invoked after a successful reset.  Note that
3890  *	the device might have been reset more than once using
3891  *	different reset methods before postreset is invoked.
3892  *
3893  *	LOCKING:
3894  *	Kernel thread context (may sleep)
3895  */
3896 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3897 {
3898 	u32 serror;
3899 
3900 	DPRINTK("ENTER\n");
3901 
3902 	/* reset complete, clear SError */
3903 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3904 		sata_scr_write(link, SCR_ERROR, serror);
3905 
3906 	/* print link status */
3907 	sata_print_link_status(link);
3908 
3909 	DPRINTK("EXIT\n");
3910 }
3911 
3912 /**
3913  *	ata_dev_same_device - Determine whether new ID matches configured device
3914  *	@dev: device to compare against
3915  *	@new_class: class of the new device
3916  *	@new_id: IDENTIFY page of the new device
3917  *
3918  *	Compare @new_class and @new_id against @dev and determine
3919  *	whether @dev is the device indicated by @new_class and
3920  *	@new_id.
3921  *
3922  *	LOCKING:
3923  *	None.
3924  *
3925  *	RETURNS:
3926  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3927  */
3928 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3929 			       const u16 *new_id)
3930 {
3931 	const u16 *old_id = dev->id;
3932 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3933 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3934 
3935 	if (dev->class != new_class) {
3936 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3937 			       dev->class, new_class);
3938 		return 0;
3939 	}
3940 
3941 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3942 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3943 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3944 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3945 
3946 	if (strcmp(model[0], model[1])) {
3947 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3948 			       "'%s' != '%s'\n", model[0], model[1]);
3949 		return 0;
3950 	}
3951 
3952 	if (strcmp(serial[0], serial[1])) {
3953 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3954 			       "'%s' != '%s'\n", serial[0], serial[1]);
3955 		return 0;
3956 	}
3957 
3958 	return 1;
3959 }
3960 
3961 /**
3962  *	ata_dev_reread_id - Re-read IDENTIFY data
3963  *	@dev: target ATA device
3964  *	@readid_flags: read ID flags
3965  *
3966  *	Re-read IDENTIFY page and make sure @dev is still attached to
3967  *	the port.
3968  *
3969  *	LOCKING:
3970  *	Kernel thread context (may sleep)
3971  *
3972  *	RETURNS:
3973  *	0 on success, negative errno otherwise
3974  */
3975 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3976 {
3977 	unsigned int class = dev->class;
3978 	u16 *id = (void *)dev->link->ap->sector_buf;
3979 	int rc;
3980 
3981 	/* read ID data */
3982 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3983 	if (rc)
3984 		return rc;
3985 
3986 	/* is the device still there? */
3987 	if (!ata_dev_same_device(dev, class, id))
3988 		return -ENODEV;
3989 
3990 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3991 	return 0;
3992 }
3993 
3994 /**
3995  *	ata_dev_revalidate - Revalidate ATA device
3996  *	@dev: device to revalidate
3997  *	@new_class: new class code
3998  *	@readid_flags: read ID flags
3999  *
4000  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4001  *	port and reconfigure it according to the new IDENTIFY page.
4002  *
4003  *	LOCKING:
4004  *	Kernel thread context (may sleep)
4005  *
4006  *	RETURNS:
4007  *	0 on success, negative errno otherwise
4008  */
4009 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4010 		       unsigned int readid_flags)
4011 {
4012 	u64 n_sectors = dev->n_sectors;
4013 	int rc;
4014 
4015 	if (!ata_dev_enabled(dev))
4016 		return -ENODEV;
4017 
4018 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4019 	if (ata_class_enabled(new_class) &&
4020 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4021 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4022 			       dev->class, new_class);
4023 		rc = -ENODEV;
4024 		goto fail;
4025 	}
4026 
4027 	/* re-read ID */
4028 	rc = ata_dev_reread_id(dev, readid_flags);
4029 	if (rc)
4030 		goto fail;
4031 
4032 	/* configure device according to the new ID */
4033 	rc = ata_dev_configure(dev);
4034 	if (rc)
4035 		goto fail;
4036 
4037 	/* verify n_sectors hasn't changed */
4038 	if (dev->class == ATA_DEV_ATA && n_sectors &&
4039 	    dev->n_sectors != n_sectors) {
4040 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4041 			       "%llu != %llu\n",
4042 			       (unsigned long long)n_sectors,
4043 			       (unsigned long long)dev->n_sectors);
4044 
4045 		/* restore original n_sectors */
4046 		dev->n_sectors = n_sectors;
4047 
4048 		rc = -ENODEV;
4049 		goto fail;
4050 	}
4051 
4052 	return 0;
4053 
4054  fail:
4055 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4056 	return rc;
4057 }
4058 
4059 struct ata_blacklist_entry {
4060 	const char *model_num;
4061 	const char *model_rev;
4062 	unsigned long horkage;
4063 };
4064 
4065 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4066 	/* Devices with DMA related problems under Linux */
4067 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4068 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4069 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4070 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4071 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4072 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4073 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4074 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4075 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4076 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
4077 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
4078 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4079 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4080 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4081 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4082 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4083 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
4084 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
4085 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4086 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4087 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4088 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4089 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4090 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4091 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4092 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4093 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4094 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4095 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4096 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4097 	/* Odd clown on sil3726/4726 PMPs */
4098 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4099 
4100 	/* Weird ATAPI devices */
4101 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4102 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4103 
4104 	/* Devices we expect to fail diagnostics */
4105 
4106 	/* Devices where NCQ should be avoided */
4107 	/* NCQ is slow */
4108 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4109 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4110 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4111 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4112 	/* NCQ is broken */
4113 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4114 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4115 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4116 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4117 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4118 
4119 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4120 	{ "ST31500341AS",	"SD15",		ATA_HORKAGE_NONCQ |
4121 						ATA_HORKAGE_FIRMWARE_WARN },
4122 	{ "ST31500341AS",	"SD16",		ATA_HORKAGE_NONCQ |
4123 						ATA_HORKAGE_FIRMWARE_WARN },
4124 	{ "ST31500341AS",	"SD17",		ATA_HORKAGE_NONCQ |
4125 						ATA_HORKAGE_FIRMWARE_WARN },
4126 	{ "ST31500341AS",	"SD18",		ATA_HORKAGE_NONCQ |
4127 						ATA_HORKAGE_FIRMWARE_WARN },
4128 	{ "ST31500341AS",	"SD19",		ATA_HORKAGE_NONCQ |
4129 						ATA_HORKAGE_FIRMWARE_WARN },
4130 
4131 	{ "ST31000333AS",	"SD15",		ATA_HORKAGE_NONCQ |
4132 						ATA_HORKAGE_FIRMWARE_WARN },
4133 	{ "ST31000333AS",	"SD16",		ATA_HORKAGE_NONCQ |
4134 						ATA_HORKAGE_FIRMWARE_WARN },
4135 	{ "ST31000333AS",	"SD17",		ATA_HORKAGE_NONCQ |
4136 						ATA_HORKAGE_FIRMWARE_WARN },
4137 	{ "ST31000333AS",	"SD18",		ATA_HORKAGE_NONCQ |
4138 						ATA_HORKAGE_FIRMWARE_WARN },
4139 	{ "ST31000333AS",	"SD19",		ATA_HORKAGE_NONCQ |
4140 						ATA_HORKAGE_FIRMWARE_WARN },
4141 
4142 	{ "ST3640623AS",	"SD15",		ATA_HORKAGE_NONCQ |
4143 						ATA_HORKAGE_FIRMWARE_WARN },
4144 	{ "ST3640623AS",	"SD16",		ATA_HORKAGE_NONCQ |
4145 						ATA_HORKAGE_FIRMWARE_WARN },
4146 	{ "ST3640623AS",	"SD17",		ATA_HORKAGE_NONCQ |
4147 						ATA_HORKAGE_FIRMWARE_WARN },
4148 	{ "ST3640623AS",	"SD18",		ATA_HORKAGE_NONCQ |
4149 						ATA_HORKAGE_FIRMWARE_WARN },
4150 	{ "ST3640623AS",	"SD19",		ATA_HORKAGE_NONCQ |
4151 						ATA_HORKAGE_FIRMWARE_WARN },
4152 
4153 	{ "ST3640323AS",	"SD15",		ATA_HORKAGE_NONCQ |
4154 						ATA_HORKAGE_FIRMWARE_WARN },
4155 	{ "ST3640323AS",	"SD16",		ATA_HORKAGE_NONCQ |
4156 						ATA_HORKAGE_FIRMWARE_WARN },
4157 	{ "ST3640323AS",	"SD17",		ATA_HORKAGE_NONCQ |
4158 						ATA_HORKAGE_FIRMWARE_WARN },
4159 	{ "ST3640323AS",	"SD18",		ATA_HORKAGE_NONCQ |
4160 						ATA_HORKAGE_FIRMWARE_WARN },
4161 	{ "ST3640323AS",	"SD19",		ATA_HORKAGE_NONCQ |
4162 						ATA_HORKAGE_FIRMWARE_WARN },
4163 
4164 	{ "ST3320813AS",	"SD15",		ATA_HORKAGE_NONCQ |
4165 						ATA_HORKAGE_FIRMWARE_WARN },
4166 	{ "ST3320813AS",	"SD16",		ATA_HORKAGE_NONCQ |
4167 						ATA_HORKAGE_FIRMWARE_WARN },
4168 	{ "ST3320813AS",	"SD17",		ATA_HORKAGE_NONCQ |
4169 						ATA_HORKAGE_FIRMWARE_WARN },
4170 	{ "ST3320813AS",	"SD18",		ATA_HORKAGE_NONCQ |
4171 						ATA_HORKAGE_FIRMWARE_WARN },
4172 	{ "ST3320813AS",	"SD19",		ATA_HORKAGE_NONCQ |
4173 						ATA_HORKAGE_FIRMWARE_WARN },
4174 
4175 	{ "ST3320613AS",	"SD15",		ATA_HORKAGE_NONCQ |
4176 						ATA_HORKAGE_FIRMWARE_WARN },
4177 	{ "ST3320613AS",	"SD16",		ATA_HORKAGE_NONCQ |
4178 						ATA_HORKAGE_FIRMWARE_WARN },
4179 	{ "ST3320613AS",	"SD17",		ATA_HORKAGE_NONCQ |
4180 						ATA_HORKAGE_FIRMWARE_WARN },
4181 	{ "ST3320613AS",	"SD18",		ATA_HORKAGE_NONCQ |
4182 						ATA_HORKAGE_FIRMWARE_WARN },
4183 	{ "ST3320613AS",	"SD19",		ATA_HORKAGE_NONCQ |
4184 						ATA_HORKAGE_FIRMWARE_WARN },
4185 
4186 	/* Blacklist entries taken from Silicon Image 3124/3132
4187 	   Windows driver .inf file - also several Linux problem reports */
4188 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4189 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4190 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4191 
4192 	/* devices which puke on READ_NATIVE_MAX */
4193 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4194 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4195 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4196 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4197 
4198 	/* Devices which report 1 sector over size HPA */
4199 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4200 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4201 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4202 
4203 	/* Devices which get the IVB wrong */
4204 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4205 	/* Maybe we should just blacklist TSSTcorp... */
4206 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
4207 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
4208 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4209 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4210 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4211 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
4212 
4213 	/* Devices that do not need bridging limits applied */
4214 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4215 
4216 	/* End Marker */
4217 	{ }
4218 };
4219 
4220 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4221 {
4222 	const char *p;
4223 	int len;
4224 
4225 	/*
4226 	 * check for trailing wildcard: *\0
4227 	 */
4228 	p = strchr(patt, wildchar);
4229 	if (p && ((*(p + 1)) == 0))
4230 		len = p - patt;
4231 	else {
4232 		len = strlen(name);
4233 		if (!len) {
4234 			if (!*patt)
4235 				return 0;
4236 			return -1;
4237 		}
4238 	}
4239 
4240 	return strncmp(patt, name, len);
4241 }
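
/*
 * For reference, the matching semantics: a pattern ending in the wildcard
 * compares only the prefix before it; a pattern without a wildcard is
 * compared over strlen(name) characters.  For example:
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*')	== 0 (match)
 *	strn_pattern_cmp("ST380817AS", "ST380817AS", '*')	== 0 (match)
 *	strn_pattern_cmp("ST380817AS", "ST3160023AS", '*')	!= 0 (no match)
 */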
4242 
4243 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4244 {
4245 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4246 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4247 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4248 
4249 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4250 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4251 
4252 	while (ad->model_num) {
4253 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4254 			if (ad->model_rev == NULL)
4255 				return ad->horkage;
4256 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4257 				return ad->horkage;
4258 		}
4259 		ad++;
4260 	}
4261 	return 0;
4262 }
4263 
4264 static int ata_dma_blacklisted(const struct ata_device *dev)
4265 {
4266 	/* We don't support polling DMA.
4267 	 * Blacklist DMA (and use PIO) for those ATAPI devices with CDB-intr
4268 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4269 	 */
4270 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4271 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4272 		return 1;
4273 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4274 }
4275 
4276 /**
4277  *	ata_is_40wire		-	check drive side detection
4278  *	@dev: device
4279  *
4280  *	Perform drive side detection decoding, allowing for device vendors
4281  *	who can't follow the documentation.
4282  */
4283 
4284 static int ata_is_40wire(struct ata_device *dev)
4285 {
4286 	if (dev->horkage & ATA_HORKAGE_IVB)
4287 		return ata_drive_40wire_relaxed(dev->id);
4288 	return ata_drive_40wire(dev->id);
4289 }
4290 
4291 /**
4292  *	cable_is_40wire		-	40/80/SATA decider
4293  *	@ap: port to consider
4294  *
4295  *	This function encapsulates the policy for speed management
4296  *	in one place. At the moment we don't cache the result but
4297  *	there is a good case for setting ap->cbl to the result when
4298  *	we are called with unknown cables (and figuring out if it
4299  *	impacts hotplug at all).
4300  *
4301  *	Return 1 if the cable appears to be 40 wire.
4302  */
4303 
4304 static int cable_is_40wire(struct ata_port *ap)
4305 {
4306 	struct ata_link *link;
4307 	struct ata_device *dev;
4308 
4309 	/* If the controller thinks we are 40 wire, we are. */
4310 	if (ap->cbl == ATA_CBL_PATA40)
4311 		return 1;
4312 
4313 	/* If the controller thinks we are 80 wire, we are. */
4314 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4315 		return 0;
4316 
4317 	/* If the system is known to be 40 wire short cable (eg
4318 	 * laptop), then we allow 80 wire modes even if the drive
4319 	 * isn't sure.
4320 	 */
4321 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4322 		return 0;
4323 
4324 	/* If the controller doesn't know, we scan.
4325 	 *
4326 	 * Note: We look for all 40 wire detects at this point.  Any
4327 	 *       80 wire detect is taken to be 80 wire cable because
4328 	 * - in many setups only the one drive (slave if present) will
4329 	 *   give a valid detect
4330 	 * - if you have a non detect capable drive you don't want it
4331 	 *   to colour the choice
4332 	 */
4333 	ata_for_each_link(link, ap, EDGE) {
4334 		ata_for_each_dev(dev, link, ENABLED) {
4335 			if (!ata_is_40wire(dev))
4336 				return 0;
4337 		}
4338 	}
4339 	return 1;
4340 }
4341 
4342 /**
4343  *	ata_dev_xfermask - Compute supported xfermask of the given device
4344  *	@dev: Device to compute xfermask for
4345  *
4346  *	Compute supported xfermask of @dev and store it in
4347  *	dev->*_mask.  This function is responsible for applying all
4348  *	known limits including host controller limits, device
4349  *	blacklist, etc...
4350  *
4351  *	LOCKING:
4352  *	None.
4353  */
4354 static void ata_dev_xfermask(struct ata_device *dev)
4355 {
4356 	struct ata_link *link = dev->link;
4357 	struct ata_port *ap = link->ap;
4358 	struct ata_host *host = ap->host;
4359 	unsigned long xfer_mask;
4360 
4361 	/* controller modes available */
4362 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4363 				      ap->mwdma_mask, ap->udma_mask);
4364 
4365 	/* drive modes available */
4366 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4367 				       dev->mwdma_mask, dev->udma_mask);
4368 	xfer_mask &= ata_id_xfermask(dev->id);
4369 
4370 	/*
4371 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4372 	 *	cable
4373 	 */
4374 	if (ata_dev_pair(dev)) {
4375 		/* No PIO5 or PIO6 */
4376 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4377 		/* No MWDMA3 or MWDMA 4 */
4378 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4379 	}
4380 
4381 	if (ata_dma_blacklisted(dev)) {
4382 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4383 		ata_dev_printk(dev, KERN_WARNING,
4384 			       "device is on DMA blacklist, disabling DMA\n");
4385 	}
4386 
4387 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4388 	    host->simplex_claimed && host->simplex_claimed != ap) {
4389 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4390 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4391 			       "other device, disabling DMA\n");
4392 	}
4393 
4394 	if (ap->flags & ATA_FLAG_NO_IORDY)
4395 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4396 
4397 	if (ap->ops->mode_filter)
4398 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4399 
4400 	/* Apply cable rule here.  Don't apply it early because when
4401 	 * we handle hot plug the cable type can itself change.
4402 	 * Check this last so that we know if the transfer rate was
4403 	 * solely limited by the cable.
4404 	 * Unknown or 80 wire cables reported host side are checked
4405 	 * drive side as well. Cases where we know a 40wire cable
4406 	 * is used safely for 80 are not checked here.
4407 	 */
4408 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4409 		/* UDMA/44 or higher would be available */
4410 		if (cable_is_40wire(ap)) {
4411 			ata_dev_printk(dev, KERN_WARNING,
4412 				 "limited to UDMA/33 due to 40-wire cable\n");
4413 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4414 		}
4415 
4416 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4417 			    &dev->mwdma_mask, &dev->udma_mask);
4418 }
4419 
4420 /**
4421  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4422  *	@dev: Device to which command will be sent
4423  *
4424  *	Issue SET FEATURES - XFER MODE command to device @dev
4425  *	on port @ap.
4426  *
4427  *	LOCKING:
4428  *	PCI/etc. bus probe sem.
4429  *
4430  *	RETURNS:
4431  *	0 on success, AC_ERR_* mask otherwise.
4432  */
4433 
4434 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4435 {
4436 	struct ata_taskfile tf;
4437 	unsigned int err_mask;
4438 
4439 	/* set up set-features taskfile */
4440 	DPRINTK("set features - xfer mode\n");
4441 
4442 	/* Some controllers and ATAPI devices show flaky interrupt
4443 	 * behavior after setting xfer mode.  Use polling instead.
4444 	 */
4445 	ata_tf_init(dev, &tf);
4446 	tf.command = ATA_CMD_SET_FEATURES;
4447 	tf.feature = SETFEATURES_XFER;
4448 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4449 	tf.protocol = ATA_PROT_NODATA;
4450 	/* If we are using IORDY we must send the mode setting command */
4451 	if (ata_pio_need_iordy(dev))
4452 		tf.nsect = dev->xfer_mode;
4453 	/* If the device has IORDY and the controller does not - turn it off */
4454 	else if (ata_id_has_iordy(dev->id))
4455 		tf.nsect = 0x01;
4456 	else /* In the ancient relic department - skip all of this */
4457 		return 0;
4458 
4459 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4460 
4461 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4462 	return err_mask;
4463 }
4464 /**
4465  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4466  *	@dev: Device to which command will be sent
4467  *	@enable: Whether to enable or disable the feature
4468  *	@feature: The feature to set, passed in the sector count register
4469  *
4470  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4471  *	on port @ap with the sector count set to @feature.
4472  *
4473  *	LOCKING:
4474  *	PCI/etc. bus probe sem.
4475  *
4476  *	RETURNS:
4477  *	0 on success, AC_ERR_* mask otherwise.
4478  */
4479 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4480 					u8 feature)
4481 {
4482 	struct ata_taskfile tf;
4483 	unsigned int err_mask;
4484 
4485 	/* set up set-features taskfile */
4486 	DPRINTK("set features - SATA features\n");
4487 
4488 	ata_tf_init(dev, &tf);
4489 	tf.command = ATA_CMD_SET_FEATURES;
4490 	tf.feature = enable;
4491 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4492 	tf.protocol = ATA_PROT_NODATA;
4493 	tf.nsect = feature;
4494 
4495 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4496 
4497 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4498 	return err_mask;
4499 }
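
/*
 * Usage illustration (the real call sites live elsewhere in libata):
 * enabling SATA Asynchronous Notification on a device that advertises it
 * comes down to a single SET FEATURES command, e.g.
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR,
 *			       "failed to enable AN (err_mask=0x%x)\n",
 *			       err_mask);
 */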
4500 
4501 /**
4502  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4503  *	@dev: Device to which command will be sent
4504  *	@heads: Number of heads (taskfile parameter)
4505  *	@sectors: Number of sectors (taskfile parameter)
4506  *
4507  *	LOCKING:
4508  *	Kernel thread context (may sleep)
4509  *
4510  *	RETURNS:
4511  *	0 on success, AC_ERR_* mask otherwise.
4512  */
4513 static unsigned int ata_dev_init_params(struct ata_device *dev,
4514 					u16 heads, u16 sectors)
4515 {
4516 	struct ata_taskfile tf;
4517 	unsigned int err_mask;
4518 
4519 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4520 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4521 		return AC_ERR_INVALID;
4522 
4523 	/* set up init dev params taskfile */
4524 	DPRINTK("init dev params\n");
4525 
4526 	ata_tf_init(dev, &tf);
4527 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4528 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4529 	tf.protocol = ATA_PROT_NODATA;
4530 	tf.nsect = sectors;
4531 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4532 
4533 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4534 	/* A clean abort indicates an original (pre-standard) or slightly
4535 	   out-of-spec drive; continue, since the setup is issued based on
4536 	   the working geometry the drive reports */
4537 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4538 		err_mask = 0;
4539 
4540 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4541 	return err_mask;
4542 }
4543 
4544 /**
4545  *	ata_sg_clean - Unmap DMA memory associated with command
4546  *	@qc: Command containing DMA memory to be released
4547  *
4548  *	Unmap all mapped DMA memory associated with this command.
4549  *
4550  *	LOCKING:
4551  *	spin_lock_irqsave(host lock)
4552  */
4553 void ata_sg_clean(struct ata_queued_cmd *qc)
4554 {
4555 	struct ata_port *ap = qc->ap;
4556 	struct scatterlist *sg = qc->sg;
4557 	int dir = qc->dma_dir;
4558 
4559 	WARN_ON_ONCE(sg == NULL);
4560 
4561 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4562 
4563 	if (qc->n_elem)
4564 		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4565 
4566 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4567 	qc->sg = NULL;
4568 }
4569 
4570 /**
4571  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4572  *	@qc: Metadata associated with taskfile to check
4573  *
4574  *	Allow low-level driver to filter ATA PACKET commands, returning
4575  *	a status indicating whether or not it is OK to use DMA for the
4576  *	supplied PACKET command.
4577  *
4578  *	LOCKING:
4579  *	spin_lock_irqsave(host lock)
4580  *
4581  *	RETURNS: 0 when ATAPI DMA can be used
4582  *               nonzero otherwise
4583  */
4584 int atapi_check_dma(struct ata_queued_cmd *qc)
4585 {
4586 	struct ata_port *ap = qc->ap;
4587 
4588 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4589 	 * few ATAPI devices choke on such DMA requests.
4590 	 */
4591 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4592 	    unlikely(qc->nbytes & 15))
4593 		return 1;
4594 
4595 	if (ap->ops->check_atapi_dma)
4596 		return ap->ops->check_atapi_dma(qc);
4597 
4598 	return 0;
4599 }
4600 
4601 /**
4602  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4603  *	@qc: ATA command in question
4604  *
4605  *	Non-NCQ commands cannot run with any other command, NCQ or
4606  *	not.  As upper layer only knows the queue depth, we are
4607  *	responsible for maintaining exclusion.  This function checks
4608  *	whether a new command @qc can be issued.
4609  *
4610  *	LOCKING:
4611  *	spin_lock_irqsave(host lock)
4612  *
4613  *	RETURNS:
4614  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4615  */
4616 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4617 {
4618 	struct ata_link *link = qc->dev->link;
4619 
4620 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4621 		if (!ata_tag_valid(link->active_tag))
4622 			return 0;
4623 	} else {
4624 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4625 			return 0;
4626 	}
4627 
4628 	return ATA_DEFER_LINK;
4629 }
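
/*
 * Worked example: with an NCQ command in flight (link->sactive != 0), a
 * newly issued non-NCQ command is deferred with ATA_DEFER_LINK until the
 * NCQ queue drains; conversely, while a non-NCQ command owns
 * link->active_tag, every new command, NCQ or not, is deferred.
 */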
4630 
4631 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4632 
4633 /**
4634  *	ata_sg_init - Associate command with scatter-gather table.
4635  *	@qc: Command to be associated
4636  *	@sg: Scatter-gather table.
4637  *	@n_elem: Number of elements in s/g table.
4638  *
4639  *	Initialize the data-related elements of queued_cmd @qc
4640  *	to point to a scatter-gather table @sg, containing @n_elem
4641  *	elements.
4642  *
4643  *	LOCKING:
4644  *	spin_lock_irqsave(host lock)
4645  */
4646 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4647 		 unsigned int n_elem)
4648 {
4649 	qc->sg = sg;
4650 	qc->n_elem = n_elem;
4651 	qc->cursg = qc->sg;
4652 }
4653 
4654 /**
4655  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4656  *	@qc: Command with scatter-gather table to be mapped.
4657  *
4658  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4659  *
4660  *	LOCKING:
4661  *	spin_lock_irqsave(host lock)
4662  *
4663  *	RETURNS:
4664  *	Zero on success, negative on error.
4665  *
4666  */
4667 static int ata_sg_setup(struct ata_queued_cmd *qc)
4668 {
4669 	struct ata_port *ap = qc->ap;
4670 	unsigned int n_elem;
4671 
4672 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4673 
4674 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4675 	if (n_elem < 1)
4676 		return -1;
4677 
4678 	DPRINTK("%d sg elements mapped\n", n_elem);
4679 
4680 	qc->n_elem = n_elem;
4681 	qc->flags |= ATA_QCFLAG_DMAMAP;
4682 
4683 	return 0;
4684 }
4685 
4686 /**
4687  *	swap_buf_le16 - swap halves of 16-bit words in place
4688  *	@buf:  Buffer to swap
4689  *	@buf_words:  Number of 16-bit words in buffer.
4690  *
4691  *	Swap halves of 16-bit words if needed to convert from
4692  *	little-endian byte order to native cpu byte order, or
4693  *	vice-versa.
4694  *
4695  *	LOCKING:
4696  *	Inherited from caller.
4697  */
4698 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4699 {
4700 #ifdef __BIG_ENDIAN
4701 	unsigned int i;
4702 
4703 	for (i = 0; i < buf_words; i++)
4704 		buf[i] = le16_to_cpu(buf[i]);
4705 #endif /* __BIG_ENDIAN */
4706 }
4707 
4708 /**
4709  *	ata_qc_new - Request an available ATA command, for queueing
4710  *	@ap: Port from which an available command structure is requested
4712  *
4713  *	LOCKING:
4714  *	None.
4715  */
4716 
4717 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4718 {
4719 	struct ata_queued_cmd *qc = NULL;
4720 	unsigned int i;
4721 
4722 	/* no command while frozen */
4723 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4724 		return NULL;
4725 
4726 	/* the last tag is reserved for internal command. */
4727 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4728 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4729 			qc = __ata_qc_from_tag(ap, i);
4730 			break;
4731 		}
4732 
4733 	if (qc)
4734 		qc->tag = i;
4735 
4736 	return qc;
4737 }
4738 
4739 /**
4740  *	ata_qc_new_init - Request an available ATA command, and initialize it
4741  *	@dev: Device from whom we request an available command structure
4742  *
4743  *	LOCKING:
4744  *	None.
4745  */
4746 
4747 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4748 {
4749 	struct ata_port *ap = dev->link->ap;
4750 	struct ata_queued_cmd *qc;
4751 
4752 	qc = ata_qc_new(ap);
4753 	if (qc) {
4754 		qc->scsicmd = NULL;
4755 		qc->ap = ap;
4756 		qc->dev = dev;
4757 
4758 		ata_qc_reinit(qc);
4759 	}
4760 
4761 	return qc;
4762 }
4763 
4764 /**
4765  *	ata_qc_free - free unused ata_queued_cmd
4766  *	@qc: Command to complete
4767  *
4768  *	Designed to free unused ata_queued_cmd object
4769  *	in case something prevents using it.
4770  *
4771  *	LOCKING:
4772  *	spin_lock_irqsave(host lock)
4773  */
4774 void ata_qc_free(struct ata_queued_cmd *qc)
4775 {
4776 	struct ata_port *ap = qc->ap;
4777 	unsigned int tag;
4778 
4779 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4780 
4781 	qc->flags = 0;
4782 	tag = qc->tag;
4783 	if (likely(ata_tag_valid(tag))) {
4784 		qc->tag = ATA_TAG_POISON;
4785 		clear_bit(tag, &ap->qc_allocated);
4786 	}
4787 }
4788 
4789 void __ata_qc_complete(struct ata_queued_cmd *qc)
4790 {
4791 	struct ata_port *ap = qc->ap;
4792 	struct ata_link *link = qc->dev->link;
4793 
4794 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4795 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4796 
4797 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4798 		ata_sg_clean(qc);
4799 
4800 	/* command should be marked inactive atomically with qc completion */
4801 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4802 		link->sactive &= ~(1 << qc->tag);
4803 		if (!link->sactive)
4804 			ap->nr_active_links--;
4805 	} else {
4806 		link->active_tag = ATA_TAG_POISON;
4807 		ap->nr_active_links--;
4808 	}
4809 
4810 	/* clear exclusive status */
4811 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4812 		     ap->excl_link == link))
4813 		ap->excl_link = NULL;
4814 
4815 	/* atapi: mark qc as inactive to prevent the interrupt handler
4816 	 * from completing the command twice later, before the error handler
4817 	 * is called. (when rc != 0 and atapi request sense is needed)
4818 	 */
4819 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4820 	ap->qc_active &= ~(1 << qc->tag);
4821 
4822 	/* call completion callback */
4823 	qc->complete_fn(qc);
4824 }
4825 
4826 static void fill_result_tf(struct ata_queued_cmd *qc)
4827 {
4828 	struct ata_port *ap = qc->ap;
4829 
4830 	qc->result_tf.flags = qc->tf.flags;
4831 	ap->ops->qc_fill_rtf(qc);
4832 }
4833 
4834 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4835 {
4836 	struct ata_device *dev = qc->dev;
4837 
4838 	if (ata_tag_internal(qc->tag))
4839 		return;
4840 
4841 	if (ata_is_nodata(qc->tf.protocol))
4842 		return;
4843 
4844 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4845 		return;
4846 
4847 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4848 }
4849 
4850 /**
4851  *	ata_qc_complete - Complete an active ATA command
4852  *	@qc: Command to complete
4853  *
4854  *	Indicate to the mid and upper layers that an ATA
4855  *	command has completed, with either an ok or not-ok status.
4856  *
4857  *	LOCKING:
4858  *	spin_lock_irqsave(host lock)
4859  */
4860 void ata_qc_complete(struct ata_queued_cmd *qc)
4861 {
4862 	struct ata_port *ap = qc->ap;
4863 
4864 	/* XXX: New EH and old EH use different mechanisms to
4865 	 * synchronize EH with regular execution path.
4866 	 *
4867 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4868 	 * Normal execution path is responsible for not accessing a
4869 	 * failed qc.  libata core enforces the rule by returning NULL
4870 	 * from ata_qc_from_tag() for failed qcs.
4871 	 *
4872 	 * Old EH depends on ata_qc_complete() nullifying completion
4873 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4874 	 * not synchronize with interrupt handler.  Only PIO task is
4875 	 * taken care of.
4876 	 */
4877 	if (ap->ops->error_handler) {
4878 		struct ata_device *dev = qc->dev;
4879 		struct ata_eh_info *ehi = &dev->link->eh_info;
4880 
4881 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4882 
4883 		if (unlikely(qc->err_mask))
4884 			qc->flags |= ATA_QCFLAG_FAILED;
4885 
4886 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4887 			if (!ata_tag_internal(qc->tag)) {
4888 				/* always fill result TF for failed qc */
4889 				fill_result_tf(qc);
4890 				ata_qc_schedule_eh(qc);
4891 				return;
4892 			}
4893 		}
4894 
4895 		/* read result TF if requested */
4896 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4897 			fill_result_tf(qc);
4898 
4899 		/* Some commands need post-processing after successful
4900 		 * completion.
4901 		 */
4902 		switch (qc->tf.command) {
4903 		case ATA_CMD_SET_FEATURES:
4904 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4905 			    qc->tf.feature != SETFEATURES_WC_OFF)
4906 				break;
4907 			/* fall through */
4908 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4909 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4910 			/* revalidate device */
4911 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4912 			ata_port_schedule_eh(ap);
4913 			break;
4914 
4915 		case ATA_CMD_SLEEP:
4916 			dev->flags |= ATA_DFLAG_SLEEPING;
4917 			break;
4918 		}
4919 
4920 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4921 			ata_verify_xfer(qc);
4922 
4923 		__ata_qc_complete(qc);
4924 	} else {
4925 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4926 			return;
4927 
4928 		/* read result TF if failed or requested */
4929 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4930 			fill_result_tf(qc);
4931 
4932 		__ata_qc_complete(qc);
4933 	}
4934 }
4935 
4936 /**
4937  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4938  *	@ap: port in question
4939  *	@qc_active: new qc_active mask
4940  *
4941  *	Complete in-flight commands.  This function is meant to be
4942  *	called from the low-level driver's interrupt routine to complete
4943  *	requests normally.  ap->qc_active and @qc_active are compared
4944  *	and commands are completed accordingly.
4945  *
4946  *	LOCKING:
4947  *	spin_lock_irqsave(host lock)
4948  *
4949  *	RETURNS:
4950  *	Number of completed commands on success, -errno otherwise.
4951  */
4952 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4953 {
4954 	int nr_done = 0;
4955 	u32 done_mask;
4956 	int i;
4957 
4958 	done_mask = ap->qc_active ^ qc_active;
4959 
4960 	if (unlikely(done_mask & qc_active)) {
4961 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4962 				"(%08x->%08x)\n", ap->qc_active, qc_active);
4963 		return -EINVAL;
4964 	}
4965 
4966 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
4967 		struct ata_queued_cmd *qc;
4968 
4969 		if (!(done_mask & (1 << i)))
4970 			continue;
4971 
4972 		if ((qc = ata_qc_from_tag(ap, i))) {
4973 			ata_qc_complete(qc);
4974 			nr_done++;
4975 		}
4976 	}
4977 
4978 	return nr_done;
4979 }
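
/*
 * Hedged sketch of the intended calling pattern (register access is
 * hypothetical): an LLD interrupt handler reads which tags the controller
 * still considers active and hands that mask to libata, which then
 * completes every command that has dropped out of the mask.
 */
#if 0	/* example sketch, never compiled */
static irqreturn_t my_interrupt(int irq, void *dev_instance)
{
	struct ata_port *ap = dev_instance;
	u32 qc_active;

	spin_lock(ap->lock);
	qc_active = my_read_active_tags(ap);	/* hypothetical MMIO read */
	ata_qc_complete_multiple(ap, qc_active);
	spin_unlock(ap->lock);

	return IRQ_HANDLED;
}
#endif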
4980 
4981 /**
4982  *	ata_qc_issue - issue taskfile to device
4983  *	@qc: command to issue to device
4984  *
4985  *	Prepare an ATA command for submission to the device.
4986  *	This includes mapping the data into a DMA-able
4987  *	area, filling in the S/G table, and finally
4988  *	writing the taskfile to hardware, starting the command.
4989  *
4990  *	LOCKING:
4991  *	spin_lock_irqsave(host lock)
4992  */
4993 void ata_qc_issue(struct ata_queued_cmd *qc)
4994 {
4995 	struct ata_port *ap = qc->ap;
4996 	struct ata_link *link = qc->dev->link;
4997 	u8 prot = qc->tf.protocol;
4998 
4999 	/* Make sure only one non-NCQ command is outstanding.  The
5000 	 * check is skipped for old EH because it reuses active qc to
5001 	 * request ATAPI sense.
5002 	 */
5003 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5004 
5005 	if (ata_is_ncq(prot)) {
5006 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5007 
5008 		if (!link->sactive)
5009 			ap->nr_active_links++;
5010 		link->sactive |= 1 << qc->tag;
5011 	} else {
5012 		WARN_ON_ONCE(link->sactive);
5013 
5014 		ap->nr_active_links++;
5015 		link->active_tag = qc->tag;
5016 	}
5017 
5018 	qc->flags |= ATA_QCFLAG_ACTIVE;
5019 	ap->qc_active |= 1 << qc->tag;
5020 
5021 	/* We guarantee to LLDs that they will have at least one
5022 	 * non-zero sg if the command is a data command.
5023 	 */
5024 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
5025 
5026 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5027 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5028 		if (ata_sg_setup(qc))
5029 			goto sg_err;
5030 
5031 	/* if device is sleeping, schedule reset and abort the link */
5032 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5033 		link->eh_info.action |= ATA_EH_RESET;
5034 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5035 		ata_link_abort(link);
5036 		return;
5037 	}
5038 
5039 	ap->ops->qc_prep(qc);
5040 
5041 	qc->err_mask |= ap->ops->qc_issue(qc);
5042 	if (unlikely(qc->err_mask))
5043 		goto err;
5044 	return;
5045 
5046 sg_err:
5047 	qc->err_mask |= AC_ERR_SYSTEM;
5048 err:
5049 	ata_qc_complete(qc);
5050 }
5051 
5052 /**
5053  *	sata_scr_valid - test whether SCRs are accessible
5054  *	@link: ATA link to test SCR accessibility for
5055  *
5056  *	Test whether SCRs are accessible for @link.
5057  *
5058  *	LOCKING:
5059  *	None.
5060  *
5061  *	RETURNS:
5062  *	1 if SCRs are accessible, 0 otherwise.
5063  */
5064 int sata_scr_valid(struct ata_link *link)
5065 {
5066 	struct ata_port *ap = link->ap;
5067 
5068 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5069 }
5070 
5071 /**
5072  *	sata_scr_read - read SCR register of the specified port
5073  *	@link: ATA link to read SCR for
5074  *	@reg: SCR to read
5075  *	@val: Place to store read value
5076  *
5077  *	Read SCR register @reg of @link into *@val.  This function is
5078  *	guaranteed to succeed if @link is ap->link, the cable type of
5079  *	the port is SATA and the port implements ->scr_read.
5080  *
5081  *	LOCKING:
5082  *	None if @link is ap->link.  Kernel thread context otherwise.
5083  *
5084  *	RETURNS:
5085  *	0 on success, negative errno on failure.
5086  */
5087 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5088 {
5089 	if (ata_is_host_link(link)) {
5090 		if (sata_scr_valid(link))
5091 			return link->ap->ops->scr_read(link, reg, val);
5092 		return -EOPNOTSUPP;
5093 	}
5094 
5095 	return sata_pmp_scr_read(link, reg, val);
5096 }
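
/*
 * Usage example (compare sata_print_link_status() elsewhere in this file
 * and ata_phys_link_online() below): SStatus bits 3:0 carry DET (0x3 ==
 * device present and PHY communication established) and bits 7:4 the
 * negotiated speed (1 == 1.5 Gbps, 2 == 3.0 Gbps).
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) {
 *		unsigned int det = sstatus & 0xf;
 *		unsigned int spd = (sstatus >> 4) & 0xf;
 *		...
 *	}
 */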
5097 
5098 /**
5099  *	sata_scr_write - write SCR register of the specified port
5100  *	@link: ATA link to write SCR for
5101  *	@reg: SCR to write
5102  *	@val: value to write
5103  *
5104  *	Write @val to SCR register @reg of @link.  This function is
5105  *	guaranteed to succeed if @link is ap->link, the cable type of
5106  *	the port is SATA and the port implements ->scr_write.
5107  *
5108  *	LOCKING:
5109  *	None if @link is ap->link.  Kernel thread context otherwise.
5110  *
5111  *	RETURNS:
5112  *	0 on success, negative errno on failure.
5113  */
5114 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5115 {
5116 	if (ata_is_host_link(link)) {
5117 		if (sata_scr_valid(link))
5118 			return link->ap->ops->scr_write(link, reg, val);
5119 		return -EOPNOTSUPP;
5120 	}
5121 
5122 	return sata_pmp_scr_write(link, reg, val);
5123 }
5124 
5125 /**
5126  *	sata_scr_write_flush - write SCR register of the specified port and flush
5127  *	@link: ATA link to write SCR for
5128  *	@reg: SCR to write
5129  *	@val: value to write
5130  *
5131  *	This function is identical to sata_scr_write() except that this
5132  *	function performs a flush after writing to the register.
5133  *
5134  *	LOCKING:
5135  *	None if @link is ap->link.  Kernel thread context otherwise.
5136  *
5137  *	RETURNS:
5138  *	0 on success, negative errno on failure.
5139  */
5140 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5141 {
5142 	if (ata_is_host_link(link)) {
5143 		int rc;
5144 
5145 		if (sata_scr_valid(link)) {
5146 			rc = link->ap->ops->scr_write(link, reg, val);
5147 			if (rc == 0)
5148 				rc = link->ap->ops->scr_read(link, reg, &val);
5149 			return rc;
5150 		}
5151 		return -EOPNOTSUPP;
5152 	}
5153 
5154 	return sata_pmp_scr_write(link, reg, val);
5155 }
5156 
5157 /**
5158  *	ata_phys_link_online - test whether the given link is online
5159  *	@link: ATA link to test
5160  *
5161  *	Test whether @link is online.  Note that this function returns
5162  *	0 if online status of @link cannot be obtained, so
5163  *	ata_link_online(link) != !ata_link_offline(link).
5164  *
5165  *	LOCKING:
5166  *	None.
5167  *
5168  *	RETURNS:
5169  *	True if the port online status is available and online.
5170  */
5171 bool ata_phys_link_online(struct ata_link *link)
5172 {
5173 	u32 sstatus;
5174 
5175 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5176 	    (sstatus & 0xf) == 0x3)
5177 		return true;
5178 	return false;
5179 }
5180 
5181 /**
5182  *	ata_phys_link_offline - test whether the given link is offline
5183  *	@link: ATA link to test
5184  *
5185  *	Test whether @link is offline.  Note that this function
5186  *	returns 0 if offline status of @link cannot be obtained, so
5187  *	ata_link_online(link) != !ata_link_offline(link).
5188  *
5189  *	LOCKING:
5190  *	None.
5191  *
5192  *	RETURNS:
5193  *	True if the port offline status is available and offline.
5194  */
5195 bool ata_phys_link_offline(struct ata_link *link)
5196 {
5197 	u32 sstatus;
5198 
5199 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5200 	    (sstatus & 0xf) != 0x3)
5201 		return true;
5202 	return false;
5203 }
5204 
5205 /**
5206  *	ata_link_online - test whether the given link is online
5207  *	@link: ATA link to test
5208  *
5209  *	Test whether @link is online.  This is identical to
5210  *	ata_phys_link_online() when there's no slave link.  When
5211  *	there's a slave link, this function should only be called on
5212  *	the master link and will return true if any of M/S links is
5213  *	online.
5214  *
5215  *	LOCKING:
5216  *	None.
5217  *
5218  *	RETURNS:
5219  *	True if the port online status is available and online.
5220  */
5221 bool ata_link_online(struct ata_link *link)
5222 {
5223 	struct ata_link *slave = link->ap->slave_link;
5224 
5225 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5226 
5227 	return ata_phys_link_online(link) ||
5228 		(slave && ata_phys_link_online(slave));
5229 }
5230 
5231 /**
5232  *	ata_link_offline - test whether the given link is offline
5233  *	@link: ATA link to test
5234  *
5235  *	Test whether @link is offline.  This is identical to
5236  *	ata_phys_link_offline() when there's no slave link.  When
5237  *	there's a slave link, this function should only be called on
5238  *	the master link and will return true if both M/S links are
5239  *	offline.
5240  *
5241  *	LOCKING:
5242  *	None.
5243  *
5244  *	RETURNS:
5245  *	True if the port offline status is available and offline.
5246  */
5247 bool ata_link_offline(struct ata_link *link)
5248 {
5249 	struct ata_link *slave = link->ap->slave_link;
5250 
5251 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5252 
5253 	return ata_phys_link_offline(link) &&
5254 		(!slave || ata_phys_link_offline(slave));
5255 }
5256 
5257 #ifdef CONFIG_PM
5258 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5259 			       unsigned int action, unsigned int ehi_flags,
5260 			       int wait)
5261 {
5262 	unsigned long flags;
5263 	int i, rc;
5264 
5265 	for (i = 0; i < host->n_ports; i++) {
5266 		struct ata_port *ap = host->ports[i];
5267 		struct ata_link *link;
5268 
5269 		/* Previous resume operation might still be in
5270 		 * progress.  Wait for PM_PENDING to clear.
5271 		 */
5272 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5273 			ata_port_wait_eh(ap);
5274 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5275 		}
5276 
5277 		/* request PM ops to EH */
5278 		spin_lock_irqsave(ap->lock, flags);
5279 
5280 		ap->pm_mesg = mesg;
5281 		if (wait) {
5282 			rc = 0;
5283 			ap->pm_result = &rc;
5284 		}
5285 
5286 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5287 		ata_for_each_link(link, ap, HOST_FIRST) {
5288 			link->eh_info.action |= action;
5289 			link->eh_info.flags |= ehi_flags;
5290 		}
5291 
5292 		ata_port_schedule_eh(ap);
5293 
5294 		spin_unlock_irqrestore(ap->lock, flags);
5295 
5296 		/* wait and check result */
5297 		if (wait) {
5298 			ata_port_wait_eh(ap);
5299 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5300 			if (rc)
5301 				return rc;
5302 		}
5303 	}
5304 
5305 	return 0;
5306 }
5307 
5308 /**
5309  *	ata_host_suspend - suspend host
5310  *	@host: host to suspend
5311  *	@mesg: PM message
5312  *
5313  *	Suspend @host.  Actual operation is performed by EH.  This
5314  *	function requests EH to perform PM operations and waits for EH
5315  *	to finish.
5316  *
5317  *	LOCKING:
5318  *	Kernel thread context (may sleep).
5319  *
5320  *	RETURNS:
5321  *	0 on success, -errno on failure.
5322  */
5323 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5324 {
5325 	int rc;
5326 
5327 	/*
5328 	 * disable link pm on all ports before requesting
5329 	 * any pm activity
5330 	 */
5331 	ata_lpm_enable(host);
5332 
5333 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5334 	if (rc == 0)
5335 		host->dev->power.power_state = mesg;
5336 	return rc;
5337 }
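
/*
 * Hedged sketch of a typical caller (not part of this file): a PCI LLD's
 * ->suspend() hook quiesces the ATA host via EH first and only then powers
 * the device down.  The exact PCI sequence varies per driver; see
 * ata_pci_device_suspend() for the helper libata itself provides.
 */
#if 0	/* example sketch, never compiled */
static int my_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
	return 0;
}
#endif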
5338 
5339 /**
5340  *	ata_host_resume - resume host
5341  *	@host: host to resume
5342  *
5343  *	Resume @host.  Actual operation is performed by EH.  This
5344  *	function requests EH to perform PM operations and returns.
5345  *	Note that all resume operations are performed in parallel.
5346  *
5347  *	LOCKING:
5348  *	Kernel thread context (may sleep).
5349  */
5350 void ata_host_resume(struct ata_host *host)
5351 {
5352 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5353 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5354 	host->dev->power.power_state = PMSG_ON;
5355 
5356 	/* reenable link pm */
5357 	ata_lpm_disable(host);
5358 }
5359 #endif
5360 
5361 /**
5362  *	ata_port_start - Set port up for dma.
5363  *	@ap: Port to initialize
5364  *
5365  *	Called just after data structures for each port are
5366  *	initialized.  Allocates space for PRD table.
5367  *
5368  *	May be used as the port_start() entry in ata_port_operations.
5369  *
5370  *	LOCKING:
5371  *	Inherited from caller.
5372  */
5373 int ata_port_start(struct ata_port *ap)
5374 {
5375 	struct device *dev = ap->dev;
5376 
5377 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5378 				      GFP_KERNEL);
5379 	if (!ap->prd)
5380 		return -ENOMEM;
5381 
5382 	return 0;
5383 }
5384 
5385 /**
5386  *	ata_dev_init - Initialize an ata_device structure
5387  *	@dev: Device structure to initialize
5388  *
5389  *	Initialize @dev in preparation for probing.
5390  *
5391  *	LOCKING:
5392  *	Inherited from caller.
5393  */
5394 void ata_dev_init(struct ata_device *dev)
5395 {
5396 	struct ata_link *link = ata_dev_phys_link(dev);
5397 	struct ata_port *ap = link->ap;
5398 	unsigned long flags;
5399 
5400 	/* SATA spd limit is bound to the attached device, reset together */
5401 	link->sata_spd_limit = link->hw_sata_spd_limit;
5402 	link->sata_spd = 0;
5403 
5404 	/* High bits of dev->flags are used to record warm plug
5405 	 * requests which occur asynchronously.  Synchronize using
5406 	 * host lock.
5407 	 */
5408 	spin_lock_irqsave(ap->lock, flags);
5409 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5410 	dev->horkage = 0;
5411 	spin_unlock_irqrestore(ap->lock, flags);
5412 
5413 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5414 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5415 	dev->pio_mask = UINT_MAX;
5416 	dev->mwdma_mask = UINT_MAX;
5417 	dev->udma_mask = UINT_MAX;
5418 }
5419 
5420 /**
5421  *	ata_link_init - Initialize an ata_link structure
5422  *	@ap: ATA port link is attached to
5423  *	@link: Link structure to initialize
5424  *	@pmp: Port multiplier port number
5425  *
5426  *	Initialize @link.
5427  *
5428  *	LOCKING:
5429  *	Kernel thread context (may sleep)
5430  */
5431 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5432 {
5433 	int i;
5434 
5435 	/* clear everything except for devices */
5436 	memset(link, 0, offsetof(struct ata_link, device[0]));
5437 
5438 	link->ap = ap;
5439 	link->pmp = pmp;
5440 	link->active_tag = ATA_TAG_POISON;
5441 	link->hw_sata_spd_limit = UINT_MAX;
5442 
5443 	/* can't use iterator, ap isn't initialized yet */
5444 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5445 		struct ata_device *dev = &link->device[i];
5446 
5447 		dev->link = link;
5448 		dev->devno = dev - link->device;
5449 		ata_dev_init(dev);
5450 	}
5451 }
5452 
5453 /**
5454  *	sata_link_init_spd - Initialize link->sata_spd_limit
5455  *	@link: Link to configure sata_spd_limit for
5456  *
5457  *	Initialize @link->[hw_]sata_spd_limit to the currently
5458  *	configured value.
5459  *
5460  *	LOCKING:
5461  *	Kernel thread context (may sleep).
5462  *
5463  *	RETURNS:
5464  *	0 on success, -errno on failure.
5465  */
5466 int sata_link_init_spd(struct ata_link *link)
5467 {
5468 	u8 spd;
5469 	int rc;
5470 
5471 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5472 	if (rc)
5473 		return rc;
5474 
5475 	spd = (link->saved_scontrol >> 4) & 0xf;
5476 	if (spd)
5477 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5478 
5479 	ata_force_link_limits(link);
5480 
5481 	link->sata_spd_limit = link->hw_sata_spd_limit;
5482 
5483 	return 0;
5484 }
5485 
5486 /**
5487  *	ata_port_alloc - allocate and initialize basic ATA port resources
5488  *	@host: ATA host this allocated port belongs to
5489  *
5490  *	Allocate and initialize basic ATA port resources.
5491  *
5492  *	RETURNS:
5493  *	Allocated ATA port on success, NULL on failure.
5494  *
5495  *	LOCKING:
5496  *	Inherited from calling layer (may sleep).
5497  */
5498 struct ata_port *ata_port_alloc(struct ata_host *host)
5499 {
5500 	struct ata_port *ap;
5501 
5502 	DPRINTK("ENTER\n");
5503 
5504 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5505 	if (!ap)
5506 		return NULL;
5507 
5508 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5509 	ap->lock = &host->lock;
5510 	ap->flags = ATA_FLAG_DISABLED;
5511 	ap->print_id = -1;
5512 	ap->ctl = ATA_DEVCTL_OBS;
5513 	ap->host = host;
5514 	ap->dev = host->dev;
5515 	ap->last_ctl = 0xFF;
5516 
5517 #if defined(ATA_VERBOSE_DEBUG)
5518 	/* turn on all debugging levels */
5519 	ap->msg_enable = 0x00FF;
5520 #elif defined(ATA_DEBUG)
5521 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5522 #else
5523 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5524 #endif
5525 
5526 #ifdef CONFIG_ATA_SFF
5527 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5528 #else
5529 	INIT_DELAYED_WORK(&ap->port_task, NULL);
5530 #endif
5531 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5532 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5533 	INIT_LIST_HEAD(&ap->eh_done_q);
5534 	init_waitqueue_head(&ap->eh_wait_q);
5535 	init_completion(&ap->park_req_pending);
5536 	init_timer_deferrable(&ap->fastdrain_timer);
5537 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5538 	ap->fastdrain_timer.data = (unsigned long)ap;
5539 
5540 	ap->cbl = ATA_CBL_NONE;
5541 
5542 	ata_link_init(ap, &ap->link, 0);
5543 
5544 #ifdef ATA_IRQ_TRAP
5545 	ap->stats.unhandled_irq = 1;
5546 	ap->stats.idle_irq = 1;
5547 #endif
5548 	return ap;
5549 }
5550 
5551 static void ata_host_release(struct device *gendev, void *res)
5552 {
5553 	struct ata_host *host = dev_get_drvdata(gendev);
5554 	int i;
5555 
5556 	for (i = 0; i < host->n_ports; i++) {
5557 		struct ata_port *ap = host->ports[i];
5558 
5559 		if (!ap)
5560 			continue;
5561 
5562 		if (ap->scsi_host)
5563 			scsi_host_put(ap->scsi_host);
5564 
5565 		kfree(ap->pmp_link);
5566 		kfree(ap->slave_link);
5567 		kfree(ap);
5568 		host->ports[i] = NULL;
5569 	}
5570 
5571 	dev_set_drvdata(gendev, NULL);
5572 }
5573 
5574 /**
5575  *	ata_host_alloc - allocate and init basic ATA host resources
5576  *	@dev: generic device this host is associated with
5577  *	@max_ports: maximum number of ATA ports associated with this host
5578  *
5579  *	Allocate and initialize basic ATA host resources.  An LLD calls
5580  *	this function to allocate a host, initializes it fully and then
5581  *	attaches it using ata_host_register().
5582  *
5583  *	@max_ports ports are allocated and host->n_ports is
5584  *	initialized to @max_ports.  The caller is allowed to decrease
5585  *	host->n_ports before calling ata_host_register().  The unused
5586  *	ports will be automatically freed on registration.
5587  *
5588  *	RETURNS:
5589  *	Allocated ATA host on success, NULL on failure.
5590  *
5591  *	LOCKING:
5592  *	Inherited from calling layer (may sleep).
5593  */
5594 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5595 {
5596 	struct ata_host *host;
5597 	size_t sz;
5598 	int i;
5599 
5600 	DPRINTK("ENTER\n");
5601 
5602 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5603 		return NULL;
5604 
5605 	/* alloc a container for our list of ATA ports (buses) */
5606 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5608 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5609 	if (!host)
5610 		goto err_out;
5611 
5612 	devres_add(dev, host);
5613 	dev_set_drvdata(dev, host);
5614 
5615 	spin_lock_init(&host->lock);
5616 	host->dev = dev;
5617 	host->n_ports = max_ports;
5618 
5619 	/* allocate ports bound to this host */
5620 	for (i = 0; i < max_ports; i++) {
5621 		struct ata_port *ap;
5622 
5623 		ap = ata_port_alloc(host);
5624 		if (!ap)
5625 			goto err_out;
5626 
5627 		ap->port_no = i;
5628 		host->ports[i] = ap;
5629 	}
5630 
5631 	devres_remove_group(dev, NULL);
5632 	return host;
5633 
5634  err_out:
5635 	devres_release_group(dev, NULL);
5636 	return NULL;
5637 }
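
/*
 * Usage sketch (hypothetical driver, not part of libata): an LLD that
 * only learns its real channel count after touching the hardware can
 * allocate the maximum and shrink host->n_ports before registration;
 * the surplus ports are freed by ata_host_register().  The foo_*
 * names below are made up for illustration.
 *
 *	host = ata_host_alloc(&pdev->dev, 4);
 *	if (!host)
 *		return -ENOMEM;
 *	if (!foo_has_second_pair_of_channels(pdev))
 *		host->n_ports = 2;
 */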
5638 
5639 /**
5640  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5641  *	@dev: generic device this host is associated with
5642  *	@ppi: array of ATA port_info to initialize host with
5643  *	@n_ports: number of ATA ports attached to this host
5644  *
5645  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5646  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5647  *	last entry will be used for the remaining ports.
5648  *
5649  *	RETURNS:
5650  *	Allocated ATA host on success, NULL on failure.
5651  *
5652  *	LOCKING:
5653  *	Inherited from calling layer (may sleep).
5654  */
5655 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5656 				      const struct ata_port_info * const * ppi,
5657 				      int n_ports)
5658 {
5659 	const struct ata_port_info *pi;
5660 	struct ata_host *host;
5661 	int i, j;
5662 
5663 	host = ata_host_alloc(dev, n_ports);
5664 	if (!host)
5665 		return NULL;
5666 
5667 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5668 		struct ata_port *ap = host->ports[i];
5669 
5670 		if (ppi[j])
5671 			pi = ppi[j++];
5672 
5673 		ap->pio_mask = pi->pio_mask;
5674 		ap->mwdma_mask = pi->mwdma_mask;
5675 		ap->udma_mask = pi->udma_mask;
5676 		ap->flags |= pi->flags;
5677 		ap->link.flags |= pi->link_flags;
5678 		ap->ops = pi->port_ops;
5679 
5680 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5681 			host->ops = pi->port_ops;
5682 	}
5683 
5684 	return host;
5685 }
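
/*
 * Usage sketch (hypothetical foo_* names): a two-port host where both
 * ports share one port_info.  Because @ppi is NULL terminated after
 * the first entry, that entry is reused for port 1.  The raw masks
 * 0x1f and 0x7f select PIO0-4 and UDMA0-6 respectively.
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= 0x1f,
 *		.udma_mask	= 0x7f,
 *		.port_ops	= &foo_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */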
5686 
5687 /**
5688  *	ata_slave_link_init - initialize slave link
5689  *	@ap: port to initialize slave link for
5690  *
5691  *	Create and initialize slave link for @ap.  This enables slave
5692  *	link handling on the port.
5693  *
5694  *	In libata, a port contains links and a link contains devices.
5695  *	There is a single host link but if a PMP is attached to it,
5696  *	there can be multiple fan-out links.  On SATA, there's usually
5697  *	a single device connected to a link but PATA and SATA
5698  *	controllers emulating a TF based interface can have two - master
5699  *	and slave.
5700  *
5701  *	However, there are a few controllers which don't fit into this
5702  *	abstraction too well - SATA controllers which emulate TF
5703  *	interface with both master and slave devices but also have
5704  *	separate SCR register sets for each device.  These controllers
5705  *	need separate links for physical link handling
5706  *	(e.g. onlineness, link speed) but should be treated like a
5707  *	traditional M/S controller for everything else (e.g. command
5708  *	issue, softreset).
5709  *
5710  *	slave_link is libata's way of handling this class of
5711  *	controllers without impacting core layer too much.  For
5712  *	anything other than physical link handling, the default host
5713  *	link is used for both master and slave.  For physical link
5714  *	handling, separate @ap->slave_link is used.  All dirty details
5715  *	are implemented inside libata core layer.  From LLD's POV, the
5716  *	only difference is that prereset, hardreset and postreset are
5717  *	called once more for the slave link, so the reset sequence
5718  *	looks like the following.
5719  *
5720  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5721  *	softreset(M) -> postreset(M) -> postreset(S)
5722  *
5723  *	Note that softreset is called only for the master.  Softreset
5724  *	resets both M/S by definition, so SRST on master should handle
5725  *	both (the standard method will work just fine).
5726  *
5727  *	LOCKING:
5728  *	Should be called before host is registered.
5729  *
5730  *	RETURNS:
5731  *	0 on success, -errno on failure.
5732  */
5733 int ata_slave_link_init(struct ata_port *ap)
5734 {
5735 	struct ata_link *link;
5736 
5737 	WARN_ON(ap->slave_link);
5738 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5739 
5740 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5741 	if (!link)
5742 		return -ENOMEM;
5743 
5744 	ata_link_init(ap, link, 1);
5745 	ap->slave_link = link;
5746 	return 0;
5747 }
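
/*
 * Usage sketch (hypothetical): an LLD whose controller exposes
 * separate SCRs for master and slave devices would enable slave_link
 * handling once per port, before the host is registered.
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = ata_slave_link_init(host->ports[i]);
 *		if (rc)
 *			return rc;
 *	}
 */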
5748 
5749 static void ata_host_stop(struct device *gendev, void *res)
5750 {
5751 	struct ata_host *host = dev_get_drvdata(gendev);
5752 	int i;
5753 
5754 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5755 
5756 	for (i = 0; i < host->n_ports; i++) {
5757 		struct ata_port *ap = host->ports[i];
5758 
5759 		if (ap->ops->port_stop)
5760 			ap->ops->port_stop(ap);
5761 	}
5762 
5763 	if (host->ops->host_stop)
5764 		host->ops->host_stop(host);
5765 }
5766 
5767 /**
5768  *	ata_finalize_port_ops - finalize ata_port_operations
5769  *	@ops: ata_port_operations to finalize
5770  *
5771  *	An ata_port_operations can inherit from another ops and that
5772  *	ops can again inherit from another.  This can go on as many
5773  *	times as necessary as long as there is no loop in the
5774  *	inheritance chain.
5775  *
5776  *	Ops tables are finalized when the host is started.  NULL or
5777  *	unspecified entries are inherited from the closest ancestor
5778  *	which has the method and the entry is populated with it.
5779  *	After finalization, the ops table directly points to all the
5780  *	methods and ->inherits is no longer necessary and cleared.
5781  *
5782  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5783  *
5784  *	LOCKING:
5785  *	None.
5786  */
5787 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5788 {
5789 	static DEFINE_SPINLOCK(lock);
5790 	const struct ata_port_operations *cur;
5791 	void **begin = (void **)ops;
5792 	void **end = (void **)&ops->inherits;
5793 	void **pp;
5794 
5795 	if (!ops || !ops->inherits)
5796 		return;
5797 
5798 	spin_lock(&lock);
5799 
5800 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5801 		void **inherit = (void **)cur;
5802 
5803 		for (pp = begin; pp < end; pp++, inherit++)
5804 			if (!*pp)
5805 				*pp = *inherit;
5806 	}
5807 
5808 	for (pp = begin; pp < end; pp++)
5809 		if (IS_ERR(*pp))
5810 			*pp = NULL;
5811 
5812 	ops->inherits = NULL;
5813 
5814 	spin_unlock(&lock);
5815 }
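
/*
 * Example (hypothetical foo_* ops): an LLD declares only the methods
 * it overrides; everything else is filled in from the ancestor chain
 * when the host is started.  ATA_OP_NULL forces an inherited method
 * to NULL.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= foo_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *		.scr_read	= foo_scr_read,
 *		.scr_write	= foo_scr_write,
 *	};
 */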
5816 
5817 /**
5818  *	ata_host_start - start and freeze ports of an ATA host
5819  *	@host: ATA host to start ports for
5820  *
5821  *	Start and then freeze ports of @host.  Started status is
5822  *	recorded in host->flags, so this function can be called
5823  *	multiple times.  Ports are guaranteed to get started only
5824  *	once.  If host->ops isn't initialized yet, it is set to the
5825  *	first non-dummy port ops.
5826  *
5827  *	LOCKING:
5828  *	Inherited from calling layer (may sleep).
5829  *
5830  *	RETURNS:
5831  *	0 if all ports are started successfully, -errno otherwise.
5832  */
5833 int ata_host_start(struct ata_host *host)
5834 {
5835 	int have_stop = 0;
5836 	void *start_dr = NULL;
5837 	int i, rc;
5838 
5839 	if (host->flags & ATA_HOST_STARTED)
5840 		return 0;
5841 
5842 	ata_finalize_port_ops(host->ops);
5843 
5844 	for (i = 0; i < host->n_ports; i++) {
5845 		struct ata_port *ap = host->ports[i];
5846 
5847 		ata_finalize_port_ops(ap->ops);
5848 
5849 		if (!host->ops && !ata_port_is_dummy(ap))
5850 			host->ops = ap->ops;
5851 
5852 		if (ap->ops->port_stop)
5853 			have_stop = 1;
5854 	}
5855 
5856 	if (host->ops->host_stop)
5857 		have_stop = 1;
5858 
5859 	if (have_stop) {
5860 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5861 		if (!start_dr)
5862 			return -ENOMEM;
5863 	}
5864 
5865 	for (i = 0; i < host->n_ports; i++) {
5866 		struct ata_port *ap = host->ports[i];
5867 
5868 		if (ap->ops->port_start) {
5869 			rc = ap->ops->port_start(ap);
5870 			if (rc) {
5871 				if (rc != -ENODEV)
5872 					dev_printk(KERN_ERR, host->dev,
5873 						"failed to start port %d "
5874 						"(errno=%d)\n", i, rc);
5875 				goto err_out;
5876 			}
5877 		}
5878 		ata_eh_freeze_port(ap);
5879 	}
5880 
5881 	if (start_dr)
5882 		devres_add(host->dev, start_dr);
5883 	host->flags |= ATA_HOST_STARTED;
5884 	return 0;
5885 
5886  err_out:
5887 	while (--i >= 0) {
5888 		struct ata_port *ap = host->ports[i];
5889 
5890 		if (ap->ops->port_stop)
5891 			ap->ops->port_stop(ap);
5892 	}
5893 	devres_free(start_dr);
5894 	return rc;
5895 }
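
/*
 * Sketch (hypothetical foo_* names): per-port private data allocated
 * from a ->port_start() hook; ata_host_start() invokes it for every
 * port and records ATA_HOST_STARTED so repeated calls are harmless.
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		struct foo_port_priv *pp;
 *
 *		pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */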
5896 
5897 /**
5898  *	ata_sas_host_init - Initialize a host struct
5899  *	@host:	host to initialize
5900  *	@dev:	device host is attached to
5901  *	@flags:	host flags
5902  *	@ops:	port_ops
5903  *
5904  *	LOCKING:
5905  *	PCI/etc. bus probe sem.
5906  *
5907  */
5908 /* KILLME - the only user left is ipr */
5909 void ata_host_init(struct ata_host *host, struct device *dev,
5910 		   unsigned long flags, struct ata_port_operations *ops)
5911 {
5912 	spin_lock_init(&host->lock);
5913 	host->dev = dev;
5914 	host->flags = flags;
5915 	host->ops = ops;
5916 }
5917 
5918 
5919 static void async_port_probe(void *data, async_cookie_t cookie)
5920 {
5921 	int rc;
5922 	struct ata_port *ap = data;
5923 
5924 	/*
5925 	 * If we're not allowed to scan this host in parallel,
5926 	 * we need to wait until all previous scans have completed
5927 	 * before going further.
5928 	 * Jeff Garzik says this is only within a controller, so we
5929 	 * don't need to wait for port 0, only for later ports.
5930 	 */
5931 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5932 		async_synchronize_cookie(cookie);
5933 
5934 	/* probe */
5935 	if (ap->ops->error_handler) {
5936 		struct ata_eh_info *ehi = &ap->link.eh_info;
5937 		unsigned long flags;
5938 
5939 		ata_port_probe(ap);
5940 
5941 		/* kick EH for boot probing */
5942 		spin_lock_irqsave(ap->lock, flags);
5943 
5944 		ehi->probe_mask |= ATA_ALL_DEVICES;
5945 		ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
5946 		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5947 
5948 		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5949 		ap->pflags |= ATA_PFLAG_LOADING;
5950 		ata_port_schedule_eh(ap);
5951 
5952 		spin_unlock_irqrestore(ap->lock, flags);
5953 
5954 		/* wait for EH to finish */
5955 		ata_port_wait_eh(ap);
5956 	} else {
5957 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5958 		rc = ata_bus_probe(ap);
5959 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
5960 
5961 		if (rc) {
5962 			/* FIXME: do something useful here?
5963 			 * Current libata behavior will
5964 			 * tear down everything when
5965 			 * the module is removed
5966 			 * or the h/w is unplugged.
5967 			 */
5968 		}
5969 	}
5970 
5971 	/* in order to keep device order, we need to synchronize at this point */
5972 	async_synchronize_cookie(cookie);
5973 
5974 	ata_scsi_scan_host(ap, 1);
5975 }
5976 
5977 /**
5978  *	ata_host_register - register initialized ATA host
5979  *	@host: ATA host to register
5980  *	@sht: template for SCSI host
5981  *
5982  *	Register initialized ATA host.  @host is allocated using
5983  *	ata_host_alloc() and fully initialized by LLD.  This function
5984  *	starts ports, registers @host with ATA and SCSI layers and
5985  *	probes registered devices.
5986  *
5987  *	LOCKING:
5988  *	Inherited from calling layer (may sleep).
5989  *
5990  *	RETURNS:
5991  *	0 on success, -errno otherwise.
5992  */
5993 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5994 {
5995 	int i, rc;
5996 
5997 	/* host must have been started */
5998 	if (!(host->flags & ATA_HOST_STARTED)) {
5999 		dev_printk(KERN_ERR, host->dev,
6000 			   "BUG: trying to register unstarted host\n");
6001 		WARN_ON(1);
6002 		return -EINVAL;
6003 	}
6004 
6005 	/* Blow away unused ports.  This happens when the LLD can't
6006 	 * determine the exact number of ports to allocate at
6007 	 * allocation time.
6008 	 */
6009 	for (i = host->n_ports; host->ports[i]; i++)
6010 		kfree(host->ports[i]);
6011 
6012 	/* give ports names and add SCSI hosts */
6013 	for (i = 0; i < host->n_ports; i++)
6014 		host->ports[i]->print_id = ata_print_id++;
6015 
6016 	rc = ata_scsi_add_hosts(host, sht);
6017 	if (rc)
6018 		return rc;
6019 
6020 	/* associate with ACPI nodes */
6021 	ata_acpi_associate(host);
6022 
6023 	/* set cable, sata_spd_limit and report */
6024 	for (i = 0; i < host->n_ports; i++) {
6025 		struct ata_port *ap = host->ports[i];
6026 		unsigned long xfer_mask;
6027 
6028 		/* set SATA cable type if still unset */
6029 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6030 			ap->cbl = ATA_CBL_SATA;
6031 
6032 		/* init sata_spd_limit to the current value */
6033 		sata_link_init_spd(&ap->link);
6034 		if (ap->slave_link)
6035 			sata_link_init_spd(ap->slave_link);
6036 
6037 		/* print per-port info to dmesg */
6038 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6039 					      ap->udma_mask);
6040 
6041 		if (!ata_port_is_dummy(ap)) {
6042 			ata_port_printk(ap, KERN_INFO,
6043 					"%cATA max %s %s\n",
6044 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6045 					ata_mode_string(xfer_mask),
6046 					ap->link.eh_info.desc);
6047 			ata_ehi_clear_desc(&ap->link.eh_info);
6048 		} else
6049 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6050 	}
6051 
6052 	/* perform each probe asynchronously */
6053 	DPRINTK("probe begin\n");
6054 	for (i = 0; i < host->n_ports; i++) {
6055 		struct ata_port *ap = host->ports[i];
6056 		async_schedule(async_port_probe, ap);
6057 	}
6058 	DPRINTK("probe end\n");
6059 
6060 	return 0;
6061 }
6062 
6063 /**
6064  *	ata_host_activate - start host, request IRQ and register it
6065  *	@host: target ATA host
6066  *	@irq: IRQ to request
6067  *	@irq_handler: irq_handler used when requesting IRQ
6068  *	@irq_flags: irq_flags used when requesting IRQ
6069  *	@sht: scsi_host_template to use when registering the host
6070  *
6071  *	After allocating an ATA host and initializing it, most libata
6072  *	LLDs perform three steps to activate the host - start host,
6073  *	request IRQ and register it.  This helper takes necessary
6074  *	arguments and performs the three steps in one go.
6075  *
6076  *	An invalid IRQ skips the IRQ registration and expects the host to
6077  *	have set polling mode on the port. In this case, @irq_handler
6078  *	should be NULL.
6079  *
6080  *	LOCKING:
6081  *	Inherited from calling layer (may sleep).
6082  *
6083  *	RETURNS:
6084  *	0 on success, -errno otherwise.
6085  */
6086 int ata_host_activate(struct ata_host *host, int irq,
6087 		      irq_handler_t irq_handler, unsigned long irq_flags,
6088 		      struct scsi_host_template *sht)
6089 {
6090 	int i, rc;
6091 
6092 	rc = ata_host_start(host);
6093 	if (rc)
6094 		return rc;
6095 
6096 	/* Special case for polling mode */
6097 	if (!irq) {
6098 		WARN_ON(irq_handler);
6099 		return ata_host_register(host, sht);
6100 	}
6101 
6102 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6103 			      dev_driver_string(host->dev), host);
6104 	if (rc)
6105 		return rc;
6106 
6107 	for (i = 0; i < host->n_ports; i++)
6108 		ata_port_desc(host->ports[i], "irq %d", irq);
6109 
6110 	rc = ata_host_register(host, sht);
6111 	/* if failed, just free the IRQ and leave ports alone */
6112 	if (rc)
6113 		devm_free_irq(host->dev, irq, host);
6114 
6115 	return rc;
6116 }
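
/*
 * Typical PCI probe sketch (hypothetical foo_* names): allocate the
 * host, set up per-port I/O addresses, then let ata_host_activate()
 * start the ports, request the IRQ and register the host in one call.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		... map BARs and fill each ap->ioaddr here ...
 *
 *		pci_set_master(pdev);
 *		return ata_host_activate(host, pdev->irq, foo_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */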
6117 
6118 /**
6119  *	ata_port_detach - Detach ATA port in preparation of device removal
6120  *	@ap: ATA port to be detached
6121  *
6122  *	Detach all ATA devices and the associated SCSI devices of @ap;
6123  *	then, remove the associated SCSI host.  @ap is guaranteed to
6124  *	be quiescent on return from this function.
6125  *
6126  *	LOCKING:
6127  *	Kernel thread context (may sleep).
6128  */
6129 static void ata_port_detach(struct ata_port *ap)
6130 {
6131 	unsigned long flags;
6132 
6133 	if (!ap->ops->error_handler)
6134 		goto skip_eh;
6135 
6136 	/* tell EH we're leaving & flush EH */
6137 	spin_lock_irqsave(ap->lock, flags);
6138 	ap->pflags |= ATA_PFLAG_UNLOADING;
6139 	ata_port_schedule_eh(ap);
6140 	spin_unlock_irqrestore(ap->lock, flags);
6141 
6142 	/* wait till EH commits suicide */
6143 	ata_port_wait_eh(ap);
6144 
6145 	/* it better be dead now */
6146 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6147 
6148 	cancel_rearming_delayed_work(&ap->hotplug_task);
6149 
6150  skip_eh:
6151 	/* remove the associated SCSI host */
6152 	scsi_remove_host(ap->scsi_host);
6153 }
6154 
6155 /**
6156  *	ata_host_detach - Detach all ports of an ATA host
6157  *	@host: Host to detach
6158  *
6159  *	Detach all ports of @host.
6160  *
6161  *	LOCKING:
6162  *	Kernel thread context (may sleep).
6163  */
6164 void ata_host_detach(struct ata_host *host)
6165 {
6166 	int i;
6167 
6168 	for (i = 0; i < host->n_ports; i++)
6169 		ata_port_detach(host->ports[i]);
6170 
6171 	/* the host is dead now, dissociate ACPI */
6172 	ata_acpi_dissociate(host);
6173 }
6174 
6175 #ifdef CONFIG_PCI
6176 
6177 /**
6178  *	ata_pci_remove_one - PCI layer callback for device removal
6179  *	@pdev: PCI device that was removed
6180  *
6181  *	PCI layer indicates to libata via this hook that hot-unplug or
6182  *	module unload event has occurred.  Detach all ports.  Resource
6183  *	release is handled via devres.
6184  *
6185  *	LOCKING:
6186  *	Inherited from PCI layer (may sleep).
6187  */
6188 void ata_pci_remove_one(struct pci_dev *pdev)
6189 {
6190 	struct device *dev = &pdev->dev;
6191 	struct ata_host *host = dev_get_drvdata(dev);
6192 
6193 	ata_host_detach(host);
6194 }
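
/*
 * Sketch (hypothetical foo_* names): ata_pci_remove_one() is normally
 * wired directly into the LLD's struct pci_driver; resource release
 * itself is handled by devres.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	};
 */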
6195 
6196 /* move to PCI subsystem */
6197 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6198 {
6199 	unsigned long tmp = 0;
6200 
6201 	switch (bits->width) {
6202 	case 1: {
6203 		u8 tmp8 = 0;
6204 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6205 		tmp = tmp8;
6206 		break;
6207 	}
6208 	case 2: {
6209 		u16 tmp16 = 0;
6210 		pci_read_config_word(pdev, bits->reg, &tmp16);
6211 		tmp = tmp16;
6212 		break;
6213 	}
6214 	case 4: {
6215 		u32 tmp32 = 0;
6216 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6217 		tmp = tmp32;
6218 		break;
6219 	}
6220 
6221 	default:
6222 		return -EINVAL;
6223 	}
6224 
6225 	tmp &= bits->mask;
6226 
6227 	return (tmp == bits->val) ? 1 : 0;
6228 }
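
/*
 * Usage sketch (register offsets and bits below are made up): legacy
 * IDE-style LLDs use this from prereset to skip channels the BIOS left
 * disabled.  Entries are indexed by port number.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;
 */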
6229 
6230 #ifdef CONFIG_PM
6231 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6232 {
6233 	pci_save_state(pdev);
6234 	pci_disable_device(pdev);
6235 
6236 	if (mesg.event & PM_EVENT_SLEEP)
6237 		pci_set_power_state(pdev, PCI_D3hot);
6238 }
6239 
6240 int ata_pci_device_do_resume(struct pci_dev *pdev)
6241 {
6242 	int rc;
6243 
6244 	pci_set_power_state(pdev, PCI_D0);
6245 	pci_restore_state(pdev);
6246 
6247 	rc = pcim_enable_device(pdev);
6248 	if (rc) {
6249 		dev_printk(KERN_ERR, &pdev->dev,
6250 			   "failed to enable device after resume (%d)\n", rc);
6251 		return rc;
6252 	}
6253 
6254 	pci_set_master(pdev);
6255 	return 0;
6256 }
6257 
6258 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6259 {
6260 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6261 	int rc = 0;
6262 
6263 	rc = ata_host_suspend(host, mesg);
6264 	if (rc)
6265 		return rc;
6266 
6267 	ata_pci_device_do_suspend(pdev, mesg);
6268 
6269 	return 0;
6270 }
6271 
6272 int ata_pci_device_resume(struct pci_dev *pdev)
6273 {
6274 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6275 	int rc;
6276 
6277 	rc = ata_pci_device_do_resume(pdev);
6278 	if (rc == 0)
6279 		ata_host_resume(host);
6280 	return rc;
6281 }
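
/*
 * Sketch (hypothetical): a driver that needs no extra suspend/resume
 * work can point its pci_driver hooks straight at these helpers.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		...
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */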
6282 #endif /* CONFIG_PM */
6283 
6284 #endif /* CONFIG_PCI */
6285 
6286 static int __init ata_parse_force_one(char **cur,
6287 				      struct ata_force_ent *force_ent,
6288 				      const char **reason)
6289 {
6290 	/* FIXME: Currently, there's no way to tag init const data and
6291 	 * using __initdata causes build failure on some versions of
6292 	 * gcc.  Once __initdataconst is implemented, add const to the
6293 	 * following structure.
6294 	 */
6295 	static struct ata_force_param force_tbl[] __initdata = {
6296 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6297 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6298 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6299 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6300 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6301 		{ "sata",	.cbl		= ATA_CBL_SATA },
6302 		{ "1.5Gbps",	.spd_limit	= 1 },
6303 		{ "3.0Gbps",	.spd_limit	= 2 },
6304 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6305 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6306 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6307 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6308 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6309 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6310 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6311 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6312 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6313 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6314 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6315 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6316 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6317 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6318 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6319 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6320 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6321 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6322 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6323 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6324 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6325 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6326 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6327 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6328 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6329 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6330 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6331 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6332 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6333 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6334 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6335 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6336 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6337 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6338 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6339 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6340 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6341 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6342 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6343 	};
6344 	char *start = *cur, *p = *cur;
6345 	char *id, *val, *endp;
6346 	const struct ata_force_param *match_fp = NULL;
6347 	int nr_matches = 0, i;
6348 
6349 	/* find where this param ends and update *cur */
6350 	while (*p != '\0' && *p != ',')
6351 		p++;
6352 
6353 	if (*p == '\0')
6354 		*cur = p;
6355 	else
6356 		*cur = p + 1;
6357 
6358 	*p = '\0';
6359 
6360 	/* parse */
6361 	p = strchr(start, ':');
6362 	if (!p) {
6363 		val = strstrip(start);
6364 		goto parse_val;
6365 	}
6366 	*p = '\0';
6367 
6368 	id = strstrip(start);
6369 	val = strstrip(p + 1);
6370 
6371 	/* parse id */
6372 	p = strchr(id, '.');
6373 	if (p) {
6374 		*p++ = '\0';
6375 		force_ent->device = simple_strtoul(p, &endp, 10);
6376 		if (p == endp || *endp != '\0') {
6377 			*reason = "invalid device";
6378 			return -EINVAL;
6379 		}
6380 	}
6381 
6382 	force_ent->port = simple_strtoul(id, &endp, 10);
6383 	if (id == endp || *endp != '\0') {
6384 		*reason = "invalid port/link";
6385 		return -EINVAL;
6386 	}
6387 
6388  parse_val:
6389 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6390 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6391 		const struct ata_force_param *fp = &force_tbl[i];
6392 
6393 		if (strncasecmp(val, fp->name, strlen(val)))
6394 			continue;
6395 
6396 		nr_matches++;
6397 		match_fp = fp;
6398 
6399 		if (strcasecmp(val, fp->name) == 0) {
6400 			nr_matches = 1;
6401 			break;
6402 		}
6403 	}
6404 
6405 	if (!nr_matches) {
6406 		*reason = "unknown value";
6407 		return -EINVAL;
6408 	}
6409 	if (nr_matches > 1) {
6410 		*reason = "ambiguous value";
6411 		return -EINVAL;
6412 	}
6413 
6414 	force_ent->param = *match_fp;
6415 
6416 	return 0;
6417 }
6418 
6419 static void __init ata_parse_force_param(void)
6420 {
6421 	int idx = 0, size = 1;
6422 	int last_port = -1, last_device = -1;
6423 	char *p, *cur, *next;
6424 
6425 	/* calculate maximum number of params and allocate force_tbl */
6426 	for (p = ata_force_param_buf; *p; p++)
6427 		if (*p == ',')
6428 			size++;
6429 
6430 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6431 	if (!ata_force_tbl) {
6432 		printk(KERN_WARNING "ata: failed to extend force table, "
6433 		       "libata.force ignored\n");
6434 		return;
6435 	}
6436 
6437 	/* parse and populate the table */
6438 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6439 		const char *reason = "";
6440 		struct ata_force_ent te = { .port = -1, .device = -1 };
6441 
6442 		next = cur;
6443 		if (ata_parse_force_one(&next, &te, &reason)) {
6444 			printk(KERN_WARNING "ata: failed to parse force "
6445 			       "parameter \"%s\" (%s)\n",
6446 			       cur, reason);
6447 			continue;
6448 		}
6449 
6450 		if (te.port == -1) {
6451 			te.port = last_port;
6452 			te.device = last_device;
6453 		}
6454 
6455 		ata_force_tbl[idx++] = te;
6456 
6457 		last_port = te.port;
6458 		last_device = te.device;
6459 	}
6460 
6461 	ata_force_tbl_size = idx;
6462 }
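
/*
 * Example strings accepted by the parser above, given on the kernel
 * command line or as a module parameter:
 *
 *	libata.force=3.0Gbps		limit every link to 3.0Gbps
 *	libata.force=2:noncq		disable NCQ on port 2
 *	libata.force=2:1.5Gbps,3:nohrst	several entries, comma separated
 */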
6463 
6464 static int __init ata_init(void)
6465 {
6466 	ata_parse_force_param();
6467 
6468 	ata_wq = create_workqueue("ata");
6469 	if (!ata_wq)
6470 		goto free_force_tbl;
6471 
6472 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6473 	if (!ata_aux_wq)
6474 		goto free_wq;
6475 
6476 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6477 	return 0;
6478 
6479 free_wq:
6480 	destroy_workqueue(ata_wq);
6481 free_force_tbl:
6482 	kfree(ata_force_tbl);
6483 	return -ENOMEM;
6484 }
6485 
6486 static void __exit ata_exit(void)
6487 {
6488 	kfree(ata_force_tbl);
6489 	destroy_workqueue(ata_wq);
6490 	destroy_workqueue(ata_aux_wq);
6491 }
6492 
6493 subsys_initcall(ata_init);
6494 module_exit(ata_exit);
6495 
6496 static unsigned long ratelimit_time;
6497 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6498 
6499 int ata_ratelimit(void)
6500 {
6501 	int rc;
6502 	unsigned long flags;
6503 
6504 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6505 
6506 	if (time_after(jiffies, ratelimit_time)) {
6507 		rc = 1;
6508 		ratelimit_time = jiffies + (HZ/5);
6509 	} else
6510 		rc = 0;
6511 
6512 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6513 
6514 	return rc;
6515 }
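
/*
 * Usage sketch: throttle noisy diagnostics in interrupt paths, e.g.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt, status 0x%x\n", status);
 */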
6516 
6517 /**
6518  *	ata_wait_register - wait until register value changes
6519  *	@reg: IO-mapped register
6520  *	@mask: Mask to apply to read register value
6521  *	@val: Wait condition
6522  *	@interval: polling interval in milliseconds
6523  *	@timeout: timeout in milliseconds
6524  *
6525  *	Waiting for some bits of register to change is a common
6526  *	operation for ATA controllers.  This function reads 32bit LE
6527  *	IO-mapped register @reg and tests for the following condition.
6528  *
6529  *	(*@reg & @mask) != @val
6530  *
6531  *	If the condition is met, it returns; otherwise, the process is
6532  *	repeated after @interval until timeout.
6533  *
6534  *	LOCKING:
6535  *	Kernel thread context (may sleep)
6536  *
6537  *	RETURNS:
6538  *	The final register value.
6539  */
6540 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6541 		      unsigned long interval, unsigned long timeout)
6542 {
6543 	unsigned long deadline;
6544 	u32 tmp;
6545 
6546 	tmp = ioread32(reg);
6547 
6548 	/* Calculate timeout _after_ the first read to make sure
6549 	 * preceding writes reach the controller before starting to
6550 	 * eat away the timeout.
6551 	 */
6552 	deadline = ata_deadline(jiffies, timeout);
6553 
6554 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6555 		msleep(interval);
6556 		tmp = ioread32(reg);
6557 	}
6558 
6559 	return tmp;
6560 }
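
/*
 * Usage sketch (hypothetical register at offset 0x10 with a busy bit
 * in bit 0): poll every 10ms for up to 1s until the bit clears.
 *
 *	tmp = ata_wait_register(mmio + 0x10, 0x1, 0x1, 10, 1000);
 *	if (tmp & 0x1)
 *		return -EBUSY;
 *
 * A still-set bit in the returned value means the wait timed out.
 */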
6561 
6562 /*
6563  * Dummy port_ops
6564  */
6565 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6566 {
6567 	return AC_ERR_SYSTEM;
6568 }
6569 
6570 static void ata_dummy_error_handler(struct ata_port *ap)
6571 {
6572 	/* truly dummy */
6573 }
6574 
6575 struct ata_port_operations ata_dummy_port_ops = {
6576 	.qc_prep		= ata_noop_qc_prep,
6577 	.qc_issue		= ata_dummy_qc_issue,
6578 	.error_handler		= ata_dummy_error_handler,
6579 };
6580 
6581 const struct ata_port_info ata_dummy_port_info = {
6582 	.port_ops		= &ata_dummy_port_ops,
6583 };
6584 
6585 /*
6586  * libata is essentially a library of internal helper functions for
6587  * low-level ATA host controller drivers.  As such, the API/ABI is
6588  * likely to change as new drivers are added and updated.
6589  * Do not depend on ABI/API stability.
6590  */
6591 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6592 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6593 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6594 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6595 EXPORT_SYMBOL_GPL(sata_port_ops);
6596 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6597 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6598 EXPORT_SYMBOL_GPL(ata_link_next);
6599 EXPORT_SYMBOL_GPL(ata_dev_next);
6600 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6601 EXPORT_SYMBOL_GPL(ata_host_init);
6602 EXPORT_SYMBOL_GPL(ata_host_alloc);
6603 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6604 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6605 EXPORT_SYMBOL_GPL(ata_host_start);
6606 EXPORT_SYMBOL_GPL(ata_host_register);
6607 EXPORT_SYMBOL_GPL(ata_host_activate);
6608 EXPORT_SYMBOL_GPL(ata_host_detach);
6609 EXPORT_SYMBOL_GPL(ata_sg_init);
6610 EXPORT_SYMBOL_GPL(ata_qc_complete);
6611 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6612 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6613 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6614 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6615 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6616 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6617 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6618 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6619 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6620 EXPORT_SYMBOL_GPL(ata_mode_string);
6621 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6622 EXPORT_SYMBOL_GPL(ata_port_start);
6623 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6624 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6625 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6626 EXPORT_SYMBOL_GPL(ata_port_probe);
6627 EXPORT_SYMBOL_GPL(ata_dev_disable);
6628 EXPORT_SYMBOL_GPL(sata_set_spd);
6629 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6630 EXPORT_SYMBOL_GPL(sata_link_debounce);
6631 EXPORT_SYMBOL_GPL(sata_link_resume);
6632 EXPORT_SYMBOL_GPL(ata_std_prereset);
6633 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6634 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6635 EXPORT_SYMBOL_GPL(ata_std_postreset);
6636 EXPORT_SYMBOL_GPL(ata_dev_classify);
6637 EXPORT_SYMBOL_GPL(ata_dev_pair);
6638 EXPORT_SYMBOL_GPL(ata_port_disable);
6639 EXPORT_SYMBOL_GPL(ata_ratelimit);
6640 EXPORT_SYMBOL_GPL(ata_wait_register);
6641 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6642 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6643 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6644 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6645 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6646 EXPORT_SYMBOL_GPL(sata_scr_valid);
6647 EXPORT_SYMBOL_GPL(sata_scr_read);
6648 EXPORT_SYMBOL_GPL(sata_scr_write);
6649 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6650 EXPORT_SYMBOL_GPL(ata_link_online);
6651 EXPORT_SYMBOL_GPL(ata_link_offline);
6652 #ifdef CONFIG_PM
6653 EXPORT_SYMBOL_GPL(ata_host_suspend);
6654 EXPORT_SYMBOL_GPL(ata_host_resume);
6655 #endif /* CONFIG_PM */
6656 EXPORT_SYMBOL_GPL(ata_id_string);
6657 EXPORT_SYMBOL_GPL(ata_id_c_string);
6658 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6659 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6660 
6661 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6662 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6663 EXPORT_SYMBOL_GPL(ata_timing_compute);
6664 EXPORT_SYMBOL_GPL(ata_timing_merge);
6665 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6666 
6667 #ifdef CONFIG_PCI
6668 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6669 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6670 #ifdef CONFIG_PM
6671 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6672 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6673 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6674 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6675 #endif /* CONFIG_PM */
6676 #endif /* CONFIG_PCI */
6677 
6678 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6679 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6680 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6681 EXPORT_SYMBOL_GPL(ata_port_desc);
6682 #ifdef CONFIG_PCI
6683 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6684 #endif /* CONFIG_PCI */
6685 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6686 EXPORT_SYMBOL_GPL(ata_link_abort);
6687 EXPORT_SYMBOL_GPL(ata_port_abort);
6688 EXPORT_SYMBOL_GPL(ata_port_freeze);
6689 EXPORT_SYMBOL_GPL(sata_async_notification);
6690 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6691 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6692 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6693 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6694 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6695 EXPORT_SYMBOL_GPL(ata_do_eh);
6696 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6697 
6698 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6699 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6700 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6701 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6702 EXPORT_SYMBOL_GPL(ata_cable_sata);
6703