/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>

#include "libata.h"


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };

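/*
 * Illustrative sketch, not part of the driver: the debounce tables
 * above are meant to be handed to the SATA PHY helpers, typically
 * from a hardreset path.  Which table to use (normal vs. hotplug vs.
 * long) is the caller's policy decision:
 *
 *	const unsigned long *timing = sata_deb_timing_normal;
 *
 *	rc = sata_link_resume(link, timing, deadline);
 *
 * sata_link_resume() is the resume-and-debounce helper defined later
 * in libata; the surrounding code is a hypothetical caller.
 */
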
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

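/*
 * Illustrative only: libata.force takes a comma-separated list of
 * "[ID:]value" entries on the kernel command line, where ID names a
 * port ("1:") or a port.device pair ("1.00:"); see
 * Documentation/kernel-parameters.txt.  Hypothetical boot arguments:
 *
 *	libata.force=1:40c		force 40-wire cable on port 1
 *	libata.force=1.00:udma4		limit port 1, device 0 to UDMA/66
 *	libata.force=1.00:40c,udma4	both at once
 *
 * The force-table helpers below (ata_force_cbl() and friends) consume
 * the parsed result of this parameter.
 */
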
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

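/*
 * Illustrative sketch: these iterators are normally used through the
 * ata_for_each_link()/ata_for_each_dev() wrappers (see e.g.
 * ata_lpm_enable() below).  Walking every enabled device behind a
 * port, host link first:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, HOST_FIRST)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_printk(dev, KERN_INFO, "enabled\n");
 */
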
/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up the physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on the
 *	slave link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has a matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_printk(ap, KERN_NOTICE,
				"FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link, not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has a slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: xfer_mask set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: horkage modified (%s)\n", fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through */
	default:
		return ATAPI_MISC;
	}
}

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

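/*
 * Round-trip note (illustrative): ata_tf_to_fis() and
 * ata_tf_from_fis() are inverses for the register fields they carry.
 * A driver typically builds the H2D FIS from a taskfile before issue
 * and reconstructs the result taskfile from the received FIS:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *	...
 *	ata_tf_from_fis(received_fis, &qc->result_tf);
 *
 * received_fis here stands for a hypothetical buffer filled in by the
 * controller hardware.
 */
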
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

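/*
 * Worked example (reader aid, not driver code): the ata_rw_cmds[]
 * lookup is indexed as base + fua + lba48 + write, where base is 0
 * for PIO multi, 8 for PIO and 16 for DMA.  A DMA write with LBA48
 * and FUA therefore resolves to index 16 + 4 + 2 + 1 = 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT, the last entry in the table.  A zero entry
 * means the combination has no command and ata_rwcmd_protocol()
 * returns -1.
 */
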
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

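/*
 * Illustrative sketch (hypothetical caller): building a 16-sector
 * FUA write taskfile.  In the driver proper this is done by the SCSI
 * translation layer; shown here only to demonstrate the API:
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	if (ata_build_rw_tf(&tf, dev, block, 16,
 *			    ATA_TFLAG_WRITE | ATA_TFLAG_FUA,
 *			    ATA_TAG_INTERNAL))
 *		return;		/* out of range or invalid for @dev */
 *
 * Passing ATA_TAG_INTERNAL skips the NCQ branch, as the code above
 * shows.
 */
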
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

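/*
 * Round-trip sketch (illustrative): xfer_mask packs the three mode
 * masks into one word, and the unpack helper splits them back apart,
 * as ata_force_xfermask() above does:
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
 *				      dev->udma_mask);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 */
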
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

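/*
 * Example (reader aid): given the tables above, the highest set bit
 * wins.  Assuming the usual mode numbering from <linux/ata.h> (7 PIO
 * modes followed by 5 MWDMA modes, matching xfer_mode_str[]):
 *
 *	ata_mode_string(ata_xfer_mode2mask(XFER_UDMA_5));
 *
 * yields "UDMA/100", since the resulting mask's top bit is UDMA5.
 */
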
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}

static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because disks are too stupid to know that
	 * if the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL; instead they just
	 * give up.  So, for medium_power to work at all,
	 * we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}

/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev:  device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for the min_power
 *	policy and call driver specific callbacks for enabling Host
 *	Initiated Power Management.
 *
 *	Locking: Caller.
 *	Returns: Nothing; on failure, @ap->pm_policy falls back to
 *	MAX_PERFORMANCE.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}

#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing the
 *	policy, and call driver specific callbacks for disabling Host
 *	Initiated Power Management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we check only those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec never mentioned using different signatures for ATA/ATAPI
	 * devices.  Then, the Serial ATA II: Port Multiplier
	 * specification began to use 0x69/0x96 to identify port
	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 shortly afterwards dropped the descriptions of
	 * 0x3c/0xc3 and 0x69/0x96 and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return.  Must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return.  Must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

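/*
 * Typical use (sketch): pulling the model string out of IDENTIFY
 * data.  ATA_ID_PROD/ATA_ID_PROD_LEN are the usual offset and length
 * constants from <linux/ata.h> (assumed here):
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */
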
1326 static u64 ata_id_n_sectors(const u16 *id)
1327 {
1328 	if (ata_id_has_lba(id)) {
1329 		if (ata_id_has_lba48(id))
1330 			return ata_id_u64(id, 100);
1331 		else
1332 			return ata_id_u32(id, 60);
1333 	} else {
1334 		if (ata_id_current_chs_valid(id))
1335 			return ata_id_u32(id, 57);
1336 		else
1337 			return id[1] * id[3] * id[6];
1338 	}
1339 }
1340 
1341 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1342 {
1343 	u64 sectors = 0;
1344 
1345 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1346 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1347 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1348 	sectors |= (tf->lbah & 0xff) << 16;
1349 	sectors |= (tf->lbam & 0xff) << 8;
1350 	sectors |= (tf->lbal & 0xff);
1351 
1352 	return sectors;
1353 }
1354 
1355 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1356 {
1357 	u64 sectors = 0;
1358 
1359 	sectors |= (tf->device & 0x0f) << 24;
1360 	sectors |= (tf->lbah & 0xff) << 16;
1361 	sectors |= (tf->lbam & 0xff) << 8;
1362 	sectors |= (tf->lbal & 0xff);
1363 
1364 	return sectors;
1365 }
1366 
1367 /**
1368  *	ata_read_native_max_address - Read native max address
1369  *	@dev: target device
1370  *	@max_sectors: out parameter for the result native max address
1371  *
1372  *	Perform an LBA48 or LBA28 native size query upon the device in
1373  *	question.
1374  *
1375  *	RETURNS:
1376  *	0 on success, -EACCES if command is aborted by the drive.
1377  *	-EIO on other errors.
1378  */
1379 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1380 {
1381 	unsigned int err_mask;
1382 	struct ata_taskfile tf;
1383 	int lba48 = ata_id_has_lba48(dev->id);
1384 
1385 	ata_tf_init(dev, &tf);
1386 
1387 	/* always clear all address registers */
1388 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1389 
1390 	if (lba48) {
1391 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1392 		tf.flags |= ATA_TFLAG_LBA48;
1393 	} else
1394 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1395 
1396 	tf.protocol |= ATA_PROT_NODATA;
1397 	tf.device |= ATA_LBA;
1398 
1399 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1400 	if (err_mask) {
1401 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1402 			       "max address (err_mask=0x%x)\n", err_mask);
1403 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1404 			return -EACCES;
1405 		return -EIO;
1406 	}
1407 
1408 	if (lba48)
1409 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1410 	else
1411 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1412 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1413 		(*max_sectors)--;
1414 	return 0;
1415 }
1416 
1417 /**
1418  *	ata_set_max_sectors - Set max sectors
1419  *	@dev: target device
1420  *	@new_sectors: new max sectors value to set for the device
1421  *
1422  *	Set max sectors of @dev to @new_sectors.
1423  *
1424  *	RETURNS:
1425  *	0 on success, -EACCES if command is aborted or denied (due to
1426  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1427  *	errors.
1428  */
1429 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1430 {
1431 	unsigned int err_mask;
1432 	struct ata_taskfile tf;
1433 	int lba48 = ata_id_has_lba48(dev->id);
1434 
1435 	new_sectors--;
1436 
1437 	ata_tf_init(dev, &tf);
1438 
1439 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1440 
1441 	if (lba48) {
1442 		tf.command = ATA_CMD_SET_MAX_EXT;
1443 		tf.flags |= ATA_TFLAG_LBA48;
1444 
1445 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1446 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1447 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1448 	} else {
1449 		tf.command = ATA_CMD_SET_MAX;
1450 
1451 		tf.device |= (new_sectors >> 24) & 0xf;
1452 	}
1453 
1454 	tf.protocol |= ATA_PROT_NODATA;
1455 	tf.device |= ATA_LBA;
1456 
1457 	tf.lbal = (new_sectors >> 0) & 0xff;
1458 	tf.lbam = (new_sectors >> 8) & 0xff;
1459 	tf.lbah = (new_sectors >> 16) & 0xff;
1460 
1461 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1462 	if (err_mask) {
1463 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1464 			       "max address (err_mask=0x%x)\n", err_mask);
1465 		if (err_mask == AC_ERR_DEV &&
1466 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1467 			return -EACCES;
1468 		return -EIO;
1469 	}
1470 
1471 	return 0;
1472 }
1473 
1474 /**
1475  *	ata_hpa_resize		-	Resize a device with an HPA set
1476  *	@dev: Device to resize
1477  *
1478  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1479  *	it if required to the full size of the media. The caller must check
1480  *	the drive has the HPA feature set enabled.
1481  *
1482  *	RETURNS:
1483  *	0 on success, -errno on failure.
1484  */
1485 static int ata_hpa_resize(struct ata_device *dev)
1486 {
1487 	struct ata_eh_context *ehc = &dev->link->eh_context;
1488 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1489 	u64 sectors = ata_id_n_sectors(dev->id);
1490 	u64 native_sectors;
1491 	int rc;
1492 
1493 	/* do we need to do it? */
1494 	if (dev->class != ATA_DEV_ATA ||
1495 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1496 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1497 		return 0;
1498 
1499 	/* read native max address */
1500 	rc = ata_read_native_max_address(dev, &native_sectors);
1501 	if (rc) {
1502 		/* If device aborted the command or HPA isn't going to
1503 		 * be unlocked, skip HPA resizing.
1504 		 */
1505 		if (rc == -EACCES || !ata_ignore_hpa) {
1506 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1507 				       "broken, skipping HPA handling\n");
1508 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1509 
1510 			/* we can continue if device aborted the command */
1511 			if (rc == -EACCES)
1512 				rc = 0;
1513 		}
1514 
1515 		return rc;
1516 	}
1517 
1518 	/* nothing to do? */
1519 	if (native_sectors <= sectors || !ata_ignore_hpa) {
1520 		if (!print_info || native_sectors == sectors)
1521 			return 0;
1522 
1523 		if (native_sectors > sectors)
1524 			ata_dev_printk(dev, KERN_INFO,
1525 				"HPA detected: current %llu, native %llu\n",
1526 				(unsigned long long)sectors,
1527 				(unsigned long long)native_sectors);
1528 		else if (native_sectors < sectors)
1529 			ata_dev_printk(dev, KERN_WARNING,
1530 				"native sectors (%llu) is smaller than "
1531 				"sectors (%llu)\n",
1532 				(unsigned long long)native_sectors,
1533 				(unsigned long long)sectors);
1534 		return 0;
1535 	}
1536 
1537 	/* let's unlock HPA */
1538 	rc = ata_set_max_sectors(dev, native_sectors);
1539 	if (rc == -EACCES) {
1540 		/* if device aborted the command, skip HPA resizing */
1541 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1542 			       "(%llu -> %llu), skipping HPA handling\n",
1543 			       (unsigned long long)sectors,
1544 			       (unsigned long long)native_sectors);
1545 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1546 		return 0;
1547 	} else if (rc)
1548 		return rc;
1549 
1550 	/* re-read IDENTIFY data */
1551 	rc = ata_dev_reread_id(dev, 0);
1552 	if (rc) {
1553 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1554 			       "data after HPA resizing\n");
1555 		return rc;
1556 	}
1557 
1558 	if (print_info) {
1559 		u64 new_sectors = ata_id_n_sectors(dev->id);
1560 		ata_dev_printk(dev, KERN_INFO,
1561 			"HPA unlocked: %llu -> %llu, native %llu\n",
1562 			(unsigned long long)sectors,
1563 			(unsigned long long)new_sectors,
1564 			(unsigned long long)native_sectors);
1565 	}
1566 
1567 	return 0;
1568 }
1569 
1570 /**
1571  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1572  *	@id: IDENTIFY DEVICE page to dump
1573  *
1574  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1575  *	page.
1576  *
1577  *	LOCKING:
1578  *	caller.
1579  */
1580 
1581 static inline void ata_dump_id(const u16 *id)
1582 {
1583 	DPRINTK("49==0x%04x  "
1584 		"53==0x%04x  "
1585 		"63==0x%04x  "
1586 		"64==0x%04x  "
1587 		"75==0x%04x  \n",
1588 		id[49],
1589 		id[53],
1590 		id[63],
1591 		id[64],
1592 		id[75]);
1593 	DPRINTK("80==0x%04x  "
1594 		"81==0x%04x  "
1595 		"82==0x%04x  "
1596 		"83==0x%04x  "
1597 		"84==0x%04x  \n",
1598 		id[80],
1599 		id[81],
1600 		id[82],
1601 		id[83],
1602 		id[84]);
1603 	DPRINTK("88==0x%04x  "
1604 		"93==0x%04x\n",
1605 		id[88],
1606 		id[93]);
1607 }
1608 
1609 /**
1610  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1611  *	@id: IDENTIFY data to compute xfer mask from
1612  *
1613  *	Compute the xfermask for this device. This is not as trivial
1614  *	as it seems if we must consider early devices correctly.
1615  *
1616  *	FIXME: pre IDE drive timing (do we care ?).
1617  *
1618  *	LOCKING:
1619  *	None.
1620  *
1621  *	RETURNS:
1622  *	Computed xfermask
1623  */
1624 unsigned long ata_id_xfermask(const u16 *id)
1625 {
1626 	unsigned long pio_mask, mwdma_mask, udma_mask;
1627 
1628 	/* Usual case. Word 53 indicates word 64 is valid */
1629 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1630 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1631 		pio_mask <<= 3;
1632 		pio_mask |= 0x7;
1633 	} else {
1634 		/* If word 64 isn't valid then Word 51 high byte holds
1635 		 * the PIO timing number for the maximum. Turn it into
1636 		 * a mask.
1637 		 */
1638 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1639 		if (mode < 5)	/* Valid PIO range */
1640 			pio_mask = (2 << mode) - 1;
1641 		else
1642 			pio_mask = 1;
1643 
1644 		/* But wait.. there's more. Design your standards by
1645 		 * committee and you too can get a free iordy field to
1646 		 * process. However its the speeds not the modes that
1647 		 * are supported... Note drivers using the timing API
1648 		 * will get this right anyway
1649 		 */
1650 	}
1651 
1652 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1653 
1654 	if (ata_id_is_cfa(id)) {
1655 		/*
1656 		 *	Process compact flash extended modes
1657 		 */
1658 		int pio = id[163] & 0x7;
1659 		int dma = (id[163] >> 3) & 7;
1660 
1661 		if (pio)
1662 			pio_mask |= (1 << 5);
1663 		if (pio > 1)
1664 			pio_mask |= (1 << 6);
1665 		if (dma)
1666 			mwdma_mask |= (1 << 3);
1667 		if (dma > 1)
1668 			mwdma_mask |= (1 << 4);
1669 	}
1670 
1671 	udma_mask = 0;
1672 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1673 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1674 
1675 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1676 }
1677 
1678 /**
1679  *	ata_pio_queue_task - Queue port_task
1680  *	@ap: The ata_port to queue port_task for
1681  *	@data: data for @fn to use
1682  *	@delay: delay time in msecs for workqueue function
1683  *
1684  *	Schedule @fn(@data) for execution after @delay jiffies using
1685  *	port_task.  There is one port_task per port and it's the
1686  *	user(low level driver)'s responsibility to make sure that only
1687  *	one task is active at any given time.
1688  *
1689  *	libata core layer takes care of synchronization between
1690  *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
1691  *	synchronization.
1692  *
1693  *	LOCKING:
1694  *	Inherited from caller.
1695  */
1696 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1697 {
1698 	ap->port_task_data = data;
1699 
1700 	/* may fail if ata_port_flush_task() in progress */
1701 	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1702 }
1703 
1704 /**
1705  *	ata_port_flush_task - Flush port_task
1706  *	@ap: The ata_port to flush port_task for
1707  *
1708  *	After this function completes, port_task is guranteed not to
1709  *	be running or scheduled.
1710  *
1711  *	LOCKING:
1712  *	Kernel thread context (may sleep)
1713  */
1714 void ata_port_flush_task(struct ata_port *ap)
1715 {
1716 	DPRINTK("ENTER\n");
1717 
1718 	cancel_rearming_delayed_work(&ap->port_task);
1719 
1720 	if (ata_msg_ctl(ap))
1721 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1722 }
1723 
1724 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1725 {
1726 	struct completion *waiting = qc->private_data;
1727 
1728 	complete(waiting);
1729 }
1730 
1731 /**
1732  *	ata_exec_internal_sg - execute libata internal command
1733  *	@dev: Device to which the command is sent
1734  *	@tf: Taskfile registers for the command and the result
1735  *	@cdb: CDB for packet command
1736  *	@dma_dir: Data tranfer direction of the command
1737  *	@sgl: sg list for the data buffer of the command
1738  *	@n_elem: Number of sg entries
1739  *	@timeout: Timeout in msecs (0 for default)
1740  *
1741  *	Executes libata internal command with timeout.  @tf contains
1742  *	command on entry and result on return.  Timeout and error
1743  *	conditions are reported via return value.  No recovery action
1744  *	is taken after a command times out.  It's caller's duty to
1745  *	clean up after timeout.
1746  *
1747  *	LOCKING:
1748  *	None.  Should be called with kernel context, might sleep.
1749  *
1750  *	RETURNS:
1751  *	Zero on success, AC_ERR_* mask on failure
1752  */
1753 unsigned ata_exec_internal_sg(struct ata_device *dev,
1754 			      struct ata_taskfile *tf, const u8 *cdb,
1755 			      int dma_dir, struct scatterlist *sgl,
1756 			      unsigned int n_elem, unsigned long timeout)
1757 {
1758 	struct ata_link *link = dev->link;
1759 	struct ata_port *ap = link->ap;
1760 	u8 command = tf->command;
1761 	int auto_timeout = 0;
1762 	struct ata_queued_cmd *qc;
1763 	unsigned int tag, preempted_tag;
1764 	u32 preempted_sactive, preempted_qc_active;
1765 	int preempted_nr_active_links;
1766 	DECLARE_COMPLETION_ONSTACK(wait);
1767 	unsigned long flags;
1768 	unsigned int err_mask;
1769 	int rc;
1770 
1771 	spin_lock_irqsave(ap->lock, flags);
1772 
1773 	/* no internal command while frozen */
1774 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1775 		spin_unlock_irqrestore(ap->lock, flags);
1776 		return AC_ERR_SYSTEM;
1777 	}
1778 
1779 	/* initialize internal qc */
1780 
1781 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1782 	 * drivers choke if any other tag is given.  This breaks
1783 	 * ata_tag_internal() test for those drivers.  Don't use new
1784 	 * EH stuff without converting to it.
1785 	 */
1786 	if (ap->ops->error_handler)
1787 		tag = ATA_TAG_INTERNAL;
1788 	else
1789 		tag = 0;
1790 
1791 	if (test_and_set_bit(tag, &ap->qc_allocated))
1792 		BUG();
1793 	qc = __ata_qc_from_tag(ap, tag);
1794 
1795 	qc->tag = tag;
1796 	qc->scsicmd = NULL;
1797 	qc->ap = ap;
1798 	qc->dev = dev;
1799 	ata_qc_reinit(qc);
1800 
1801 	preempted_tag = link->active_tag;
1802 	preempted_sactive = link->sactive;
1803 	preempted_qc_active = ap->qc_active;
1804 	preempted_nr_active_links = ap->nr_active_links;
1805 	link->active_tag = ATA_TAG_POISON;
1806 	link->sactive = 0;
1807 	ap->qc_active = 0;
1808 	ap->nr_active_links = 0;
1809 
1810 	/* prepare & issue qc */
1811 	qc->tf = *tf;
1812 	if (cdb)
1813 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1814 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1815 	qc->dma_dir = dma_dir;
1816 	if (dma_dir != DMA_NONE) {
1817 		unsigned int i, buflen = 0;
1818 		struct scatterlist *sg;
1819 
1820 		for_each_sg(sgl, sg, n_elem, i)
1821 			buflen += sg->length;
1822 
1823 		ata_sg_init(qc, sgl, n_elem);
1824 		qc->nbytes = buflen;
1825 	}
1826 
1827 	qc->private_data = &wait;
1828 	qc->complete_fn = ata_qc_complete_internal;
1829 
1830 	ata_qc_issue(qc);
1831 
1832 	spin_unlock_irqrestore(ap->lock, flags);
1833 
1834 	if (!timeout) {
1835 		if (ata_probe_timeout)
1836 			timeout = ata_probe_timeout * 1000;
1837 		else {
1838 			timeout = ata_internal_cmd_timeout(dev, command);
1839 			auto_timeout = 1;
1840 		}
1841 	}
1842 
1843 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1844 
1845 	ata_port_flush_task(ap);
1846 
1847 	if (!rc) {
1848 		spin_lock_irqsave(ap->lock, flags);
1849 
1850 		/* We're racing with irq here.  If we lose, the
1851 		 * following test prevents us from completing the qc
1852 		 * twice.  If we win, the port is frozen and will be
1853 		 * cleaned up by ->post_internal_cmd().
1854 		 */
1855 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1856 			qc->err_mask |= AC_ERR_TIMEOUT;
1857 
1858 			if (ap->ops->error_handler)
1859 				ata_port_freeze(ap);
1860 			else
1861 				ata_qc_complete(qc);
1862 
1863 			if (ata_msg_warn(ap))
1864 				ata_dev_printk(dev, KERN_WARNING,
1865 					"qc timeout (cmd 0x%x)\n", command);
1866 		}
1867 
1868 		spin_unlock_irqrestore(ap->lock, flags);
1869 	}
1870 
1871 	/* do post_internal_cmd */
1872 	if (ap->ops->post_internal_cmd)
1873 		ap->ops->post_internal_cmd(qc);
1874 
1875 	/* perform minimal error analysis */
1876 	if (qc->flags & ATA_QCFLAG_FAILED) {
1877 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1878 			qc->err_mask |= AC_ERR_DEV;
1879 
1880 		if (!qc->err_mask)
1881 			qc->err_mask |= AC_ERR_OTHER;
1882 
1883 		if (qc->err_mask & ~AC_ERR_OTHER)
1884 			qc->err_mask &= ~AC_ERR_OTHER;
1885 	}
1886 
1887 	/* finish up */
1888 	spin_lock_irqsave(ap->lock, flags);
1889 
1890 	*tf = qc->result_tf;
1891 	err_mask = qc->err_mask;
1892 
1893 	ata_qc_free(qc);
1894 	link->active_tag = preempted_tag;
1895 	link->sactive = preempted_sactive;
1896 	ap->qc_active = preempted_qc_active;
1897 	ap->nr_active_links = preempted_nr_active_links;
1898 
1899 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1900 	 * Until those drivers are fixed, we detect the condition
1901 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1902 	 * port.
1903 	 *
1904 	 * Note that this doesn't change any behavior as internal
1905 	 * command failure results in disabling the device in the
1906 	 * higher layer for LLDDs without new reset/EH callbacks.
1907 	 *
1908 	 * Kill the following code as soon as those drivers are fixed.
1909 	 */
1910 	if (ap->flags & ATA_FLAG_DISABLED) {
1911 		err_mask |= AC_ERR_SYSTEM;
1912 		ata_port_probe(ap);
1913 	}
1914 
1915 	spin_unlock_irqrestore(ap->lock, flags);
1916 
1917 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1918 		ata_internal_cmd_timed_out(dev, command);
1919 
1920 	return err_mask;
1921 }
1922 
1923 /**
1924  *	ata_exec_internal - execute libata internal command
1925  *	@dev: Device to which the command is sent
1926  *	@tf: Taskfile registers for the command and the result
1927  *	@cdb: CDB for packet command
1928  *	@dma_dir: Data transfer direction of the command
1929  *	@buf: Data buffer of the command
1930  *	@buflen: Length of data buffer
1931  *	@timeout: Timeout in msecs (0 for default)
1932  *
1933  *	Wrapper around ata_exec_internal_sg() which takes simple
1934  *	buffer instead of sg list.
1935  *
1936  *	LOCKING:
1937  *	None.  Should be called with kernel context, might sleep.
1938  *
1939  *	RETURNS:
1940  *	Zero on success, AC_ERR_* mask on failure
1941  */
1942 unsigned ata_exec_internal(struct ata_device *dev,
1943 			   struct ata_taskfile *tf, const u8 *cdb,
1944 			   int dma_dir, void *buf, unsigned int buflen,
1945 			   unsigned long timeout)
1946 {
1947 	struct scatterlist *psg = NULL, sg;
1948 	unsigned int n_elem = 0;
1949 
1950 	if (dma_dir != DMA_NONE) {
1951 		WARN_ON(!buf);
1952 		sg_init_one(&sg, buf, buflen);
1953 		psg = &sg;
1954 		n_elem++;
1955 	}
1956 
1957 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1958 				    timeout);
1959 }
1960 
1961 /**
1962  *	ata_do_simple_cmd - execute simple internal command
1963  *	@dev: Device to which the command is sent
1964  *	@cmd: Opcode to execute
1965  *
1966  *	Execute a 'simple' command, that only consists of the opcode
1967  *	'cmd' itself, without filling any other registers
1968  *
1969  *	LOCKING:
1970  *	Kernel thread context (may sleep).
1971  *
1972  *	RETURNS:
1973  *	Zero on success, AC_ERR_* mask on failure
1974  */
1975 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1976 {
1977 	struct ata_taskfile tf;
1978 
1979 	ata_tf_init(dev, &tf);
1980 
1981 	tf.command = cmd;
1982 	tf.flags |= ATA_TFLAG_DEVICE;
1983 	tf.protocol = ATA_PROT_NODATA;
1984 
1985 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1986 }
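
/*
 * Usage sketch (illustrative): spinning a drive down with a single
 * no-data opcode.  Error handling is the caller's job.
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_WARNING,
 *			       "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
 *			       err_mask);
 */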
1987 
1988 /**
1989  *	ata_pio_need_iordy	-	check if iordy needed
1990  *	@adev: ATA device
1991  *
1992  *	Check if the current speed of the device requires IORDY. Used
1993  *	by various controllers for chip configuration.
1994  */
1995 
1996 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1997 {
1998 	/* Controller doesn't support IORDY. Probably a pointless check
1999 	   as the caller should know this. */
2000 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
2001 		return 0;
2002 	/* For PIO3 and higher, IORDY is mandatory */
2003 	if (adev->pio_mode > XFER_PIO_2)
2004 		return 1;
2005 	/* We turn it on when possible */
2006 	if (ata_id_has_iordy(adev->id))
2007 		return 1;
2008 	return 0;
2009 }
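
/*
 * Usage sketch (hypothetical driver code): a PATA LLD's ->set_piomode()
 * would typically consult ata_pio_need_iordy() when programming cycle
 * timings.  my_chip_program_pio() is an assumed, driver-private helper.
 *
 *	static void my_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *	{
 *		int use_iordy = ata_pio_need_iordy(adev);
 *
 *		my_chip_program_pio(ap, adev->pio_mode, use_iordy);
 *	}
 */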
2010 
2011 /**
2012  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
2013  *	@adev: ATA device
2014  *
2015  *	Compute the mask of PIO modes usable when IORDY is not in use.
2016  *	Returns a conservative default mask if the drive gives no rule.
2017  */
2018 
2019 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2020 {
2021 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
2022 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
2023 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
2024 		/* Is the speed faster than the drive allows without IORDY? */
2025 		if (pio) {
2026 			/* This is cycle times not frequency - watch the logic! */
2027 			if (pio > 240)	/* PIO2 is 240nS per cycle */
2028 				return 3 << ATA_SHIFT_PIO;
2029 			return 7 << ATA_SHIFT_PIO;
2030 		}
2031 	}
2032 	return 3 << ATA_SHIFT_PIO;
2033 }
2034 
2035 /**
2036  *	ata_do_dev_read_id		-	default ID read method
2037  *	@dev: device
2038  *	@tf: proposed taskfile
2039  *	@id: data buffer
2040  *
2041  *	Issue the identify taskfile and hand back the buffer containing
2042  *	identify data. For some RAID controllers and for pre ATA devices
2043  *	this function is wrapped or replaced by the driver
2044  */
2045 unsigned int ata_do_dev_read_id(struct ata_device *dev,
2046 					struct ata_taskfile *tf, u16 *id)
2047 {
2048 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
2049 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
2050 }
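
/*
 * Usage sketch (hypothetical): an LLD that must post-process IDENTIFY
 * data can wrap this default method via the ->read_id port operation.
 * my_fixup_id() is an assumed helper; the hook signature is real.
 *
 *	static unsigned int my_read_id(struct ata_device *dev,
 *				       struct ata_taskfile *tf, u16 *id)
 *	{
 *		unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);
 *
 *		if (!err_mask)
 *			my_fixup_id(id);
 *		return err_mask;
 *	}
 */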
2051 
2052 /**
2053  *	ata_dev_read_id - Read ID data from the specified device
2054  *	@dev: target device
2055  *	@p_class: pointer to class of the target device (may be changed)
2056  *	@flags: ATA_READID_* flags
2057  *	@id: buffer to read IDENTIFY data into
2058  *
2059  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
2060  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
2061  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
2062  *	for pre-ATA4 drives.
2063  *
2064  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2065  *	now we abort if we hit that case.
2066  *
2067  *	LOCKING:
2068  *	Kernel thread context (may sleep)
2069  *
2070  *	RETURNS:
2071  *	0 on success, -errno otherwise.
2072  */
2073 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2074 		    unsigned int flags, u16 *id)
2075 {
2076 	struct ata_port *ap = dev->link->ap;
2077 	unsigned int class = *p_class;
2078 	struct ata_taskfile tf;
2079 	unsigned int err_mask = 0;
2080 	const char *reason;
2081 	int may_fallback = 1, tried_spinup = 0;
2082 	int rc;
2083 
2084 	if (ata_msg_ctl(ap))
2085 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2086 
2087 retry:
2088 	ata_tf_init(dev, &tf);
2089 
2090 	switch (class) {
2091 	case ATA_DEV_ATA:
2092 		tf.command = ATA_CMD_ID_ATA;
2093 		break;
2094 	case ATA_DEV_ATAPI:
2095 		tf.command = ATA_CMD_ID_ATAPI;
2096 		break;
2097 	default:
2098 		rc = -ENODEV;
2099 		reason = "unsupported class";
2100 		goto err_out;
2101 	}
2102 
2103 	tf.protocol = ATA_PROT_PIO;
2104 
2105 	/* Some devices choke if TF registers contain garbage.  Make
2106 	 * sure those are properly initialized.
2107 	 */
2108 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2109 
2110 	/* Device presence detection is unreliable on some
2111 	 * controllers.  Always poll IDENTIFY if available.
2112 	 */
2113 	tf.flags |= ATA_TFLAG_POLLING;
2114 
2115 	if (ap->ops->read_id)
2116 		err_mask = ap->ops->read_id(dev, &tf, id);
2117 	else
2118 		err_mask = ata_do_dev_read_id(dev, &tf, id);
2119 
2120 	if (err_mask) {
2121 		if (err_mask & AC_ERR_NODEV_HINT) {
2122 			ata_dev_printk(dev, KERN_DEBUG,
2123 				       "NODEV after polling detection\n");
2124 			return -ENOENT;
2125 		}
2126 
2127 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2128 			/* Device or controller might have reported
2129 			 * the wrong device class.  Give a shot at the
2130 			 * other IDENTIFY if the current one is
2131 			 * aborted by the device.
2132 			 */
2133 			if (may_fallback) {
2134 				may_fallback = 0;
2135 
2136 				if (class == ATA_DEV_ATA)
2137 					class = ATA_DEV_ATAPI;
2138 				else
2139 					class = ATA_DEV_ATA;
2140 				goto retry;
2141 			}
2142 
2143 			/* Control reaches here iff the device aborted
2144 			 * both flavors of IDENTIFYs which happens
2145 			 * sometimes with phantom devices.
2146 			 */
2147 			ata_dev_printk(dev, KERN_DEBUG,
2148 				       "both IDENTIFYs aborted, assuming NODEV\n");
2149 			return -ENOENT;
2150 		}
2151 
2152 		rc = -EIO;
2153 		reason = "I/O error";
2154 		goto err_out;
2155 	}
2156 
2157 	/* Falling back doesn't make sense if ID data was read
2158 	 * successfully at least once.
2159 	 */
2160 	may_fallback = 0;
2161 
2162 	swap_buf_le16(id, ATA_ID_WORDS);
2163 
2164 	/* sanity check */
2165 	rc = -EINVAL;
2166 	reason = "device reports invalid type";
2167 
2168 	if (class == ATA_DEV_ATA) {
2169 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2170 			goto err_out;
2171 	} else {
2172 		if (ata_id_is_ata(id))
2173 			goto err_out;
2174 	}
2175 
2176 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2177 		tried_spinup = 1;
2178 		/*
2179 		 * Drive powered-up in standby mode, and requires a specific
2180 		 * SET_FEATURES spin-up subcommand before it will accept
2181 		 * anything other than the original IDENTIFY command.
2182 		 */
2183 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2184 		if (err_mask && id[2] != 0x738c) {
2185 			rc = -EIO;
2186 			reason = "SPINUP failed";
2187 			goto err_out;
2188 		}
2189 		/*
2190 		 * If the drive initially returned incomplete IDENTIFY info,
2191 		 * we now must reissue the IDENTIFY command.
2192 		 */
2193 		if (id[2] == 0x37c8)
2194 			goto retry;
2195 	}
2196 
2197 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2198 		/*
2199 		 * The exact sequence expected by certain pre-ATA4 drives is:
2200 		 * SRST RESET
2201 		 * IDENTIFY (optional in early ATA)
2202 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2203 		 * anything else...
2204 		 * Some drives were very specific about that exact sequence.
2205 		 *
2206 		 * Note that ATA4 says LBA is mandatory so the second check
2207 		 * should never trigger.
2208 		 */
2209 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2210 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2211 			if (err_mask) {
2212 				rc = -EIO;
2213 				reason = "INIT_DEV_PARAMS failed";
2214 				goto err_out;
2215 			}
2216 
2217 			/* current CHS translation info (id[53-58]) might be
2218 			 * changed. reread the identify device info.
2219 			 */
2220 			flags &= ~ATA_READID_POSTRESET;
2221 			goto retry;
2222 		}
2223 	}
2224 
2225 	*p_class = class;
2226 
2227 	return 0;
2228 
2229  err_out:
2230 	if (ata_msg_warn(ap))
2231 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2232 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2233 	return rc;
2234 }
2235 
2236 static inline u8 ata_dev_knobble(struct ata_device *dev)
2237 {
2238 	struct ata_port *ap = dev->link->ap;
2239 
2240 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2241 		return 0;
2242 
2243 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2244 }
2245 
2246 static void ata_dev_config_ncq(struct ata_device *dev,
2247 			       char *desc, size_t desc_sz)
2248 {
2249 	struct ata_port *ap = dev->link->ap;
2250 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2251 
2252 	if (!ata_id_has_ncq(dev->id)) {
2253 		desc[0] = '\0';
2254 		return;
2255 	}
2256 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2257 		snprintf(desc, desc_sz, "NCQ (not used)");
2258 		return;
2259 	}
2260 	if (ap->flags & ATA_FLAG_NCQ) {
2261 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2262 		dev->flags |= ATA_DFLAG_NCQ;
2263 	}
2264 
2265 	if (hdepth >= ddepth)
2266 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2267 	else
2268 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2269 }
2270 
2271 /**
2272  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2273  *	@dev: Target device to configure
2274  *
2275  *	Configure @dev according to @dev->id.  Generic and low-level
2276  *	driver specific fixups are also applied.
2277  *
2278  *	LOCKING:
2279  *	Kernel thread context (may sleep)
2280  *
2281  *	RETURNS:
2282  *	0 on success, -errno otherwise
2283  */
2284 int ata_dev_configure(struct ata_device *dev)
2285 {
2286 	struct ata_port *ap = dev->link->ap;
2287 	struct ata_eh_context *ehc = &dev->link->eh_context;
2288 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2289 	const u16 *id = dev->id;
2290 	unsigned long xfer_mask;
2291 	char revbuf[7];		/* XYZ-99\0 */
2292 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2293 	char modelbuf[ATA_ID_PROD_LEN+1];
2294 	int rc;
2295 
2296 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2297 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2298 			       __func__);
2299 		return 0;
2300 	}
2301 
2302 	if (ata_msg_probe(ap))
2303 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2304 
2305 	/* set horkage */
2306 	dev->horkage |= ata_dev_blacklisted(dev);
2307 	ata_force_horkage(dev);
2308 
2309 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2310 		ata_dev_printk(dev, KERN_INFO,
2311 			       "unsupported device, disabling\n");
2312 		ata_dev_disable(dev);
2313 		return 0;
2314 	}
2315 
2316 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2317 	    dev->class == ATA_DEV_ATAPI) {
2318 		ata_dev_printk(dev, KERN_WARNING,
2319 			"WARNING: ATAPI is %s, device ignored.\n",
2320 			atapi_enabled ? "not supported with this driver"
2321 				      : "disabled");
2322 		ata_dev_disable(dev);
2323 		return 0;
2324 	}
2325 
2326 	/* let ACPI work its magic */
2327 	rc = ata_acpi_on_devcfg(dev);
2328 	if (rc)
2329 		return rc;
2330 
2331 	/* massage HPA, do it early as it might change IDENTIFY data */
2332 	rc = ata_hpa_resize(dev);
2333 	if (rc)
2334 		return rc;
2335 
2336 	/* print device capabilities */
2337 	if (ata_msg_probe(ap))
2338 		ata_dev_printk(dev, KERN_DEBUG,
2339 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2340 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2341 			       __func__,
2342 			       id[49], id[82], id[83], id[84],
2343 			       id[85], id[86], id[87], id[88]);
2344 
2345 	/* initialize to-be-configured parameters */
2346 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2347 	dev->max_sectors = 0;
2348 	dev->cdb_len = 0;
2349 	dev->n_sectors = 0;
2350 	dev->cylinders = 0;
2351 	dev->heads = 0;
2352 	dev->sectors = 0;
2353 
2354 	/*
2355 	 * common ATA, ATAPI feature tests
2356 	 */
2357 
2358 	/* find max transfer mode; for printk only */
2359 	xfer_mask = ata_id_xfermask(id);
2360 
2361 	if (ata_msg_probe(ap))
2362 		ata_dump_id(id);
2363 
2364 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2365 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2366 			sizeof(fwrevbuf));
2367 
2368 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2369 			sizeof(modelbuf));
2370 
2371 	/* ATA-specific feature tests */
2372 	if (dev->class == ATA_DEV_ATA) {
2373 		if (ata_id_is_cfa(id)) {
2374 			if (id[162] & 1) /* CPRM may make this media unusable */
2375 				ata_dev_printk(dev, KERN_WARNING,
2376 					       "supports DRM functions and may "
2377 					       "not be fully accessible.\n");
2378 			snprintf(revbuf, 7, "CFA");
2379 		} else {
2380 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2381 			/* Warn the user if the device has TPM extensions */
2382 			if (ata_id_has_tpm(id))
2383 				ata_dev_printk(dev, KERN_WARNING,
2384 					       "supports DRM functions and may "
2385 					       "not be fully accessible.\n");
2386 		}
2387 
2388 		dev->n_sectors = ata_id_n_sectors(id);
2389 
2390 		if (dev->id[59] & 0x100)
2391 			dev->multi_count = dev->id[59] & 0xff;
2392 
2393 		if (ata_id_has_lba(id)) {
2394 			const char *lba_desc;
2395 			char ncq_desc[20];
2396 
2397 			lba_desc = "LBA";
2398 			dev->flags |= ATA_DFLAG_LBA;
2399 			if (ata_id_has_lba48(id)) {
2400 				dev->flags |= ATA_DFLAG_LBA48;
2401 				lba_desc = "LBA48";
2402 
2403 				if (dev->n_sectors >= (1UL << 28) &&
2404 				    ata_id_has_flush_ext(id))
2405 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2406 			}
2407 
2408 			/* config NCQ */
2409 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2410 
2411 			/* print device info to dmesg */
2412 			if (ata_msg_drv(ap) && print_info) {
2413 				ata_dev_printk(dev, KERN_INFO,
2414 					"%s: %s, %s, max %s\n",
2415 					revbuf, modelbuf, fwrevbuf,
2416 					ata_mode_string(xfer_mask));
2417 				ata_dev_printk(dev, KERN_INFO,
2418 					"%Lu sectors, multi %u: %s %s\n",
2419 					(unsigned long long)dev->n_sectors,
2420 					dev->multi_count, lba_desc, ncq_desc);
2421 			}
2422 		} else {
2423 			/* CHS */
2424 
2425 			/* Default translation */
2426 			dev->cylinders	= id[1];
2427 			dev->heads	= id[3];
2428 			dev->sectors	= id[6];
2429 
2430 			if (ata_id_current_chs_valid(id)) {
2431 				/* Current CHS translation is valid. */
2432 				dev->cylinders = id[54];
2433 				dev->heads     = id[55];
2434 				dev->sectors   = id[56];
2435 			}
2436 
2437 			/* print device info to dmesg */
2438 			if (ata_msg_drv(ap) && print_info) {
2439 				ata_dev_printk(dev, KERN_INFO,
2440 					"%s: %s, %s, max %s\n",
2441 					revbuf,	modelbuf, fwrevbuf,
2442 					ata_mode_string(xfer_mask));
2443 				ata_dev_printk(dev, KERN_INFO,
2444 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2445 					(unsigned long long)dev->n_sectors,
2446 					dev->multi_count, dev->cylinders,
2447 					dev->heads, dev->sectors);
2448 			}
2449 		}
2450 
2451 		dev->cdb_len = 16;
2452 	}
2453 
2454 	/* ATAPI-specific feature tests */
2455 	else if (dev->class == ATA_DEV_ATAPI) {
2456 		const char *cdb_intr_string = "";
2457 		const char *atapi_an_string = "";
2458 		const char *dma_dir_string = "";
2459 		u32 sntf;
2460 
2461 		rc = atapi_cdb_len(id);
2462 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2463 			if (ata_msg_warn(ap))
2464 				ata_dev_printk(dev, KERN_WARNING,
2465 					       "unsupported CDB len\n");
2466 			rc = -EINVAL;
2467 			goto err_out_nosup;
2468 		}
2469 		dev->cdb_len = (unsigned int) rc;
2470 
2471 		/* Enable ATAPI AN if both the host and device have
2472 		 * the support.  If PMP is attached, SNTF is required
2473 		 * to enable ATAPI AN to discern between PHY status
2474 		 * changed notifications and ATAPI ANs.
2475 		 */
2476 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2477 		    (!sata_pmp_attached(ap) ||
2478 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2479 			unsigned int err_mask;
2480 
2481 			/* issue SET feature command to turn this on */
2482 			err_mask = ata_dev_set_feature(dev,
2483 					SETFEATURES_SATA_ENABLE, SATA_AN);
2484 			if (err_mask)
2485 				ata_dev_printk(dev, KERN_ERR,
2486 					"failed to enable ATAPI AN "
2487 					"(err_mask=0x%x)\n", err_mask);
2488 			else {
2489 				dev->flags |= ATA_DFLAG_AN;
2490 				atapi_an_string = ", ATAPI AN";
2491 			}
2492 		}
2493 
2494 		if (ata_id_cdb_intr(dev->id)) {
2495 			dev->flags |= ATA_DFLAG_CDB_INTR;
2496 			cdb_intr_string = ", CDB intr";
2497 		}
2498 
2499 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2500 			dev->flags |= ATA_DFLAG_DMADIR;
2501 			dma_dir_string = ", DMADIR";
2502 		}
2503 
2504 		/* print device info to dmesg */
2505 		if (ata_msg_drv(ap) && print_info)
2506 			ata_dev_printk(dev, KERN_INFO,
2507 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2508 				       modelbuf, fwrevbuf,
2509 				       ata_mode_string(xfer_mask),
2510 				       cdb_intr_string, atapi_an_string,
2511 				       dma_dir_string);
2512 	}
2513 
2514 	/* determine max_sectors */
2515 	dev->max_sectors = ATA_MAX_SECTORS;
2516 	if (dev->flags & ATA_DFLAG_LBA48)
2517 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2518 
2519 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2520 		if (ata_id_has_hipm(dev->id))
2521 			dev->flags |= ATA_DFLAG_HIPM;
2522 		if (ata_id_has_dipm(dev->id))
2523 			dev->flags |= ATA_DFLAG_DIPM;
2524 	}
2525 
2526 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2527 	   200 sectors */
2528 	if (ata_dev_knobble(dev)) {
2529 		if (ata_msg_drv(ap) && print_info)
2530 			ata_dev_printk(dev, KERN_INFO,
2531 				       "applying bridge limits\n");
2532 		dev->udma_mask &= ATA_UDMA5;
2533 		dev->max_sectors = ATA_MAX_SECTORS;
2534 	}
2535 
2536 	if ((dev->class == ATA_DEV_ATAPI) &&
2537 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2538 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2539 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2540 	}
2541 
2542 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2543 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2544 					 dev->max_sectors);
2545 
2546 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2547 		dev->horkage |= ATA_HORKAGE_IPM;
2548 
2549 		/* reset link pm_policy for this port to no pm */
2550 		ap->pm_policy = MAX_PERFORMANCE;
2551 	}
2552 
2553 	if (ap->ops->dev_config)
2554 		ap->ops->dev_config(dev);
2555 
2556 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2557 		/* Let the user know. We don't want to disallow opens for
2558 		   rescue purposes, or in case the vendor is just a blithering
2559 		   idiot. Do this after the dev_config call as some controllers
2560 		   with buggy firmware may want to avoid reporting false device
2561 		   bugs */
2562 
2563 		if (print_info) {
2564 			ata_dev_printk(dev, KERN_WARNING,
2565 "Drive reports diagnostics failure. This may indicate a drive\n");
2566 			ata_dev_printk(dev, KERN_WARNING,
2567 "fault or invalid emulation. Contact drive vendor for information.\n");
2568 		}
2569 	}
2570 
2571 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2572 		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2573 			       "firmware update to be fully functional.\n");
2574 		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
2575 			       "or visit http://ata.wiki.kernel.org.\n");
2576 	}
2577 
2578 	return 0;
2579 
2580 err_out_nosup:
2581 	if (ata_msg_probe(ap))
2582 		ata_dev_printk(dev, KERN_DEBUG,
2583 			       "%s: EXIT, err\n", __func__);
2584 	return rc;
2585 }
2586 
2587 /**
2588  *	ata_cable_40wire	-	return 40 wire cable type
2589  *	@ap: port
2590  *
2591  *	Helper method for drivers which want to hardwire 40 wire cable
2592  *	detection.
2593  */
2594 
2595 int ata_cable_40wire(struct ata_port *ap)
2596 {
2597 	return ATA_CBL_PATA40;
2598 }
2599 
2600 /**
2601  *	ata_cable_80wire	-	return 80 wire cable type
2602  *	@ap: port
2603  *
2604  *	Helper method for drivers which want to hardwire 80 wire cable
2605  *	detection.
2606  */
2607 
2608 int ata_cable_80wire(struct ata_port *ap)
2609 {
2610 	return ATA_CBL_PATA80;
2611 }
2612 
2613 /**
2614  *	ata_cable_unknown	-	return unknown PATA cable.
2615  *	@ap: port
2616  *
2617  *	Helper method for drivers which have no PATA cable detection.
2618  */
2619 
2620 int ata_cable_unknown(struct ata_port *ap)
2621 {
2622 	return ATA_CBL_PATA_UNK;
2623 }
2624 
2625 /**
2626  *	ata_cable_ignore	-	return ignored PATA cable.
2627  *	@ap: port
2628  *
2629  *	Helper method for drivers which don't use cable type to limit
2630  *	transfer mode.
2631  */
2632 int ata_cable_ignore(struct ata_port *ap)
2633 {
2634 	return ATA_CBL_PATA_IGN;
2635 }
2636 
2637 /**
2638  *	ata_cable_sata	-	return SATA cable type
2639  *	@ap: port
2640  *
2641  *	Helper method for drivers which have SATA cables
2642  */
2643 
2644 int ata_cable_sata(struct ata_port *ap)
2645 {
2646 	return ATA_CBL_SATA;
2647 }
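
/*
 * Usage sketch (hypothetical): the cable helpers above are meant to be
 * plugged directly into a driver's port operations.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */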
2648 
2649 /**
2650  *	ata_bus_probe - Reset and probe ATA bus
2651  *	@ap: Bus to probe
2652  *
2653  *	Master ATA bus probing function.  Initiates a hardware-dependent
2654  *	bus reset, then attempts to identify any devices found on
2655  *	the bus.
2656  *
2657  *	LOCKING:
2658  *	PCI/etc. bus probe sem.
2659  *
2660  *	RETURNS:
2661  *	Zero on success, negative errno otherwise.
2662  */
2663 
2664 int ata_bus_probe(struct ata_port *ap)
2665 {
2666 	unsigned int classes[ATA_MAX_DEVICES];
2667 	int tries[ATA_MAX_DEVICES];
2668 	int rc;
2669 	struct ata_device *dev;
2670 
2671 	ata_port_probe(ap);
2672 
2673 	ata_for_each_dev(dev, &ap->link, ALL)
2674 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2675 
2676  retry:
2677 	ata_for_each_dev(dev, &ap->link, ALL) {
2678 		/* If we issue an SRST then an ATA drive (not ATAPI)
2679 		 * may change configuration and be in PIO0 timing. If
2680 		 * we do a hard reset (or are coming from power on)
2681 		 * this is true for ATA or ATAPI. Until we've set a
2682 		 * suitable controller mode we should not touch the
2683 		 * bus as we may be talking too fast.
2684 		 */
2685 		dev->pio_mode = XFER_PIO_0;
2686 
2687 		/* If the controller has a pio mode setup function
2688 		 * then use it to set the chipset to rights. Don't
2689 		 * touch the DMA setup as that will be dealt with when
2690 		 * configuring devices.
2691 		 */
2692 		if (ap->ops->set_piomode)
2693 			ap->ops->set_piomode(ap, dev);
2694 	}
2695 
2696 	/* reset and determine device classes */
2697 	ap->ops->phy_reset(ap);
2698 
2699 	ata_for_each_dev(dev, &ap->link, ALL) {
2700 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2701 		    dev->class != ATA_DEV_UNKNOWN)
2702 			classes[dev->devno] = dev->class;
2703 		else
2704 			classes[dev->devno] = ATA_DEV_NONE;
2705 
2706 		dev->class = ATA_DEV_UNKNOWN;
2707 	}
2708 
2709 	ata_port_probe(ap);
2710 
2711 	/* read IDENTIFY page and configure devices. We have to do the identify
2712 	   specific sequence bass-ackwards so that PDIAG- is released by
2713 	   the slave device */
2714 
2715 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2716 		if (tries[dev->devno])
2717 			dev->class = classes[dev->devno];
2718 
2719 		if (!ata_dev_enabled(dev))
2720 			continue;
2721 
2722 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2723 				     dev->id);
2724 		if (rc)
2725 			goto fail;
2726 	}
2727 
2728 	/* Now ask for the cable type as PDIAG- should have been released */
2729 	if (ap->ops->cable_detect)
2730 		ap->cbl = ap->ops->cable_detect(ap);
2731 
2732 	/* We may have SATA bridge glue hiding here irrespective of
2733 	 * the reported cable types and sensed types.  When SATA
2734 	 * drives indicate we have a bridge, we don't know which end
2735 	 * of the link the bridge is on, which is a problem.
2736 	 */
2737 	ata_for_each_dev(dev, &ap->link, ENABLED)
2738 		if (ata_id_is_sata(dev->id))
2739 			ap->cbl = ATA_CBL_SATA;
2740 
2741 	/* After the identify sequence we can now set up the devices. We do
2742 	   this in the normal order so that the user doesn't get confused */
2743 
2744 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2745 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2746 		rc = ata_dev_configure(dev);
2747 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2748 		if (rc)
2749 			goto fail;
2750 	}
2751 
2752 	/* configure transfer mode */
2753 	rc = ata_set_mode(&ap->link, &dev);
2754 	if (rc)
2755 		goto fail;
2756 
2757 	ata_for_each_dev(dev, &ap->link, ENABLED)
2758 		return 0;
2759 
2760 	/* no device present, disable port */
2761 	ata_port_disable(ap);
2762 	return -ENODEV;
2763 
2764  fail:
2765 	tries[dev->devno]--;
2766 
2767 	switch (rc) {
2768 	case -EINVAL:
2769 		/* eeek, something went very wrong, give up */
2770 		tries[dev->devno] = 0;
2771 		break;
2772 
2773 	case -ENODEV:
2774 		/* give it just one more chance */
2775 		tries[dev->devno] = min(tries[dev->devno], 1);
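		/* fall through */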
2776 	case -EIO:
2777 		if (tries[dev->devno] == 1) {
2778 			/* This is the last chance, better to slow
2779 			 * down than lose it.
2780 			 */
2781 			sata_down_spd_limit(&ap->link);
2782 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2783 		}
2784 	}
2785 
2786 	if (!tries[dev->devno])
2787 		ata_dev_disable(dev);
2788 
2789 	goto retry;
2790 }
2791 
2792 /**
2793  *	ata_port_probe - Mark port as enabled
2794  *	@ap: Port for which we indicate enablement
2795  *
2796  *	Modify @ap data structure such that the system
2797  *	thinks that the entire port is enabled.
2798  *
2799  *	LOCKING: host lock, or some other form of
2800  *	serialization.
2801  */
2802 
2803 void ata_port_probe(struct ata_port *ap)
2804 {
2805 	ap->flags &= ~ATA_FLAG_DISABLED;
2806 }
2807 
2808 /**
2809  *	sata_print_link_status - Print SATA link status
2810  *	@link: SATA link to printk link status about
2811  *
2812  *	This function prints link speed and status of a SATA link.
2813  *
2814  *	LOCKING:
2815  *	None.
2816  */
2817 static void sata_print_link_status(struct ata_link *link)
2818 {
2819 	u32 sstatus, scontrol, tmp;
2820 
2821 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2822 		return;
2823 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2824 
2825 	if (ata_phys_link_online(link)) {
2826 		tmp = (sstatus >> 4) & 0xf;
2827 		ata_link_printk(link, KERN_INFO,
2828 				"SATA link up %s (SStatus %X SControl %X)\n",
2829 				sata_spd_string(tmp), sstatus, scontrol);
2830 	} else {
2831 		ata_link_printk(link, KERN_INFO,
2832 				"SATA link down (SStatus %X SControl %X)\n",
2833 				sstatus, scontrol);
2834 	}
2835 }
2836 
2837 /**
2838  *	ata_dev_pair		-	return other device on cable
2839  *	@adev: device
2840  *
2841  *	Obtain the other device on the same cable, or NULL if none
2842  *	is present.
2843  */
2844 
2845 struct ata_device *ata_dev_pair(struct ata_device *adev)
2846 {
2847 	struct ata_link *link = adev->link;
2848 	struct ata_device *pair = &link->device[1 - adev->devno];
2849 	if (!ata_dev_enabled(pair))
2850 		return NULL;
2851 	return pair;
2852 }
2853 
2854 /**
2855  *	ata_port_disable - Disable port.
2856  *	@ap: Port to be disabled.
2857  *
2858  *	Modify @ap data structure such that the system
2859  *	thinks that the entire port is disabled, and should
2860  *	never attempt to probe or communicate with devices
2861  *	on this port.
2862  *
2863  *	LOCKING: host lock, or some other form of
2864  *	serialization.
2865  */
2866 
2867 void ata_port_disable(struct ata_port *ap)
2868 {
2869 	ap->link.device[0].class = ATA_DEV_NONE;
2870 	ap->link.device[1].class = ATA_DEV_NONE;
2871 	ap->flags |= ATA_FLAG_DISABLED;
2872 }
2873 
2874 /**
2875  *	sata_down_spd_limit - adjust SATA spd limit downward
2876  *	@link: Link to adjust SATA spd limit for
2877  *
2878  *	Adjust SATA spd limit of @link downward.  Note that this
2879  *	function only adjusts the limit.  The change must be applied
2880  *	using sata_set_spd().
2881  *
2882  *	LOCKING:
2883  *	Inherited from caller.
2884  *
2885  *	RETURNS:
2886  *	0 on success, negative errno on failure
2887  */
2888 int sata_down_spd_limit(struct ata_link *link)
2889 {
2890 	u32 sstatus, spd, mask;
2891 	int rc, highbit;
2892 
2893 	if (!sata_scr_valid(link))
2894 		return -EOPNOTSUPP;
2895 
2896 	/* If SCR can be read, use it to determine the current SPD.
2897 	 * If not, use cached value in link->sata_spd.
2898 	 */
2899 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2900 	if (rc == 0)
2901 		spd = (sstatus >> 4) & 0xf;
2902 	else
2903 		spd = link->sata_spd;
2904 
2905 	mask = link->sata_spd_limit;
2906 	if (mask <= 1)
2907 		return -EINVAL;
2908 
2909 	/* unconditionally mask off the highest bit */
2910 	highbit = fls(mask) - 1;
2911 	mask &= ~(1 << highbit);
2912 
2913 	/* Mask off all speeds higher than or equal to the current
2914 	 * one.  Force 1.5Gbps if current SPD is not available.
2915 	 */
2916 	if (spd > 1)
2917 		mask &= (1 << (spd - 1)) - 1;
2918 	else
2919 		mask &= 1;
2920 
2921 	/* were we already at the bottom? */
2922 	if (!mask)
2923 		return -EINVAL;
2924 
2925 	link->sata_spd_limit = mask;
2926 
2927 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2928 			sata_spd_string(fls(mask)));
2929 
2930 	return 0;
2931 }
2932 
2933 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2934 {
2935 	struct ata_link *host_link = &link->ap->link;
2936 	u32 limit, target, spd;
2937 
2938 	limit = link->sata_spd_limit;
2939 
2940 	/* Don't configure downstream link faster than upstream link.
2941 	 * It doesn't speed up anything and some PMPs choke on such
2942 	 * configuration.
2943 	 */
2944 	if (!ata_is_host_link(link) && host_link->sata_spd)
2945 		limit &= (1 << host_link->sata_spd) - 1;
2946 
2947 	if (limit == UINT_MAX)
2948 		target = 0;
2949 	else
2950 		target = fls(limit);
2951 
2952 	spd = (*scontrol >> 4) & 0xf;
2953 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2954 
2955 	return spd != target;
2956 }
2957 
2958 /**
2959  *	sata_set_spd_needed - is SATA spd configuration needed
2960  *	@link: Link in question
2961  *
2962  *	Test whether the spd limit in SControl matches
2963  *	@link->sata_spd_limit.  This function is used to determine
2964  *	whether hardreset is necessary to apply SATA spd
2965  *	configuration.
2966  *
2967  *	LOCKING:
2968  *	Inherited from caller.
2969  *
2970  *	RETURNS:
2971  *	1 if SATA spd configuration is needed, 0 otherwise.
2972  */
2973 static int sata_set_spd_needed(struct ata_link *link)
2974 {
2975 	u32 scontrol;
2976 
2977 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2978 		return 1;
2979 
2980 	return __sata_set_spd_needed(link, &scontrol);
2981 }
2982 
2983 /**
2984  *	sata_set_spd - set SATA spd according to spd limit
2985  *	@link: Link to set SATA spd for
2986  *
2987  *	Set SATA spd of @link according to sata_spd_limit.
2988  *
2989  *	LOCKING:
2990  *	Inherited from caller.
2991  *
2992  *	RETURNS:
2993  *	0 if spd doesn't need to be changed, 1 if spd has been
2994  *	changed.  Negative errno if SCR registers are inaccessible.
2995  */
2996 int sata_set_spd(struct ata_link *link)
2997 {
2998 	u32 scontrol;
2999 	int rc;
3000 
3001 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3002 		return rc;
3003 
3004 	if (!__sata_set_spd_needed(link, &scontrol))
3005 		return 0;
3006 
3007 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3008 		return rc;
3009 
3010 	return 1;
3011 }
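
/*
 * Usage sketch (illustrative): the usual two-step speed-down pattern.
 * sata_down_spd_limit() only lowers the limit; sata_set_spd() rewrites
 * SControl, and a follow-up hardreset (here a hypothetical
 * my_schedule_hardreset()) renegotiates at the new, lower speed.
 *
 *	if (sata_down_spd_limit(link) == 0 && sata_set_spd(link) == 1)
 *		my_schedule_hardreset(link);
 */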
3012 
3013 /*
3014  * This mode timing computation functionality is ported over from
3015  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3016  */
3017 /*
3018  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3019  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3020  * for UDMA6, which is currently supported only by Maxtor drives.
3021  *
3022  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3023  */
3024 
3025 static const struct ata_timing ata_timing[] = {
3026 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
3027 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
3028 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
3029 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
3030 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
3031 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
3032 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
3033 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
3034 
3035 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
3036 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
3037 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
3038 
3039 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
3040 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
3041 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
3042 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
3043 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
3044 
3045 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
3046 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
3047 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
3048 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
3049 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
3050 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
3051 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
3052 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
3053 
3054 	{ 0xFF }
3055 };
3056 
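/* round a duration up to full clock cycles; EZ() keeps zero (unused) fields at zero */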
3057 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3058 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
3059 
3060 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3061 {
3062 	q->setup   = EZ(t->setup   * 1000,  T);
3063 	q->act8b   = EZ(t->act8b   * 1000,  T);
3064 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
3065 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
3066 	q->active  = EZ(t->active  * 1000,  T);
3067 	q->recover = EZ(t->recover * 1000,  T);
3068 	q->cycle   = EZ(t->cycle   * 1000,  T);
3069 	q->udma    = EZ(t->udma    * 1000, UT);
3070 }
3071 
3072 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3073 		      struct ata_timing *m, unsigned int what)
3074 {
3075 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3076 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3077 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3078 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3079 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3080 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3081 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3082 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3083 }
3084 
3085 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3086 {
3087 	const struct ata_timing *t = ata_timing;
3088 
3089 	while (xfer_mode > t->mode)
3090 		t++;
3091 
3092 	if (xfer_mode == t->mode)
3093 		return t;
3094 	return NULL;
3095 }
3096 
3097 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3098 		       struct ata_timing *t, int T, int UT)
3099 {
3100 	const struct ata_timing *s;
3101 	struct ata_timing p;
3102 
3103 	/*
3104 	 * Find the mode.
3105 	 */
3106 
3107 	if (!(s = ata_timing_find_mode(speed)))
3108 		return -EINVAL;
3109 
3110 	memcpy(t, s, sizeof(*s));
3111 
3112 	/*
3113 	 * If the drive is an EIDE drive, it can tell us it needs extended
3114 	 * PIO/MW_DMA cycle timing.
3115 	 */
3116 
3117 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3118 		memset(&p, 0, sizeof(p));
3119 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3120 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3121 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
3122 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
3123 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3124 		}
3125 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3126 	}
3127 
3128 	/*
3129 	 * Convert the timing to bus clock counts.
3130 	 */
3131 
3132 	ata_timing_quantize(t, t, T, UT);
3133 
3134 	/*
3135 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3136 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3137 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3138 	 */
3139 
3140 	if (speed > XFER_PIO_6) {
3141 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3142 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3143 	}
3144 
3145 	/*
3146 	 * Lengthen active & recovery time so that cycle time is correct.
3147 	 */
3148 
3149 	if (t->act8b + t->rec8b < t->cyc8b) {
3150 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3151 		t->rec8b = t->cyc8b - t->act8b;
3152 	}
3153 
3154 	if (t->active + t->recover < t->cycle) {
3155 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3156 		t->recover = t->cycle - t->active;
3157 	}
3158 
3159 	/* In a few cases quantisation may produce enough rounding error
3160 	   to leave t->cycle too low for the sum of active and recovery;
3161 	   if so, we must correct this. */
3162 	if (t->active + t->recover > t->cycle)
3163 		t->cycle = t->active + t->recover;
3164 
3165 	return 0;
3166 }
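
/*
 * Usage sketch (hypothetical driver code): computing PIO timings for a
 * controller clocked at 33 MHz.  T and UT are clock periods in
 * picoseconds; my_chip_load_timing() is an assumed helper.
 *
 *	static void my_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *		int T = 1000000000 / 33333;
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t, T, T))
 *			return;
 *		my_chip_load_timing(ap, adev, &t);
 *	}
 */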
3167 
3168 /**
3169  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3170  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3171  *	@cycle: cycle duration in ns
3172  *
3173  *	Return matching xfer mode for @cycle.  The returned mode is of
3174  *	the transfer type specified by @xfer_shift.  If @cycle is too
3175  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3176  *	than the fastest known mode, the fastest mode is returned.
3177  *
3178  *	LOCKING:
3179  *	None.
3180  *
3181  *	RETURNS:
3182  *	Matching xfer_mode, 0xff if no match found.
3183  */
3184 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3185 {
3186 	u8 base_mode = 0xff, last_mode = 0xff;
3187 	const struct ata_xfer_ent *ent;
3188 	const struct ata_timing *t;
3189 
3190 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3191 		if (ent->shift == xfer_shift)
3192 			base_mode = ent->base;
3193 
3194 	for (t = ata_timing_find_mode(base_mode);
3195 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3196 		unsigned short this_cycle;
3197 
3198 		switch (xfer_shift) {
3199 		case ATA_SHIFT_PIO:
3200 		case ATA_SHIFT_MWDMA:
3201 			this_cycle = t->cycle;
3202 			break;
3203 		case ATA_SHIFT_UDMA:
3204 			this_cycle = t->udma;
3205 			break;
3206 		default:
3207 			return 0xff;
3208 		}
3209 
3210 		if (cycle > this_cycle)
3211 			break;
3212 
3213 		last_mode = t->mode;
3214 	}
3215 
3216 	return last_mode;
3217 }
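
/*
 * Example (illustrative): a required minimum UDMA cycle of 120ns maps
 * to XFER_UDMA_0 (exactly 120ns in the table above), while any cycle
 * shorter than 15ns still returns XFER_UDMA_6, the fastest known mode.
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 120);
 */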
3218 
3219 /**
3220  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3221  *	@dev: Device to adjust xfer masks
3222  *	@sel: ATA_DNXFER_* selector
3223  *
3224  *	Adjust xfer masks of @dev downward.  Note that this function
3225  *	does not apply the change.  Invoking ata_set_mode() afterwards
3226  *	will apply the limit.
3227  *
3228  *	LOCKING:
3229  *	Inherited from caller.
3230  *
3231  *	RETURNS:
3232  *	0 on success, negative errno on failure
3233  */
3234 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3235 {
3236 	char buf[32];
3237 	unsigned long orig_mask, xfer_mask;
3238 	unsigned long pio_mask, mwdma_mask, udma_mask;
3239 	int quiet, highbit;
3240 
3241 	quiet = !!(sel & ATA_DNXFER_QUIET);
3242 	sel &= ~ATA_DNXFER_QUIET;
3243 
3244 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3245 						  dev->mwdma_mask,
3246 						  dev->udma_mask);
3247 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3248 
3249 	switch (sel) {
3250 	case ATA_DNXFER_PIO:
3251 		highbit = fls(pio_mask) - 1;
3252 		pio_mask &= ~(1 << highbit);
3253 		break;
3254 
3255 	case ATA_DNXFER_DMA:
3256 		if (udma_mask) {
3257 			highbit = fls(udma_mask) - 1;
3258 			udma_mask &= ~(1 << highbit);
3259 			if (!udma_mask)
3260 				return -ENOENT;
3261 		} else if (mwdma_mask) {
3262 			highbit = fls(mwdma_mask) - 1;
3263 			mwdma_mask &= ~(1 << highbit);
3264 			if (!mwdma_mask)
3265 				return -ENOENT;
3266 		}
3267 		break;
3268 
3269 	case ATA_DNXFER_40C:
3270 		udma_mask &= ATA_UDMA_MASK_40C;
3271 		break;
3272 
3273 	case ATA_DNXFER_FORCE_PIO0:
3274 		pio_mask &= 1;
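		/* fall through */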
3275 	case ATA_DNXFER_FORCE_PIO:
3276 		mwdma_mask = 0;
3277 		udma_mask = 0;
3278 		break;
3279 
3280 	default:
3281 		BUG();
3282 	}
3283 
3284 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3285 
3286 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3287 		return -ENOENT;
3288 
3289 	if (!quiet) {
3290 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3291 			snprintf(buf, sizeof(buf), "%s:%s",
3292 				 ata_mode_string(xfer_mask),
3293 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3294 		else
3295 			snprintf(buf, sizeof(buf), "%s",
3296 				 ata_mode_string(xfer_mask));
3297 
3298 		ata_dev_printk(dev, KERN_WARNING,
3299 			       "limiting speed to %s\n", buf);
3300 	}
3301 
3302 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3303 			    &dev->udma_mask);
3304 
3305 	return 0;
3306 }
3307 
3308 static int ata_dev_set_mode(struct ata_device *dev)
3309 {
3310 	struct ata_eh_context *ehc = &dev->link->eh_context;
3311 	const char *dev_err_whine = "";
3312 	int ign_dev_err = 0;
3313 	unsigned int err_mask;
3314 	int rc;
3315 
3316 	dev->flags &= ~ATA_DFLAG_PIO;
3317 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3318 		dev->flags |= ATA_DFLAG_PIO;
3319 
3320 	err_mask = ata_dev_set_xfermode(dev);
3321 
3322 	if (err_mask & ~AC_ERR_DEV)
3323 		goto fail;
3324 
3325 	/* revalidate */
3326 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3327 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3328 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3329 	if (rc)
3330 		return rc;
3331 
3332 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3333 		/* Old CFA may refuse this command, which is just fine */
3334 		if (ata_id_is_cfa(dev->id))
3335 			ign_dev_err = 1;
3336 		/* Catch several broken garbage emulations plus some pre
3337 		   ATA devices */
3338 		if (ata_id_major_version(dev->id) == 0 &&
3339 					dev->pio_mode <= XFER_PIO_2)
3340 			ign_dev_err = 1;
3341 		/* Some very old devices and some bad newer ones fail
3342 		   any kind of SET_XFERMODE request but support PIO0-2
3343 		   timings and no IORDY */
3344 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3345 			ign_dev_err = 1;
3346 	}
3347 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3348 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3349 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3350 	    dev->dma_mode == XFER_MW_DMA_0 &&
3351 	    (dev->id[63] >> 8) & 1)
3352 		ign_dev_err = 1;
3353 
3354 	/* if the device is actually configured correctly, ignore dev err */
3355 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3356 		ign_dev_err = 1;
3357 
3358 	if (err_mask & AC_ERR_DEV) {
3359 		if (!ign_dev_err)
3360 			goto fail;
3361 		else
3362 			dev_err_whine = " (device error ignored)";
3363 	}
3364 
3365 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3366 		dev->xfer_shift, (int)dev->xfer_mode);
3367 
3368 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3369 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3370 		       dev_err_whine);
3371 
3372 	return 0;
3373 
3374  fail:
3375 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3376 		       "(err_mask=0x%x)\n", err_mask);
3377 	return -EIO;
3378 }
3379 
3380 /**
3381  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3382  *	@link: link on which timings will be programmed
3383  *	@r_failed_dev: out parameter for failed device
3384  *
3385  *	Standard implementation of the function used to tune and set
3386  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3387  *	ata_dev_set_mode() fails, pointer to the failing device is
3388  *	returned in @r_failed_dev.
3389  *
3390  *	LOCKING:
3391  *	PCI/etc. bus probe sem.
3392  *
3393  *	RETURNS:
3394  *	0 on success, negative errno otherwise
3395  */
3396 
3397 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3398 {
3399 	struct ata_port *ap = link->ap;
3400 	struct ata_device *dev;
3401 	int rc = 0, used_dma = 0, found = 0;
3402 
3403 	/* step 1: calculate xfer_mask */
3404 	ata_for_each_dev(dev, link, ENABLED) {
3405 		unsigned long pio_mask, dma_mask;
3406 		unsigned int mode_mask;
3407 
3408 		mode_mask = ATA_DMA_MASK_ATA;
3409 		if (dev->class == ATA_DEV_ATAPI)
3410 			mode_mask = ATA_DMA_MASK_ATAPI;
3411 		else if (ata_id_is_cfa(dev->id))
3412 			mode_mask = ATA_DMA_MASK_CFA;
3413 
3414 		ata_dev_xfermask(dev);
3415 		ata_force_xfermask(dev);
3416 
3417 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3419 
3420 		if (libata_dma_mask & mode_mask)
3421 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3422 		else
3423 			dma_mask = 0;
3424 
3425 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3426 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3427 
3428 		found = 1;
3429 		if (ata_dma_enabled(dev))
3430 			used_dma = 1;
3431 	}
3432 	if (!found)
3433 		goto out;
3434 
3435 	/* step 2: always set host PIO timings */
3436 	ata_for_each_dev(dev, link, ENABLED) {
3437 		if (dev->pio_mode == 0xff) {
3438 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3439 			rc = -EINVAL;
3440 			goto out;
3441 		}
3442 
3443 		dev->xfer_mode = dev->pio_mode;
3444 		dev->xfer_shift = ATA_SHIFT_PIO;
3445 		if (ap->ops->set_piomode)
3446 			ap->ops->set_piomode(ap, dev);
3447 	}
3448 
3449 	/* step 3: set host DMA timings */
3450 	ata_for_each_dev(dev, link, ENABLED) {
3451 		if (!ata_dma_enabled(dev))
3452 			continue;
3453 
3454 		dev->xfer_mode = dev->dma_mode;
3455 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3456 		if (ap->ops->set_dmamode)
3457 			ap->ops->set_dmamode(ap, dev);
3458 	}
3459 
3460 	/* step 4: update devices' xfer mode */
3461 	ata_for_each_dev(dev, link, ENABLED) {
3462 		rc = ata_dev_set_mode(dev);
3463 		if (rc)
3464 			goto out;
3465 	}
3466 
3467 	/* Record simplex status. If we selected DMA then the other
3468 	 * host channels are not permitted to do so.
3469 	 */
3470 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3471 		ap->host->simplex_claimed = ap;
3472 
3473  out:
3474 	if (rc)
3475 		*r_failed_dev = dev;
3476 	return rc;
3477 }
3478 
3479 /**
3480  *	ata_wait_ready - wait for link to become ready
3481  *	@link: link to be waited on
3482  *	@deadline: deadline jiffies for the operation
3483  *	@check_ready: callback to check link readiness
3484  *
3485  *	Wait for @link to become ready.  @check_ready should return
3486  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3487  *	link doesn't seem to be occupied, other errno for other error
3488  *	conditions.
3489  *
3490  *	Transient -ENODEV conditions are allowed for
3491  *	ATA_TMOUT_FF_WAIT.
3492  *
3493  *	LOCKING:
3494  *	EH context.
3495  *
3496  *	RETURNS:
3497  *	0 if @link is ready before @deadline; otherwise, -errno.
3498  */
3499 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3500 		   int (*check_ready)(struct ata_link *link))
3501 {
3502 	unsigned long start = jiffies;
3503 	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3504 	int warned = 0;
3505 
3506 	/* Slave readiness can't be tested separately from master.  On
3507 	 * M/S emulation configuration, this function should be called
3508 	 * only on the master and it will handle both master and slave.
3509 	 */
3510 	WARN_ON(link == link->ap->slave_link);
3511 
3512 	if (time_after(nodev_deadline, deadline))
3513 		nodev_deadline = deadline;
3514 
3515 	while (1) {
3516 		unsigned long now = jiffies;
3517 		int ready, tmp;
3518 
3519 		ready = tmp = check_ready(link);
3520 		if (ready > 0)
3521 			return 0;
3522 
3523 		/* -ENODEV could be transient.  Ignore -ENODEV if link
3524 		 * is online.  Also, some SATA devices take a long
3525 		 * time to clear 0xff after reset.  For example,
3526 		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3527 		 * GoVault needs even more than that.  Wait for
3528 		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3529 		 *
3530 		 * Note that some PATA controllers (pata_ali) explode
3531 		 * if status register is read more than once when
3532 		 * there's no device attached.
3533 		 */
3534 		if (ready == -ENODEV) {
3535 			if (ata_link_online(link))
3536 				ready = 0;
3537 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3538 				 !ata_link_offline(link) &&
3539 				 time_before(now, nodev_deadline))
3540 				ready = 0;
3541 		}
3542 
3543 		if (ready)
3544 			return ready;
3545 		if (time_after(now, deadline))
3546 			return -EBUSY;
3547 
3548 		if (!warned && time_after(now, start + 5 * HZ) &&
3549 		    (deadline - now > 3 * HZ)) {
3550 			ata_link_printk(link, KERN_WARNING,
3551 				"link is slow to respond, please be patient "
3552 				"(ready=%d)\n", tmp);
3553 			warned = 1;
3554 		}
3555 
3556 		msleep(50);
3557 	}
3558 }
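
/*
 * Usage sketch (hypothetical): a readiness callback built on the
 * ata_check_ready() helper, which maps a taskfile status byte to the
 * 1/0/-ENODEV convention described above.  my_port_status() is an
 * assumed accessor for a controller-specific status register.
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		u8 status = my_port_status(link->ap);
 *
 *		return ata_check_ready(status);
 *	}
 */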
3559 
3560 /**
3561  *	ata_wait_after_reset - wait for link to become ready after reset
3562  *	@link: link to be waited on
3563  *	@deadline: deadline jiffies for the operation
3564  *	@check_ready: callback to check link readiness
3565  *
3566  *	Wait for @link to become ready after reset.
3567  *
3568  *	LOCKING:
3569  *	EH context.
3570  *
3571  *	RETURNS:
3572  *	0 if @link is ready before @deadline; otherwise, -errno.
3573  */
3574 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3575 				int (*check_ready)(struct ata_link *link))
3576 {
3577 	msleep(ATA_WAIT_AFTER_RESET);
3578 
3579 	return ata_wait_ready(link, deadline, check_ready);
3580 }
3581 
3582 /**
3583  *	sata_link_debounce - debounce SATA phy status
3584  *	@link: ATA link to debounce SATA phy status for
3585  *	@params: timing parameters { interval, duration, timeout } in msec
3586  *	@deadline: deadline jiffies for the operation
3587  *
3588  *	Make sure SStatus of @link reaches a stable state, determined by
3589  *	holding the same value where DET is not 1 for @duration polled
3590  *	every @interval, before @timeout.  Timeout constrains the
3591  *	beginning of the stable state.  Because DET gets stuck at 1 on
3592  *	some controllers after hot unplugging, this function waits
3593  *	until timeout and then returns 0 if DET is stable at 1.
3594  *
3595  *	@timeout is further limited by @deadline.  The sooner of the
3596  *	two is used.
3597  *
3598  *	LOCKING:
3599  *	Kernel thread context (may sleep)
3600  *
3601  *	RETURNS:
3602  *	0 on success, -errno on failure.
3603  */
3604 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3605 		       unsigned long deadline)
3606 {
3607 	unsigned long interval = params[0];
3608 	unsigned long duration = params[1];
3609 	unsigned long last_jiffies, t;
3610 	u32 last, cur;
3611 	int rc;
3612 
3613 	t = ata_deadline(jiffies, params[2]);
3614 	if (time_before(t, deadline))
3615 		deadline = t;
3616 
3617 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3618 		return rc;
3619 	cur &= 0xf;
3620 
3621 	last = cur;
3622 	last_jiffies = jiffies;
3623 
3624 	while (1) {
3625 		msleep(interval);
3626 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3627 			return rc;
3628 		cur &= 0xf;
3629 
3630 		/* DET stable? */
3631 		if (cur == last) {
3632 			if (cur == 1 && time_before(jiffies, deadline))
3633 				continue;
3634 			if (time_after(jiffies,
3635 				       ata_deadline(last_jiffies, duration)))
3636 				return 0;
3637 			continue;
3638 		}
3639 
3640 		/* unstable, start over */
3641 		last = cur;
3642 		last_jiffies = jiffies;
3643 
3644 		/* Check deadline.  If debouncing failed, return
3645 		 * -EPIPE to tell upper layer to lower link speed.
3646 		 */
3647 		if (time_after(jiffies, deadline))
3648 			return -EPIPE;
3649 	}
3650 }
3651 
3652 /**
3653  *	sata_link_resume - resume SATA link
3654  *	@link: ATA link to resume SATA
3655  *	@params: timing parameters { interval, duration, timeout } in msec
3656  *	@deadline: deadline jiffies for the operation
3657  *
3658  *	Resume SATA phy @link and debounce it.
3659  *
3660  *	LOCKING:
3661  *	Kernel thread context (may sleep)
3662  *
3663  *	RETURNS:
3664  *	0 on success, -errno on failure.
3665  */
3666 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3667 		     unsigned long deadline)
3668 {
3669 	u32 scontrol, serror;
3670 	int rc;
3671 
3672 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3673 		return rc;
3674 
3675 	scontrol = (scontrol & 0x0f0) | 0x300;
3676 
3677 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3678 		return rc;
3679 
3680 	/* Some PHYs react badly if SStatus is pounded immediately
3681 	 * after resuming.  Delay 200ms before debouncing.
3682 	 */
3683 	msleep(200);
3684 
3685 	if ((rc = sata_link_debounce(link, params, deadline)))
3686 		return rc;
3687 
3688 	/* clear SError, some PHYs require this even for SRST to work */
3689 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3690 		rc = sata_scr_write(link, SCR_ERROR, serror);
3691 
3692 	return rc != -EINVAL ? rc : 0;
3693 }
3694 
3695 /**
3696  *	ata_std_prereset - prepare for reset
3697  *	@link: ATA link to be reset
3698  *	@deadline: deadline jiffies for the operation
3699  *
3700  *	@link is about to be reset.  Initialize it.  Failure from
3701  *	prereset makes libata abort whole reset sequence and give up
3702  *	that port, so prereset should be best-effort.  It does its
3703  *	best to prepare for reset sequence but if things go wrong, it
3704  *	should just whine, not fail.
3705  *
3706  *	LOCKING:
3707  *	Kernel thread context (may sleep)
3708  *
3709  *	RETURNS:
3710  *	0 on success, -errno otherwise.
3711  */
3712 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3713 {
3714 	struct ata_port *ap = link->ap;
3715 	struct ata_eh_context *ehc = &link->eh_context;
3716 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3717 	int rc;
3718 
3719 	/* if we're about to do hardreset, nothing more to do */
3720 	if (ehc->i.action & ATA_EH_HARDRESET)
3721 		return 0;
3722 
3723 	/* if SATA, resume link */
3724 	if (ap->flags & ATA_FLAG_SATA) {
3725 		rc = sata_link_resume(link, timing, deadline);
3726 		/* whine about phy resume failure but proceed */
3727 		if (rc && rc != -EOPNOTSUPP)
3728 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3729 					"link for reset (errno=%d)\n", rc);
3730 	}
3731 
3732 	/* no point in trying softreset on offline link */
3733 	if (ata_phys_link_offline(link))
3734 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3735 
3736 	return 0;
3737 }
3738 
3739 /**
3740  *	sata_link_hardreset - reset link via SATA phy reset
3741  *	@link: link to reset
3742  *	@timing: timing parameters { interval, duration, timeout } in msec
3743  *	@deadline: deadline jiffies for the operation
3744  *	@online: optional out parameter indicating link onlineness
3745  *	@check_ready: optional callback to check link readiness
3746  *
3747  *	SATA phy-reset @link using DET bits of SControl register.
3748  *	After hardreset, link readiness is waited upon using
3749  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3750  *	allowed to omit @check_ready and do the waiting themselves after
3751  *	this function returns.  Device classification is the LLD's
3752  *	responsibility.
3753  *
3754  *	*@online is set to one iff reset succeeded and @link is online
3755  *	after reset.
3756  *
3757  *	LOCKING:
3758  *	Kernel thread context (may sleep)
3759  *
3760  *	RETURNS:
3761  *	0 on success, -errno otherwise.
3762  */
3763 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3764 			unsigned long deadline,
3765 			bool *online, int (*check_ready)(struct ata_link *))
3766 {
3767 	u32 scontrol;
3768 	int rc;
3769 
3770 	DPRINTK("ENTER\n");
3771 
3772 	if (online)
3773 		*online = false;
3774 
3775 	if (sata_set_spd_needed(link)) {
3776 		/* SATA spec says nothing about how to reconfigure
3777 		 * spd.  To be on the safe side, turn off phy during
3778 		 * reconfiguration.  This works for at least ICH7 AHCI
3779 		 * and Sil3124.
3780 		 */
3781 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3782 			goto out;
3783 
3784 		scontrol = (scontrol & 0x0f0) | 0x304;
3785 
3786 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3787 			goto out;
3788 
3789 		sata_set_spd(link);
3790 	}
3791 
3792 	/* issue phy wake/reset */
3793 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3794 		goto out;
3795 
3796 	scontrol = (scontrol & 0x0f0) | 0x301;
3797 
3798 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3799 		goto out;
3800 
3801 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3802 	 * 10.4.2 says at least 1 ms.
3803 	 */
3804 	msleep(1);
3805 
3806 	/* bring link back */
3807 	rc = sata_link_resume(link, timing, deadline);
3808 	if (rc)
3809 		goto out;
3810 	/* if link is offline nothing more to do */
3811 	if (ata_phys_link_offline(link))
3812 		goto out;
3813 
3814 	/* Link is online.  From this point, -ENODEV too is an error. */
3815 	if (online)
3816 		*online = true;
3817 
3818 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3819 		/* If PMP is supported, we have to do follow-up SRST.
3820 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3821 		 * the first port is empty.  Wait only for
3822 		 * ATA_TMOUT_PMP_SRST_WAIT.
3823 		 */
3824 		if (check_ready) {
3825 			unsigned long pmp_deadline;
3826 
3827 			pmp_deadline = ata_deadline(jiffies,
3828 						    ATA_TMOUT_PMP_SRST_WAIT);
3829 			if (time_after(pmp_deadline, deadline))
3830 				pmp_deadline = deadline;
3831 			ata_wait_ready(link, pmp_deadline, check_ready);
3832 		}
3833 		rc = -EAGAIN;
3834 		goto out;
3835 	}
3836 
3837 	rc = 0;
3838 	if (check_ready)
3839 		rc = ata_wait_ready(link, deadline, check_ready);
3840  out:
3841 	if (rc && rc != -EAGAIN) {
3842 		/* online is set iff link is online && reset succeeded */
3843 		if (online)
3844 			*online = false;
3845 		ata_link_printk(link, KERN_ERR,
3846 				"COMRESET failed (errno=%d)\n", rc);
3847 	}
3848 	DPRINTK("EXIT, rc=%d\n", rc);
3849 	return rc;
3850 }
3851 
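/*
 * Example (hypothetical LLD code): a controller that can report link
 * readiness would normally wrap this function as below.  "my_hardreset"
 * and "my_check_ready" are assumed driver-private names, not libata API:
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *				sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline,
 *					   &online, my_check_ready);
 *	}
 */
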
3852 /**
3853  *	sata_std_hardreset - COMRESET w/o waiting or classification
3854  *	@link: link to reset
3855  *	@class: resulting class of attached device
3856  *	@deadline: deadline jiffies for the operation
3857  *
3858  *	Standard SATA COMRESET w/o waiting or classification.
3859  *
3860  *	LOCKING:
3861  *	Kernel thread context (may sleep)
3862  *
3863  *	RETURNS:
3864  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3865  */
3866 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3867 		       unsigned long deadline)
3868 {
3869 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3870 	bool online;
3871 	int rc;
3872 
3873 	/* do hardreset */
3874 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3875 	return online ? -EAGAIN : rc;
3876 }
3877 
3878 /**
3879  *	ata_std_postreset - standard postreset callback
3880  *	@link: the target ata_link
3881  *	@classes: classes of attached devices
3882  *
3883  *	This function is invoked after a successful reset.  Note that
3884  *	the device might have been reset more than once using
3885  *	different reset methods before postreset is invoked.
3886  *
3887  *	LOCKING:
3888  *	Kernel thread context (may sleep)
3889  */
3890 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3891 {
3892 	u32 serror;
3893 
3894 	DPRINTK("ENTER\n");
3895 
3896 	/* reset complete, clear SError */
3897 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3898 		sata_scr_write(link, SCR_ERROR, serror);
3899 
3900 	/* print link status */
3901 	sata_print_link_status(link);
3902 
3903 	DPRINTK("EXIT\n");
3904 }
3905 
3906 /**
3907  *	ata_dev_same_device - Determine whether new ID matches configured device
3908  *	@dev: device to compare against
3909  *	@new_class: class of the new device
3910  *	@new_id: IDENTIFY page of the new device
3911  *
3912  *	Compare @new_class and @new_id against @dev and determine
3913  *	whether @dev is the device indicated by @new_class and
3914  *	@new_id.
3915  *
3916  *	LOCKING:
3917  *	None.
3918  *
3919  *	RETURNS:
3920  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3921  */
3922 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3923 			       const u16 *new_id)
3924 {
3925 	const u16 *old_id = dev->id;
3926 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3927 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3928 
3929 	if (dev->class != new_class) {
3930 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3931 			       dev->class, new_class);
3932 		return 0;
3933 	}
3934 
3935 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3936 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3937 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3938 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3939 
3940 	if (strcmp(model[0], model[1])) {
3941 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3942 			       "'%s' != '%s'\n", model[0], model[1]);
3943 		return 0;
3944 	}
3945 
3946 	if (strcmp(serial[0], serial[1])) {
3947 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3948 			       "'%s' != '%s'\n", serial[0], serial[1]);
3949 		return 0;
3950 	}
3951 
3952 	return 1;
3953 }
3954 
3955 /**
3956  *	ata_dev_reread_id - Re-read IDENTIFY data
3957  *	@dev: target ATA device
3958  *	@readid_flags: read ID flags
3959  *
3960  *	Re-read IDENTIFY page and make sure @dev is still attached to
3961  *	the port.
3962  *
3963  *	LOCKING:
3964  *	Kernel thread context (may sleep)
3965  *
3966  *	RETURNS:
3967  *	0 on success, negative errno otherwise
3968  */
3969 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3970 {
3971 	unsigned int class = dev->class;
3972 	u16 *id = (void *)dev->link->ap->sector_buf;
3973 	int rc;
3974 
3975 	/* read ID data */
3976 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3977 	if (rc)
3978 		return rc;
3979 
3980 	/* is the device still there? */
3981 	if (!ata_dev_same_device(dev, class, id))
3982 		return -ENODEV;
3983 
3984 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3985 	return 0;
3986 }
3987 
3988 /**
3989  *	ata_dev_revalidate - Revalidate ATA device
3990  *	@dev: device to revalidate
3991  *	@new_class: new class code
3992  *	@readid_flags: read ID flags
3993  *
3994  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3995  *	port and reconfigure it according to the new IDENTIFY page.
3996  *
3997  *	LOCKING:
3998  *	Kernel thread context (may sleep)
3999  *
4000  *	RETURNS:
4001  *	0 on success, negative errno otherwise
4002  */
4003 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4004 		       unsigned int readid_flags)
4005 {
4006 	u64 n_sectors = dev->n_sectors;
4007 	int rc;
4008 
4009 	if (!ata_dev_enabled(dev))
4010 		return -ENODEV;
4011 
4012 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4013 	if (ata_class_enabled(new_class) &&
4014 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4015 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4016 			       dev->class, new_class);
4017 		rc = -ENODEV;
4018 		goto fail;
4019 	}
4020 
4021 	/* re-read ID */
4022 	rc = ata_dev_reread_id(dev, readid_flags);
4023 	if (rc)
4024 		goto fail;
4025 
4026 	/* configure device according to the new ID */
4027 	rc = ata_dev_configure(dev);
4028 	if (rc)
4029 		goto fail;
4030 
4031 	/* verify n_sectors hasn't changed */
4032 	if (dev->class == ATA_DEV_ATA && n_sectors &&
4033 	    dev->n_sectors != n_sectors) {
4034 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4035 			       "%llu != %llu\n",
4036 			       (unsigned long long)n_sectors,
4037 			       (unsigned long long)dev->n_sectors);
4038 
4039 		/* restore original n_sectors */
4040 		dev->n_sectors = n_sectors;
4041 
4042 		rc = -ENODEV;
4043 		goto fail;
4044 	}
4045 
4046 	return 0;
4047 
4048  fail:
4049 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4050 	return rc;
4051 }
4052 
4053 struct ata_blacklist_entry {
4054 	const char *model_num;
4055 	const char *model_rev;
4056 	unsigned long horkage;
4057 };
4058 
4059 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4060 	/* Devices with DMA related problems under Linux */
4061 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4062 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4063 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4064 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4065 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4066 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4067 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4068 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4069 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4070 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
4071 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
4072 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4073 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4074 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4075 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4076 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4077 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
4078 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
4079 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4080 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4081 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4082 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4083 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4084 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4085 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4086 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4087 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4088 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4089 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4090 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4091 	/* Odd clown on sil3726/4726 PMPs */
4092 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4093 
4094 	/* Weird ATAPI devices */
4095 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4096 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4097 
4098 	/* Devices we expect to fail diagnostics */
4099 
4100 	/* Devices where NCQ should be avoided */
4101 	/* NCQ is slow */
4102 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4103 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4104 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4105 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4106 	/* NCQ is broken */
4107 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4108 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4109 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4110 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4111 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4112 
4113 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4114 	{ "ST31500341AS",	"SD15",		ATA_HORKAGE_NONCQ |
4115 						ATA_HORKAGE_FIRMWARE_WARN },
4116 	{ "ST31500341AS",	"SD16",		ATA_HORKAGE_NONCQ |
4117 						ATA_HORKAGE_FIRMWARE_WARN },
4118 	{ "ST31500341AS",	"SD17",		ATA_HORKAGE_NONCQ |
4119 						ATA_HORKAGE_FIRMWARE_WARN },
4120 	{ "ST31500341AS",	"SD18",		ATA_HORKAGE_NONCQ |
4121 						ATA_HORKAGE_FIRMWARE_WARN },
4122 	{ "ST31500341AS",	"SD19",		ATA_HORKAGE_NONCQ |
4123 						ATA_HORKAGE_FIRMWARE_WARN },
4124 
4125 	{ "ST31000333AS",	"SD15",		ATA_HORKAGE_NONCQ |
4126 						ATA_HORKAGE_FIRMWARE_WARN },
4127 	{ "ST31000333AS",	"SD16",		ATA_HORKAGE_NONCQ |
4128 						ATA_HORKAGE_FIRMWARE_WARN },
4129 	{ "ST31000333AS",	"SD17",		ATA_HORKAGE_NONCQ |
4130 						ATA_HORKAGE_FIRMWARE_WARN },
4131 	{ "ST31000333AS",	"SD18",		ATA_HORKAGE_NONCQ |
4132 						ATA_HORKAGE_FIRMWARE_WARN },
4133 	{ "ST31000333AS",	"SD19",		ATA_HORKAGE_NONCQ |
4134 						ATA_HORKAGE_FIRMWARE_WARN },
4135 
4136 	{ "ST3640623AS",	"SD15",		ATA_HORKAGE_NONCQ |
4137 						ATA_HORKAGE_FIRMWARE_WARN },
4138 	{ "ST3640623AS",	"SD16",		ATA_HORKAGE_NONCQ |
4139 						ATA_HORKAGE_FIRMWARE_WARN },
4140 	{ "ST3640623AS",	"SD17",		ATA_HORKAGE_NONCQ |
4141 						ATA_HORKAGE_FIRMWARE_WARN },
4142 	{ "ST3640623AS",	"SD18",		ATA_HORKAGE_NONCQ |
4143 						ATA_HORKAGE_FIRMWARE_WARN },
4144 	{ "ST3640623AS",	"SD19",		ATA_HORKAGE_NONCQ |
4145 						ATA_HORKAGE_FIRMWARE_WARN },
4146 
4147 	{ "ST3640323AS",	"SD15",		ATA_HORKAGE_NONCQ |
4148 						ATA_HORKAGE_FIRMWARE_WARN },
4149 	{ "ST3640323AS",	"SD16",		ATA_HORKAGE_NONCQ |
4150 						ATA_HORKAGE_FIRMWARE_WARN },
4151 	{ "ST3640323AS",	"SD17",		ATA_HORKAGE_NONCQ |
4152 						ATA_HORKAGE_FIRMWARE_WARN },
4153 	{ "ST3640323AS",	"SD18",		ATA_HORKAGE_NONCQ |
4154 						ATA_HORKAGE_FIRMWARE_WARN },
4155 	{ "ST3640323AS",	"SD19",		ATA_HORKAGE_NONCQ |
4156 						ATA_HORKAGE_FIRMWARE_WARN },
4157 
4158 	{ "ST3320813AS",	"SD15",		ATA_HORKAGE_NONCQ |
4159 						ATA_HORKAGE_FIRMWARE_WARN },
4160 	{ "ST3320813AS",	"SD16",		ATA_HORKAGE_NONCQ |
4161 						ATA_HORKAGE_FIRMWARE_WARN },
4162 	{ "ST3320813AS",	"SD17",		ATA_HORKAGE_NONCQ |
4163 						ATA_HORKAGE_FIRMWARE_WARN },
4164 	{ "ST3320813AS",	"SD18",		ATA_HORKAGE_NONCQ |
4165 						ATA_HORKAGE_FIRMWARE_WARN },
4166 	{ "ST3320813AS",	"SD19",		ATA_HORKAGE_NONCQ |
4167 						ATA_HORKAGE_FIRMWARE_WARN },
4168 
4169 	{ "ST3320613AS",	"SD15",		ATA_HORKAGE_NONCQ |
4170 						ATA_HORKAGE_FIRMWARE_WARN },
4171 	{ "ST3320613AS",	"SD16",		ATA_HORKAGE_NONCQ |
4172 						ATA_HORKAGE_FIRMWARE_WARN },
4173 	{ "ST3320613AS",	"SD17",		ATA_HORKAGE_NONCQ |
4174 						ATA_HORKAGE_FIRMWARE_WARN },
4175 	{ "ST3320613AS",	"SD18",		ATA_HORKAGE_NONCQ |
4176 						ATA_HORKAGE_FIRMWARE_WARN },
4177 	{ "ST3320613AS",	"SD19",		ATA_HORKAGE_NONCQ |
4178 						ATA_HORKAGE_FIRMWARE_WARN },
4179 
4180 	/* Blacklist entries taken from Silicon Image 3124/3132
4181 	   Windows driver .inf file - also several Linux problem reports */
4182 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4183 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4184 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4185 
4186 	/* devices which puke on READ_NATIVE_MAX */
4187 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4188 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4189 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4190 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4191 
4192 	/* Devices which report 1 sector over size HPA */
4193 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4194 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4195 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4196 
4197 	/* Devices which get the IVB wrong */
4198 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4199 	/* Maybe we should just blacklist TSSTcorp... */
4200 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
4201 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
4202 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4203 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4204 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4205 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
4206 
4207 	/* Devices that do not need bridging limits applied */
4208 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4209 
4210 	/* End Marker */
4211 	{ }
4212 };
4213 
4214 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4215 {
4216 	const char *p;
4217 	int len;
4218 
4219 	/*
4220 	 * check for trailing wildcard: *\0
4221 	 */
4222 	p = strchr(patt, wildchar);
4223 	if (p && ((*(p + 1)) == 0))
4224 		len = p - patt;
4225 	else {
4226 		len = strlen(name);
4227 		if (!len) {
4228 			if (!*patt)
4229 				return 0;
4230 			return -1;
4231 		}
4232 	}
4233 
4234 	return strncmp(patt, name, len);
4235 }
4236 
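/*
 * Example: with '*' as @wildchar, a trailing wildcard matches any name
 * sharing the prefix, while a wildcard-free pattern must match exactly:
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0
 *	strn_pattern_cmp("ST340823A", "ST340823A", '*') == 0
 *	strn_pattern_cmp("ST340823A", "ST320413A", '*') != 0
 */
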
4237 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4238 {
4239 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4240 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4241 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4242 
4243 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4244 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4245 
4246 	while (ad->model_num) {
4247 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4248 			if (ad->model_rev == NULL)
4249 				return ad->horkage;
4250 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4251 				return ad->horkage;
4252 		}
4253 		ad++;
4254 	}
4255 	return 0;
4256 }
4257 
4258 static int ata_dma_blacklisted(const struct ata_device *dev)
4259 {
4260 	/* We don't support polling DMA.
4261 	 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
4262 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4263 	 */
4264 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4265 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4266 		return 1;
4267 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4268 }
4269 
4270 /**
4271  *	ata_is_40wire		-	check drive side detection
4272  *	@dev: device
4273  *
4274  *	Perform drive side detection decoding, allowing for device vendors
4275  *	who can't follow the documentation.
4276  */
4277 
4278 static int ata_is_40wire(struct ata_device *dev)
4279 {
4280 	if (dev->horkage & ATA_HORKAGE_IVB)
4281 		return ata_drive_40wire_relaxed(dev->id);
4282 	return ata_drive_40wire(dev->id);
4283 }
4284 
4285 /**
4286  *	cable_is_40wire		-	40/80/SATA decider
4287  *	@ap: port to consider
4288  *
4289  *	This function encapsulates the policy for speed management
4290  *	in one place. At the moment we don't cache the result but
4291  *	there is a good case for setting ap->cbl to the result when
4292  *	we are called with unknown cables (and figuring out if it
4293  *	impacts hotplug at all).
4294  *
4295  *	Return 1 if the cable appears to be 40 wire.
4296  */
4297 
4298 static int cable_is_40wire(struct ata_port *ap)
4299 {
4300 	struct ata_link *link;
4301 	struct ata_device *dev;
4302 
4303 	/* If the controller thinks we are 40 wire, we are. */
4304 	if (ap->cbl == ATA_CBL_PATA40)
4305 		return 1;
4306 
4307 	/* If the controller thinks we are 80 wire, we are. */
4308 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4309 		return 0;
4310 
4311 	/* If the system is known to be 40 wire short cable (eg
4312 	 * laptop), then we allow 80 wire modes even if the drive
4313 	 * isn't sure.
4314 	 */
4315 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4316 		return 0;
4317 
4318 	/* If the controller doesn't know, we scan.
4319 	 *
4320 	 * Note: We look for all 40 wire detects at this point.  Any
4321 	 *       80 wire detect is taken to be 80 wire cable because
4322 	 * - in many setups only the one drive (slave if present) will
4323 	 *   give a valid detect
4324 	 * - if you have a non detect capable drive you don't want it
4325 	 *   to colour the choice
4326 	 */
4327 	ata_for_each_link(link, ap, EDGE) {
4328 		ata_for_each_dev(dev, link, ENABLED) {
4329 			if (!ata_is_40wire(dev))
4330 				return 0;
4331 		}
4332 	}
4333 	return 1;
4334 }
4335 
4336 /**
4337  *	ata_dev_xfermask - Compute supported xfermask of the given device
4338  *	@dev: Device to compute xfermask for
4339  *
4340  *	Compute supported xfermask of @dev and store it in
4341  *	dev->*_mask.  This function is responsible for applying all
4342  *	known limits including host controller limits, device
4343  *	blacklist, etc...
4344  *
4345  *	LOCKING:
4346  *	None.
4347  */
4348 static void ata_dev_xfermask(struct ata_device *dev)
4349 {
4350 	struct ata_link *link = dev->link;
4351 	struct ata_port *ap = link->ap;
4352 	struct ata_host *host = ap->host;
4353 	unsigned long xfer_mask;
4354 
4355 	/* controller modes available */
4356 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4357 				      ap->mwdma_mask, ap->udma_mask);
4358 
4359 	/* drive modes available */
4360 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4361 				       dev->mwdma_mask, dev->udma_mask);
4362 	xfer_mask &= ata_id_xfermask(dev->id);
4363 
4364 	/*
4365 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4366 	 *	cable
4367 	 */
4368 	if (ata_dev_pair(dev)) {
4369 		/* No PIO5 or PIO6 */
4370 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4371 		/* No MWDMA3 or MWDMA 4 */
4372 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4373 	}
4374 
4375 	if (ata_dma_blacklisted(dev)) {
4376 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4377 		ata_dev_printk(dev, KERN_WARNING,
4378 			       "device is on DMA blacklist, disabling DMA\n");
4379 	}
4380 
4381 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4382 	    host->simplex_claimed && host->simplex_claimed != ap) {
4383 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4384 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4385 			       "other device, disabling DMA\n");
4386 	}
4387 
4388 	if (ap->flags & ATA_FLAG_NO_IORDY)
4389 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4390 
4391 	if (ap->ops->mode_filter)
4392 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4393 
4394 	/* Apply cable rule here.  Don't apply it early because when
4395 	 * we handle hot plug the cable type can itself change.
4396 	 * Check this last so that we know if the transfer rate was
4397 	 * solely limited by the cable.
4398 	 * Unknown or 80 wire cables reported host side are checked
4399 	 * drive side as well. Cases where we know a 40wire cable
4400 	 * is used safely for 80 are not checked here.
4401 	 */
4402 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4403 		/* UDMA/44 or higher would be available */
4404 		if (cable_is_40wire(ap)) {
4405 			ata_dev_printk(dev, KERN_WARNING,
4406 				 "limited to UDMA/33 due to 40-wire cable\n");
4407 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4408 		}
4409 
4410 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4411 			    &dev->mwdma_mask, &dev->udma_mask);
4412 }
4413 
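/*
 * Example: the cable-rule arithmetic above in bit terms.  In the packed
 * mask UDMA bit 0 is UDMA/16, bit 1 UDMA/25, bit 2 UDMA/33 and bits 3-7
 * are UDMA/44 and up, so 0xF8 << ATA_SHIFT_UDMA selects exactly the
 * modes that need an 80-wire cable:
 *
 *	xfer_mask & (0xF8 << ATA_SHIFT_UDMA)	- UDMA/44+ offered?
 *	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA)	- clamp to UDMA/33
 */
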
4414 /**
4415  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4416  *	@dev: Device to which command will be sent
4417  *
4418  *	Issue SET FEATURES - XFER MODE command to device @dev
4419  *	on its port.
4420  *
4421  *	LOCKING:
4422  *	PCI/etc. bus probe sem.
4423  *
4424  *	RETURNS:
4425  *	0 on success, AC_ERR_* mask otherwise.
4426  */
4427 
4428 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4429 {
4430 	struct ata_taskfile tf;
4431 	unsigned int err_mask;
4432 
4433 	/* set up set-features taskfile */
4434 	DPRINTK("set features - xfer mode\n");
4435 
4436 	/* Some controllers and ATAPI devices show flaky interrupt
4437 	 * behavior after setting xfer mode.  Use polling instead.
4438 	 */
4439 	ata_tf_init(dev, &tf);
4440 	tf.command = ATA_CMD_SET_FEATURES;
4441 	tf.feature = SETFEATURES_XFER;
4442 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4443 	tf.protocol = ATA_PROT_NODATA;
4444 	/* If we are using IORDY we must send the mode setting command */
4445 	if (ata_pio_need_iordy(dev))
4446 		tf.nsect = dev->xfer_mode;
4447 	/* If the device has IORDY and the controller does not - turn it off */
4448 	else if (ata_id_has_iordy(dev->id))
4449 		tf.nsect = 0x01;
4450 	else /* In the ancient relic department - skip all of this */
4451 		return 0;
4452 
4453 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4454 
4455 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4456 	return err_mask;
4457 }

4458 /**
4459  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4460  *	@dev: Device to which command will be sent
4461  *	@enable: Whether to enable or disable the feature
4462  *	@feature: The feature to set, passed in the sector count field
4463  *
4464  *	Issue SET FEATURES - SATA FEATURES command to device @dev,
4465  *	passing @feature in the sector count field.
4466  *
4467  *	LOCKING:
4468  *	PCI/etc. bus probe sem.
4469  *
4470  *	RETURNS:
4471  *	0 on success, AC_ERR_* mask otherwise.
4472  */
4473 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4474 					u8 feature)
4475 {
4476 	struct ata_taskfile tf;
4477 	unsigned int err_mask;
4478 
4479 	/* set up set-features taskfile */
4480 	DPRINTK("set features - SATA features\n");
4481 
4482 	ata_tf_init(dev, &tf);
4483 	tf.command = ATA_CMD_SET_FEATURES;
4484 	tf.feature = enable;
4485 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4486 	tf.protocol = ATA_PROT_NODATA;
4487 	tf.nsect = feature;
4488 
4489 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4490 
4491 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4492 	return err_mask;
4493 }
4494 
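/*
 * Example: libata uses this helper during device configuration, e.g. to
 * turn on SATA Asynchronous Notification:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 */
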
4495 /**
4496  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4497  *	@dev: Device to which command will be sent
4498  *	@heads: Number of heads (taskfile parameter)
4499  *	@sectors: Number of sectors (taskfile parameter)
4500  *
4501  *	LOCKING:
4502  *	Kernel thread context (may sleep)
4503  *
4504  *	RETURNS:
4505  *	0 on success, AC_ERR_* mask otherwise.
4506  */
4507 static unsigned int ata_dev_init_params(struct ata_device *dev,
4508 					u16 heads, u16 sectors)
4509 {
4510 	struct ata_taskfile tf;
4511 	unsigned int err_mask;
4512 
4513 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4514 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4515 		return AC_ERR_INVALID;
4516 
4517 	/* set up init dev params taskfile */
4518 	DPRINTK("init dev params \n");
4519 
4520 	ata_tf_init(dev, &tf);
4521 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4522 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4523 	tf.protocol = ATA_PROT_NODATA;
4524 	tf.nsect = sectors;
4525 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4526 
4527 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4528 	/* A clean abort indicates an original or just-out-of-spec drive,
4529 	   and we should continue as we issue the setup based on the
4530 	   drive-reported working geometry */
4531 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4532 		err_mask = 0;
4533 
4534 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4535 	return err_mask;
4536 }
4537 
4538 /**
4539  *	ata_sg_clean - Unmap DMA memory associated with command
4540  *	@qc: Command containing DMA memory to be released
4541  *
4542  *	Unmap all mapped DMA memory associated with this command.
4543  *
4544  *	LOCKING:
4545  *	spin_lock_irqsave(host lock)
4546  */
4547 void ata_sg_clean(struct ata_queued_cmd *qc)
4548 {
4549 	struct ata_port *ap = qc->ap;
4550 	struct scatterlist *sg = qc->sg;
4551 	int dir = qc->dma_dir;
4552 
4553 	WARN_ON(sg == NULL);
4554 
4555 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4556 
4557 	if (qc->n_elem)
4558 		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4559 
4560 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4561 	qc->sg = NULL;
4562 }
4563 
4564 /**
4565  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4566  *	@qc: Metadata associated with taskfile to check
4567  *
4568  *	Allow low-level driver to filter ATA PACKET commands, returning
4569  *	a status indicating whether or not it is OK to use DMA for the
4570  *	supplied PACKET command.
4571  *
4572  *	LOCKING:
4573  *	spin_lock_irqsave(host lock)
4574  *
4575  *	RETURNS: 0 when ATAPI DMA can be used
4576  *               nonzero otherwise
4577  */
4578 int atapi_check_dma(struct ata_queued_cmd *qc)
4579 {
4580 	struct ata_port *ap = qc->ap;
4581 
4582 	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4583 	 * few ATAPI devices choke on such DMA requests.
4584 	 */
4585 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4586 	    unlikely(qc->nbytes & 15))
4587 		return 1;
4588 
4589 	if (ap->ops->check_atapi_dma)
4590 		return ap->ops->check_atapi_dma(qc);
4591 
4592 	return 0;
4593 }
4594 
4595 /**
4596  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4597  *	@qc: ATA command in question
4598  *
4599  *	Non-NCQ commands cannot run with any other command, NCQ or
4600  *	not.  As the upper layer only knows the queue depth, we are
4601  *	responsible for maintaining exclusion.  This function checks
4602  *	whether a new command @qc can be issued.
4603  *
4604  *	LOCKING:
4605  *	spin_lock_irqsave(host lock)
4606  *
4607  *	RETURNS:
4608  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4609  */
4610 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4611 {
4612 	struct ata_link *link = qc->dev->link;
4613 
4614 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4615 		if (!ata_tag_valid(link->active_tag))
4616 			return 0;
4617 	} else {
4618 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4619 			return 0;
4620 	}
4621 
4622 	return ATA_DEFER_LINK;
4623 }
4624 
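/*
 * Example (hypothetical LLD code): controllers with additional queueing
 * constraints usually apply their own checks first and fall back to the
 * standard helper.  "my_controller_busy" is an assumed driver-private
 * predicate:
 *
 *	static int my_qc_defer(struct ata_queued_cmd *qc)
 *	{
 *		if (my_controller_busy(qc->ap))
 *			return ATA_DEFER_PORT;
 *		return ata_std_qc_defer(qc);
 *	}
 */
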
4625 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4626 
4627 /**
4628  *	ata_sg_init - Associate command with scatter-gather table.
4629  *	@qc: Command to be associated
4630  *	@sg: Scatter-gather table.
4631  *	@n_elem: Number of elements in s/g table.
4632  *
4633  *	Initialize the data-related elements of queued_cmd @qc
4634  *	to point to a scatter-gather table @sg, containing @n_elem
4635  *	elements.
4636  *
4637  *	LOCKING:
4638  *	spin_lock_irqsave(host lock)
4639  */
4640 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4641 		 unsigned int n_elem)
4642 {
4643 	qc->sg = sg;
4644 	qc->n_elem = n_elem;
4645 	qc->cursg = qc->sg;
4646 }
4647 
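/*
 * Example: the SCSI translation layer associates a qc with the sglist
 * of the originating SCSI command in essentially this way:
 *
 *	ata_sg_init(qc, scsi_sglist(scmd), scsi_sg_count(scmd));
 *	qc->dma_dir = scmd->sc_data_direction;
 */
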
4648 /**
4649  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4650  *	@qc: Command with scatter-gather table to be mapped.
4651  *
4652  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4653  *
4654  *	LOCKING:
4655  *	spin_lock_irqsave(host lock)
4656  *
4657  *	RETURNS:
4658  *	Zero on success, negative on error.
4659  *
4660  */
4661 static int ata_sg_setup(struct ata_queued_cmd *qc)
4662 {
4663 	struct ata_port *ap = qc->ap;
4664 	unsigned int n_elem;
4665 
4666 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4667 
4668 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4669 	if (n_elem < 1)
4670 		return -1;
4671 
4672 	DPRINTK("%d sg elements mapped\n", n_elem);
4673 
4674 	qc->n_elem = n_elem;
4675 	qc->flags |= ATA_QCFLAG_DMAMAP;
4676 
4677 	return 0;
4678 }
4679 
4680 /**
4681  *	swap_buf_le16 - swap halves of 16-bit words in place
4682  *	@buf:  Buffer to swap
4683  *	@buf_words:  Number of 16-bit words in buffer.
4684  *
4685  *	Swap halves of 16-bit words if needed to convert from
4686  *	little-endian byte order to native cpu byte order, or
4687  *	vice-versa.
4688  *
4689  *	LOCKING:
4690  *	Inherited from caller.
4691  */
4692 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4693 {
4694 #ifdef __BIG_ENDIAN
4695 	unsigned int i;
4696 
4697 	for (i = 0; i < buf_words; i++)
4698 		buf[i] = le16_to_cpu(buf[i]);
4699 #endif /* __BIG_ENDIAN */
4700 }
4701 
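/*
 * Example: IDENTIFY data arrives from the device as 256 little-endian
 * words; a PIO data-in path converts them to CPU order with:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */
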
4702 /**
4703  *	ata_qc_new - Request an available ATA command, for queueing
4704  *	@ap: Port associated with device @dev
4705  *	@dev: Device from whom we request an available command structure
4706  *
4707  *	LOCKING:
4708  *	None.
4709  */
4710 
4711 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4712 {
4713 	struct ata_queued_cmd *qc = NULL;
4714 	unsigned int i;
4715 
4716 	/* no command while frozen */
4717 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4718 		return NULL;
4719 
4720 	/* the last tag is reserved for internal command. */
4721 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4722 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4723 			qc = __ata_qc_from_tag(ap, i);
4724 			break;
4725 		}
4726 
4727 	if (qc)
4728 		qc->tag = i;
4729 
4730 	return qc;
4731 }
4732 
4733 /**
4734  *	ata_qc_new_init - Request an available ATA command, and initialize it
4735  *	@dev: Device from whom we request an available command structure
4736  *
4737  *	LOCKING:
4738  *	None.
4739  */
4740 
4741 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4742 {
4743 	struct ata_port *ap = dev->link->ap;
4744 	struct ata_queued_cmd *qc;
4745 
4746 	qc = ata_qc_new(ap);
4747 	if (qc) {
4748 		qc->scsicmd = NULL;
4749 		qc->ap = ap;
4750 		qc->dev = dev;
4751 
4752 		ata_qc_reinit(qc);
4753 	}
4754 
4755 	return qc;
4756 }
4757 
4758 /**
4759  *	ata_qc_free - free unused ata_queued_cmd
4760  *	@qc: Command to complete
4761  *
4762  *	Designed to free an unused ata_queued_cmd object
4763  *	in case something prevents using it.
4764  *
4765  *	LOCKING:
4766  *	spin_lock_irqsave(host lock)
4767  */
4768 void ata_qc_free(struct ata_queued_cmd *qc)
4769 {
4770 	struct ata_port *ap = qc->ap;
4771 	unsigned int tag;
4772 
4773 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4774 
4775 	qc->flags = 0;
4776 	tag = qc->tag;
4777 	if (likely(ata_tag_valid(tag))) {
4778 		qc->tag = ATA_TAG_POISON;
4779 		clear_bit(tag, &ap->qc_allocated);
4780 	}
4781 }
4782 
4783 void __ata_qc_complete(struct ata_queued_cmd *qc)
4784 {
4785 	struct ata_port *ap = qc->ap;
4786 	struct ata_link *link = qc->dev->link;
4787 
4788 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4789 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4790 
4791 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4792 		ata_sg_clean(qc);
4793 
4794 	/* command should be marked inactive atomically with qc completion */
4795 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4796 		link->sactive &= ~(1 << qc->tag);
4797 		if (!link->sactive)
4798 			ap->nr_active_links--;
4799 	} else {
4800 		link->active_tag = ATA_TAG_POISON;
4801 		ap->nr_active_links--;
4802 	}
4803 
4804 	/* clear exclusive status */
4805 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4806 		     ap->excl_link == link))
4807 		ap->excl_link = NULL;
4808 
4809 	/* atapi: mark qc as inactive to prevent the interrupt handler
4810 	 * from completing the command twice later, before the error handler
4811 	 * is called. (when rc != 0 and atapi request sense is needed)
4812 	 */
4813 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4814 	ap->qc_active &= ~(1 << qc->tag);
4815 
4816 	/* call completion callback */
4817 	qc->complete_fn(qc);
4818 }
4819 
4820 static void fill_result_tf(struct ata_queued_cmd *qc)
4821 {
4822 	struct ata_port *ap = qc->ap;
4823 
4824 	qc->result_tf.flags = qc->tf.flags;
4825 	ap->ops->qc_fill_rtf(qc);
4826 }
4827 
4828 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4829 {
4830 	struct ata_device *dev = qc->dev;
4831 
4832 	if (ata_tag_internal(qc->tag))
4833 		return;
4834 
4835 	if (ata_is_nodata(qc->tf.protocol))
4836 		return;
4837 
4838 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4839 		return;
4840 
4841 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4842 }
4843 
4844 /**
4845  *	ata_qc_complete - Complete an active ATA command
4846  *	@qc: Command to complete
4847  *
4848  *	Indicate to the mid and upper layers that an ATA
4849  *	command has completed, with either an ok or not-ok status.
4850  *
4851  *	LOCKING:
4852  *	spin_lock_irqsave(host lock)
4853  */
4854 void ata_qc_complete(struct ata_queued_cmd *qc)
4855 {
4856 	struct ata_port *ap = qc->ap;
4857 
4858 	/* XXX: New EH and old EH use different mechanisms to
4859 	 * synchronize EH with regular execution path.
4860 	 *
4861 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4862 	 * Normal execution path is responsible for not accessing a
4863 	 * failed qc.  libata core enforces the rule by returning NULL
4864 	 * from ata_qc_from_tag() for failed qcs.
4865 	 *
4866 	 * Old EH depends on ata_qc_complete() nullifying completion
4867 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4868 	 * not synchronize with interrupt handler.  Only PIO task is
4869 	 * taken care of.
4870 	 */
4871 	if (ap->ops->error_handler) {
4872 		struct ata_device *dev = qc->dev;
4873 		struct ata_eh_info *ehi = &dev->link->eh_info;
4874 
4875 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4876 
4877 		if (unlikely(qc->err_mask))
4878 			qc->flags |= ATA_QCFLAG_FAILED;
4879 
4880 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4881 			if (!ata_tag_internal(qc->tag)) {
4882 				/* always fill result TF for failed qc */
4883 				fill_result_tf(qc);
4884 				ata_qc_schedule_eh(qc);
4885 				return;
4886 			}
4887 		}
4888 
4889 		/* read result TF if requested */
4890 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4891 			fill_result_tf(qc);
4892 
4893 		/* Some commands need post-processing after successful
4894 		 * completion.
4895 		 */
4896 		switch (qc->tf.command) {
4897 		case ATA_CMD_SET_FEATURES:
4898 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4899 			    qc->tf.feature != SETFEATURES_WC_OFF)
4900 				break;
4901 			/* fall through */
4902 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4903 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4904 			/* revalidate device */
4905 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4906 			ata_port_schedule_eh(ap);
4907 			break;
4908 
4909 		case ATA_CMD_SLEEP:
4910 			dev->flags |= ATA_DFLAG_SLEEPING;
4911 			break;
4912 		}
4913 
4914 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4915 			ata_verify_xfer(qc);
4916 
4917 		__ata_qc_complete(qc);
4918 	} else {
4919 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4920 			return;
4921 
4922 		/* read result TF if failed or requested */
4923 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4924 			fill_result_tf(qc);
4925 
4926 		__ata_qc_complete(qc);
4927 	}
4928 }
4929 
4930 /**
4931  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4932  *	@ap: port in question
4933  *	@qc_active: new qc_active mask
4934  *
4935  *	Complete in-flight commands.  This function is meant to be
4936  *	called from the low-level driver's interrupt routine to complete
4937  *	requests normally.  ap->qc_active and @qc_active are compared
4938  *	and commands are completed accordingly.
4939  *
4940  *	LOCKING:
4941  *	spin_lock_irqsave(host lock)
4942  *
4943  *	RETURNS:
4944  *	Number of completed commands on success, -errno otherwise.
4945  */
4946 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4947 {
4948 	int nr_done = 0;
4949 	u32 done_mask;
4950 	int i;
4951 
4952 	done_mask = ap->qc_active ^ qc_active;
4953 
4954 	if (unlikely(done_mask & qc_active)) {
4955 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4956 				"(%08x->%08x)\n", ap->qc_active, qc_active);
4957 		return -EINVAL;
4958 	}
4959 
4960 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
4961 		struct ata_queued_cmd *qc;
4962 
4963 		if (!(done_mask & (1 << i)))
4964 			continue;
4965 
4966 		if ((qc = ata_qc_from_tag(ap, i))) {
4967 			ata_qc_complete(qc);
4968 			nr_done++;
4969 		}
4970 	}
4971 
4972 	return nr_done;
4973 }
4974 
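/*
 * Example (sketch): an NCQ-aware interrupt handler derives the new
 * active mask from hardware and lets this helper complete whatever
 * dropped out of it.  "read_hw_active_tags" stands in for the
 * controller-specific register read:
 *
 *	u32 qc_active = read_hw_active_tags(ap);
 *
 *	ata_qc_complete_multiple(ap, qc_active);
 */
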
4975 /**
4976  *	ata_qc_issue - issue taskfile to device
4977  *	@qc: command to issue to device
4978  *
4979  *	Prepare an ATA command for submission to the device.
4980  *	This includes mapping the data into a DMA-able
4981  *	area, filling in the S/G table, and finally
4982  *	writing the taskfile to hardware, starting the command.
4983  *
4984  *	LOCKING:
4985  *	spin_lock_irqsave(host lock)
4986  */
4987 void ata_qc_issue(struct ata_queued_cmd *qc)
4988 {
4989 	struct ata_port *ap = qc->ap;
4990 	struct ata_link *link = qc->dev->link;
4991 	u8 prot = qc->tf.protocol;
4992 
4993 	/* Make sure only one non-NCQ command is outstanding.  The
4994 	 * check is skipped for old EH because it reuses active qc to
4995 	 * request ATAPI sense.
4996 	 */
4997 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4998 
4999 	if (ata_is_ncq(prot)) {
5000 		WARN_ON(link->sactive & (1 << qc->tag));
5001 
5002 		if (!link->sactive)
5003 			ap->nr_active_links++;
5004 		link->sactive |= 1 << qc->tag;
5005 	} else {
5006 		WARN_ON(link->sactive);
5007 
5008 		ap->nr_active_links++;
5009 		link->active_tag = qc->tag;
5010 	}
5011 
5012 	qc->flags |= ATA_QCFLAG_ACTIVE;
5013 	ap->qc_active |= 1 << qc->tag;
5014 
5015 	/* We guarantee to LLDs that they will have at least one
5016 	 * non-zero sg if the command is a data command.
5017 	 */
5018 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
5019 
5020 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5021 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5022 		if (ata_sg_setup(qc))
5023 			goto sg_err;
5024 
5025 	/* if device is sleeping, schedule reset and abort the link */
5026 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5027 		link->eh_info.action |= ATA_EH_RESET;
5028 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5029 		ata_link_abort(link);
5030 		return;
5031 	}
5032 
5033 	ap->ops->qc_prep(qc);
5034 
5035 	qc->err_mask |= ap->ops->qc_issue(qc);
5036 	if (unlikely(qc->err_mask))
5037 		goto err;
5038 	return;
5039 
5040 sg_err:
5041 	qc->err_mask |= AC_ERR_SYSTEM;
5042 err:
5043 	ata_qc_complete(qc);
5044 }
5045 
5046 /**
5047  *	sata_scr_valid - test whether SCRs are accessible
5048  *	@link: ATA link to test SCR accessibility for
5049  *
5050  *	Test whether SCRs are accessible for @link.
5051  *
5052  *	LOCKING:
5053  *	None.
5054  *
5055  *	RETURNS:
5056  *	1 if SCRs are accessible, 0 otherwise.
5057  */
5058 int sata_scr_valid(struct ata_link *link)
5059 {
5060 	struct ata_port *ap = link->ap;
5061 
5062 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5063 }
5064 
5065 /**
5066  *	sata_scr_read - read SCR register of the specified port
5067  *	@link: ATA link to read SCR for
5068  *	@reg: SCR to read
5069  *	@val: Place to store read value
5070  *
5071  *	Read SCR register @reg of @link into *@val.  This function is
5072  *	guaranteed to succeed if @link is ap->link, the cable type of
5073  *	the port is SATA and the port implements ->scr_read.
5074  *
5075  *	LOCKING:
5076  *	None if @link is ap->link.  Kernel thread context otherwise.
5077  *
5078  *	RETURNS:
5079  *	0 on success, negative errno on failure.
5080  */
5081 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5082 {
5083 	if (ata_is_host_link(link)) {
5084 		if (sata_scr_valid(link))
5085 			return link->ap->ops->scr_read(link, reg, val);
5086 		return -EOPNOTSUPP;
5087 	}
5088 
5089 	return sata_pmp_scr_read(link, reg, val);
5090 }
5091 
5092 /**
5093  *	sata_scr_write - write SCR register of the specified port
5094  *	@link: ATA link to write SCR for
5095  *	@reg: SCR to write
5096  *	@val: value to write
5097  *
5098  *	Write @val to SCR register @reg of @link.  This function is
5099  *	guaranteed to succeed if @link is ap->link, the cable type of
5100  *	the port is SATA and the port implements ->scr_read.
5101  *
5102  *	LOCKING:
5103  *	None if @link is ap->link.  Kernel thread context otherwise.
5104  *
5105  *	RETURNS:
5106  *	0 on success, negative errno on failure.
5107  */
5108 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5109 {
5110 	if (ata_is_host_link(link)) {
5111 		if (sata_scr_valid(link))
5112 			return link->ap->ops->scr_write(link, reg, val);
5113 		return -EOPNOTSUPP;
5114 	}
5115 
5116 	return sata_pmp_scr_write(link, reg, val);
5117 }
5118 
5119 /**
5120  *	sata_scr_write_flush - write SCR register of the specified port and flush
5121  *	@link: ATA link to write SCR for
5122  *	@reg: SCR to write
5123  *	@val: value to write
5124  *
5125  *	This function is identical to sata_scr_write() except that this
5126  *	function performs flush after writing to the register.
5127  *
5128  *	LOCKING:
5129  *	None if @link is ap->link.  Kernel thread context otherwise.
5130  *
5131  *	RETURNS:
5132  *	0 on success, negative errno on failure.
5133  */
5134 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5135 {
5136 	if (ata_is_host_link(link)) {
5137 		int rc;
5138 
5139 		if (sata_scr_valid(link)) {
5140 			rc = link->ap->ops->scr_write(link, reg, val);
5141 			if (rc == 0)
5142 				rc = link->ap->ops->scr_read(link, reg, &val);
5143 			return rc;
5144 		}
5145 		return -EOPNOTSUPP;
5146 	}
5147 
5148 	return sata_pmp_scr_write(link, reg, val);
5149 }
5150 
5151 /**
5152  *	ata_phys_link_online - test whether the given link is online
5153  *	@link: ATA link to test
5154  *
5155  *	Test whether @link is online.  Note that this function returns
5156  *	0 if the online status of @link cannot be obtained, so
5157  *	ata_phys_link_online(link) != !ata_phys_link_offline(link).
5158  *
5159  *	LOCKING:
5160  *	None.
5161  *
5162  *	RETURNS:
5163  *	True if the port online status is available and online.
5164  */
5165 bool ata_phys_link_online(struct ata_link *link)
5166 {
5167 	u32 sstatus;
5168 
5169 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5170 	    (sstatus & 0xf) == 0x3)
5171 		return true;
5172 	return false;
5173 }
5174 
5175 /**
5176  *	ata_phys_link_offline - test whether the given link is offline
5177  *	@link: ATA link to test
5178  *
5179  *	Test whether @link is offline.  Note that this function
5180  *	returns 0 if the offline status of @link cannot be obtained, so
5181  *	ata_phys_link_online(link) != !ata_phys_link_offline(link).
5182  *
5183  *	LOCKING:
5184  *	None.
5185  *
5186  *	RETURNS:
5187  *	True if the port offline status is available and offline.
5188  */
5189 bool ata_phys_link_offline(struct ata_link *link)
5190 {
5191 	u32 sstatus;
5192 
5193 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5194 	    (sstatus & 0xf) != 0x3)
5195 		return true;
5196 	return false;
5197 }
5198 
5199 /**
5200  *	ata_link_online - test whether the given link is online
5201  *	@link: ATA link to test
5202  *
5203  *	Test whether @link is online.  This is identical to
5204  *	ata_phys_link_online() when there's no slave link.  When
5205  *	there's a slave link, this function should only be called on
5206  *	the master link and will return true if any of M/S links is
5207  *	online.
5208  *
5209  *	LOCKING:
5210  *	None.
5211  *
5212  *	RETURNS:
5213  *	True if the port online status is available and online.
5214  */
5215 bool ata_link_online(struct ata_link *link)
5216 {
5217 	struct ata_link *slave = link->ap->slave_link;
5218 
5219 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5220 
5221 	return ata_phys_link_online(link) ||
5222 		(slave && ata_phys_link_online(slave));
5223 }
5224 
5225 /**
5226  *	ata_link_offline - test whether the given link is offline
5227  *	@link: ATA link to test
5228  *
5229  *	Test whether @link is offline.  This is identical to
5230  *	ata_phys_link_offline() when there's no slave link.  When
5231  *	there's a slave link, this function should only be called on
5232  *	the master link and will return true if both M/S links are
5233  *	offline.
5234  *
5235  *	LOCKING:
5236  *	None.
5237  *
5238  *	RETURNS:
5239  *	True if the port offline status is available and offline.
5240  */
5241 bool ata_link_offline(struct ata_link *link)
5242 {
5243 	struct ata_link *slave = link->ap->slave_link;
5244 
5245 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5246 
5247 	return ata_phys_link_offline(link) &&
5248 		(!slave || ata_phys_link_offline(slave));
5249 }
5250 
5251 #ifdef CONFIG_PM
5252 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5253 			       unsigned int action, unsigned int ehi_flags,
5254 			       int wait)
5255 {
5256 	unsigned long flags;
5257 	int i, rc;
5258 
5259 	for (i = 0; i < host->n_ports; i++) {
5260 		struct ata_port *ap = host->ports[i];
5261 		struct ata_link *link;
5262 
5263 		/* Previous resume operation might still be in
5264 		 * progress.  Wait for PM_PENDING to clear.
5265 		 */
5266 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5267 			ata_port_wait_eh(ap);
5268 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5269 		}
5270 
5271 		/* request PM ops to EH */
5272 		spin_lock_irqsave(ap->lock, flags);
5273 
5274 		ap->pm_mesg = mesg;
5275 		if (wait) {
5276 			rc = 0;
5277 			ap->pm_result = &rc;
5278 		}
5279 
5280 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5281 		ata_for_each_link(link, ap, HOST_FIRST) {
5282 			link->eh_info.action |= action;
5283 			link->eh_info.flags |= ehi_flags;
5284 		}
5285 
5286 		ata_port_schedule_eh(ap);
5287 
5288 		spin_unlock_irqrestore(ap->lock, flags);
5289 
5290 		/* wait and check result */
5291 		if (wait) {
5292 			ata_port_wait_eh(ap);
5293 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5294 			if (rc)
5295 				return rc;
5296 		}
5297 	}
5298 
5299 	return 0;
5300 }
5301 
5302 /**
5303  *	ata_host_suspend - suspend host
5304  *	@host: host to suspend
5305  *	@mesg: PM message
5306  *
5307  *	Suspend @host.  Actual operation is performed by EH.  This
5308  *	function requests EH to perform PM operations and waits for EH
5309  *	to finish.
5310  *
5311  *	LOCKING:
5312  *	Kernel thread context (may sleep).
5313  *
5314  *	RETURNS:
5315  *	0 on success, -errno on failure.
5316  */
5317 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5318 {
5319 	int rc;
5320 
5321 	/*
5322 	 * disable link pm on all ports before requesting
5323 	 * any pm activity
5324 	 */
5325 	ata_lpm_enable(host);
5326 
5327 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5328 	if (rc == 0)
5329 		host->dev->power.power_state = mesg;
5330 	return rc;
5331 }
5332 
5333 /**
5334  *	ata_host_resume - resume host
5335  *	@host: host to resume
5336  *
5337  *	Resume @host.  Actual operation is performed by EH.  This
5338  *	function requests EH to perform PM operations and returns.
5339  *	Note that all resume operations are performed in parallel.
5340  *
5341  *	LOCKING:
5342  *	Kernel thread context (may sleep).
5343  */
5344 void ata_host_resume(struct ata_host *host)
5345 {
5346 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5347 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5348 	host->dev->power.power_state = PMSG_ON;
5349 
5350 	/* reenable link pm */
5351 	ata_lpm_disable(host);
5352 }
5353 #endif
5354 
5355 /**
5356  *	ata_port_start - Set port up for dma.
5357  *	@ap: Port to initialize
5358  *
5359  *	Called just after data structures for each port are
5360  *	initialized.  Allocates space for PRD table.
5361  *
5362  *	May be used as the port_start() entry in ata_port_operations.
5363  *
5364  *	LOCKING:
5365  *	Inherited from caller.
5366  */
5367 int ata_port_start(struct ata_port *ap)
5368 {
5369 	struct device *dev = ap->dev;
5370 
5371 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5372 				      GFP_KERNEL);
5373 	if (!ap->prd)
5374 		return -ENOMEM;
5375 
5376 	return 0;
5377 }
5378 
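/*
 * Example (hypothetical LLD code): drivers that only need the PRD table
 * can plug this helper straight into their ops:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.port_start	= ata_port_start,
 *	};
 */
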
5379 /**
5380  *	ata_dev_init - Initialize an ata_device structure
5381  *	@dev: Device structure to initialize
5382  *
5383  *	Initialize @dev in preparation for probing.
5384  *
5385  *	LOCKING:
5386  *	Inherited from caller.
5387  */
5388 void ata_dev_init(struct ata_device *dev)
5389 {
5390 	struct ata_link *link = ata_dev_phys_link(dev);
5391 	struct ata_port *ap = link->ap;
5392 	unsigned long flags;
5393 
5394 	/* SATA spd limit is bound to the attached device, reset together */
5395 	link->sata_spd_limit = link->hw_sata_spd_limit;
5396 	link->sata_spd = 0;
5397 
5398 	/* High bits of dev->flags are used to record warm plug
5399 	 * requests which occur asynchronously.  Synchronize using
5400 	 * host lock.
5401 	 */
5402 	spin_lock_irqsave(ap->lock, flags);
5403 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5404 	dev->horkage = 0;
5405 	spin_unlock_irqrestore(ap->lock, flags);
5406 
5407 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5408 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5409 	dev->pio_mask = UINT_MAX;
5410 	dev->mwdma_mask = UINT_MAX;
5411 	dev->udma_mask = UINT_MAX;
5412 }
5413 
5414 /**
5415  *	ata_link_init - Initialize an ata_link structure
5416  *	@ap: ATA port link is attached to
5417  *	@link: Link structure to initialize
5418  *	@pmp: Port multiplier port number
5419  *
5420  *	Initialize @link.
5421  *
5422  *	LOCKING:
5423  *	Kernel thread context (may sleep)
5424  */
5425 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5426 {
5427 	int i;
5428 
5429 	/* clear everything except for devices */
5430 	memset(link, 0, offsetof(struct ata_link, device[0]));
5431 
5432 	link->ap = ap;
5433 	link->pmp = pmp;
5434 	link->active_tag = ATA_TAG_POISON;
5435 	link->hw_sata_spd_limit = UINT_MAX;
5436 
5437 	/* can't use iterator, ap isn't initialized yet */
5438 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5439 		struct ata_device *dev = &link->device[i];
5440 
5441 		dev->link = link;
5442 		dev->devno = dev - link->device;
5443 		ata_dev_init(dev);
5444 	}
5445 }
5446 
5447 /**
5448  *	sata_link_init_spd - Initialize link->sata_spd_limit
5449  *	@link: Link to configure sata_spd_limit for
5450  *
5451  *	Initialize @link->[hw_]sata_spd_limit to the currently
5452  *	configured value.
5453  *
5454  *	LOCKING:
5455  *	Kernel thread context (may sleep).
5456  *
5457  *	RETURNS:
5458  *	0 on success, -errno on failure.
5459  */
5460 int sata_link_init_spd(struct ata_link *link)
5461 {
5462 	u8 spd;
5463 	int rc;
5464 
5465 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5466 	if (rc)
5467 		return rc;
5468 
5469 	spd = (link->saved_scontrol >> 4) & 0xf;
5470 	if (spd)
5471 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5472 
5473 	ata_force_link_limits(link);
5474 
5475 	link->sata_spd_limit = link->hw_sata_spd_limit;
5476 
5477 	return 0;
5478 }
5479 
5480 /**
5481  *	ata_port_alloc - allocate and initialize basic ATA port resources
5482  *	@host: ATA host this allocated port belongs to
5483  *
5484  *	Allocate and initialize basic ATA port resources.
5485  *
5486  *	RETURNS:
5487  *	Allocated ATA port on success, NULL on failure.
5488  *
5489  *	LOCKING:
5490  *	Inherited from calling layer (may sleep).
5491  */
5492 struct ata_port *ata_port_alloc(struct ata_host *host)
5493 {
5494 	struct ata_port *ap;
5495 
5496 	DPRINTK("ENTER\n");
5497 
5498 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5499 	if (!ap)
5500 		return NULL;
5501 
5502 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5503 	ap->lock = &host->lock;
5504 	ap->flags = ATA_FLAG_DISABLED;
5505 	ap->print_id = -1;
5506 	ap->ctl = ATA_DEVCTL_OBS;
5507 	ap->host = host;
5508 	ap->dev = host->dev;
5509 	ap->last_ctl = 0xFF;
5510 
5511 #if defined(ATA_VERBOSE_DEBUG)
5512 	/* turn on all debugging levels */
5513 	ap->msg_enable = 0x00FF;
5514 #elif defined(ATA_DEBUG)
5515 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5516 #else
5517 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5518 #endif
5519 
5520 #ifdef CONFIG_ATA_SFF
5521 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5522 #else
5523 	INIT_DELAYED_WORK(&ap->port_task, NULL);
5524 #endif
5525 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5526 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5527 	INIT_LIST_HEAD(&ap->eh_done_q);
5528 	init_waitqueue_head(&ap->eh_wait_q);
5529 	init_completion(&ap->park_req_pending);
5530 	init_timer_deferrable(&ap->fastdrain_timer);
5531 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5532 	ap->fastdrain_timer.data = (unsigned long)ap;
5533 
5534 	ap->cbl = ATA_CBL_NONE;
5535 
5536 	ata_link_init(ap, &ap->link, 0);
5537 
5538 #ifdef ATA_IRQ_TRAP
5539 	ap->stats.unhandled_irq = 1;
5540 	ap->stats.idle_irq = 1;
5541 #endif
5542 	return ap;
5543 }
5544 
5545 static void ata_host_release(struct device *gendev, void *res)
5546 {
5547 	struct ata_host *host = dev_get_drvdata(gendev);
5548 	int i;
5549 
5550 	for (i = 0; i < host->n_ports; i++) {
5551 		struct ata_port *ap = host->ports[i];
5552 
5553 		if (!ap)
5554 			continue;
5555 
5556 		if (ap->scsi_host)
5557 			scsi_host_put(ap->scsi_host);
5558 
5559 		kfree(ap->pmp_link);
5560 		kfree(ap->slave_link);
5561 		kfree(ap);
5562 		host->ports[i] = NULL;
5563 	}
5564 
5565 	dev_set_drvdata(gendev, NULL);
5566 }
5567 
5568 /**
5569  *	ata_host_alloc - allocate and init basic ATA host resources
5570  *	@dev: generic device this host is associated with
5571  *	@max_ports: maximum number of ATA ports associated with this host
5572  *
5573  *	Allocate and initialize basic ATA host resources.  An LLD
5574  *	calls this function to allocate a host, fully initializes it
5575  *	and then attaches it using ata_host_register().
5576  *
5577  *	@max_ports ports are allocated and host->n_ports is
5578  *	initialized to @max_ports.  The caller is allowed to decrease
5579  *	host->n_ports before calling ata_host_register().  The unused
5580  *	ports will be automatically freed on registration.
5581  *
5582  *	RETURNS:
5583  *	Allocated ATA host on success, NULL on failure.
5584  *
5585  *	LOCKING:
5586  *	Inherited from calling layer (may sleep).
5587  */
5588 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5589 {
5590 	struct ata_host *host;
5591 	size_t sz;
5592 	int i;
5593 
5594 	DPRINTK("ENTER\n");
5595 
5596 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5597 		return NULL;
5598 
5599 	/* alloc a container for our list of ATA ports (buses) */
5600 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5602 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5603 	if (!host)
5604 		goto err_out;
5605 
5606 	devres_add(dev, host);
5607 	dev_set_drvdata(dev, host);
5608 
5609 	spin_lock_init(&host->lock);
5610 	host->dev = dev;
5611 	host->n_ports = max_ports;
5612 
5613 	/* allocate ports bound to this host */
5614 	for (i = 0; i < max_ports; i++) {
5615 		struct ata_port *ap;
5616 
5617 		ap = ata_port_alloc(host);
5618 		if (!ap)
5619 			goto err_out;
5620 
5621 		ap->port_no = i;
5622 		host->ports[i] = ap;
5623 	}
5624 
5625 	devres_remove_group(dev, NULL);
5626 	return host;
5627 
5628  err_out:
5629 	devres_release_group(dev, NULL);
5630 	return NULL;
5631 }
5632 
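/*
 * A minimal usage sketch (illustrative, not from this file): an LLD
 * which can't determine the exact port count at allocation time may
 * over-allocate and shrink host->n_ports before registration.
 * MY_MAX_PORTS and my_count_ports() are hypothetical.
 *
 *	host = ata_host_alloc(&pdev->dev, MY_MAX_PORTS);
 *	if (!host)
 *		return -ENOMEM;
 *	host->n_ports = my_count_ports(host);	(must be <= MY_MAX_PORTS)
 *	(... initialize ports, then ata_host_register() ...)
 */
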
5633 /**
5634  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5635  *	@dev: generic device this host is associated with
5636  *	@ppi: array of ATA port_info to initialize host with
5637  *	@n_ports: number of ATA ports attached to this host
5638  *
5639  *	Allocate an ATA host and initialize it with info from @ppi.
5640  *	If NULL terminated, @ppi may contain fewer entries than
5641  *	@n_ports; the last entry is then used for the remaining ports.
5642  *
5643  *	RETURNS:
5644  *	Allocated ATA host on success, NULL on failure.
5645  *
5646  *	LOCKING:
5647  *	Inherited from calling layer (may sleep).
5648  */
5649 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5650 				      const struct ata_port_info * const * ppi,
5651 				      int n_ports)
5652 {
5653 	const struct ata_port_info *pi;
5654 	struct ata_host *host;
5655 	int i, j;
5656 
5657 	host = ata_host_alloc(dev, n_ports);
5658 	if (!host)
5659 		return NULL;
5660 
5661 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5662 		struct ata_port *ap = host->ports[i];
5663 
5664 		if (ppi[j])
5665 			pi = ppi[j++];
5666 
5667 		ap->pio_mask = pi->pio_mask;
5668 		ap->mwdma_mask = pi->mwdma_mask;
5669 		ap->udma_mask = pi->udma_mask;
5670 		ap->flags |= pi->flags;
5671 		ap->link.flags |= pi->link_flags;
5672 		ap->ops = pi->port_ops;
5673 
5674 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5675 			host->ops = pi->port_ops;
5676 	}
5677 
5678 	return host;
5679 }
5680 
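/*
 * A minimal usage sketch (illustrative): a two-port controller whose
 * ports share one port_info can pass a NULL terminated single-entry
 * array; the last (only) entry is reused for port 1.  my_port_info is
 * hypothetical.
 *
 *	static const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */
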
5681 /**
5682  *	ata_slave_link_init - initialize slave link
5683  *	@ap: port to initialize slave link for
5684  *
5685  *	Create and initialize slave link for @ap.  This enables slave
5686  *	link handling on the port.
5687  *
5688  *	In libata, a port contains links and a link contains devices.
5689  *	There is a single host link but, if a PMP is attached to it,
5690  *	there can be multiple fan-out links.  On SATA, there's usually
5691  *	a single device connected to a link, but PATA and SATA
5692  *	controllers emulating a TF based interface can have two:
5693  *	master and slave.
5694  *
5695  *	However, a few controllers don't fit this abstraction well:
5696  *	SATA controllers which emulate a TF interface with both
5697  *	master and slave devices but also have
5698  *	separate SCR register sets for each device.  These controllers
5699  *	need separate links for physical link handling
5700  *	(e.g. onlineness, link speed) but should be treated like a
5701  *	traditional M/S controller for everything else (e.g. command
5702  *	issue, softreset).
5703  *
5704  *	slave_link is libata's way of handling this class of
5705  *	controllers without impacting the core layer too much.  For
5706  *	anything other than physical link handling, the default host
5707  *	link is used for both master and slave.  For physical link
5708  *	handling, separate @ap->slave_link is used.  All dirty details
5709  *	are implemented inside libata core layer.  From LLD's POV, the
5710  *	only difference is that prereset, hardreset and postreset are
5711  *	called once more for the slave link, so the reset sequence
5712  *	looks like the following.
5713  *
5714  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5715  *	softreset(M) -> postreset(M) -> postreset(S)
5716  *
5717  *	Note that softreset is called only for the master.  Softreset
5718  *	resets both M/S by definition, so SRST on master should handle
5719  *	both (the standard method will work just fine).
5720  *
5721  *	LOCKING:
5722  *	Should be called before host is registered.
5723  *
5724  *	RETURNS:
5725  *	0 on success, -errno on failure.
5726  */
5727 int ata_slave_link_init(struct ata_port *ap)
5728 {
5729 	struct ata_link *link;
5730 
5731 	WARN_ON(ap->slave_link);
5732 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5733 
5734 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5735 	if (!link)
5736 		return -ENOMEM;
5737 
5738 	ata_link_init(ap, link, 1);
5739 	ap->slave_link = link;
5740 	return 0;
5741 }
5742 
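/*
 * A usage sketch (illustrative): an LLD driving a TF-emulating SATA
 * controller with per-device SCR register sets would call this from
 * its init/probe path, before the host is registered:
 *
 *	rc = ata_slave_link_init(ap);
 *	if (rc)
 *		return rc;
 */
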
5743 static void ata_host_stop(struct device *gendev, void *res)
5744 {
5745 	struct ata_host *host = dev_get_drvdata(gendev);
5746 	int i;
5747 
5748 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5749 
5750 	for (i = 0; i < host->n_ports; i++) {
5751 		struct ata_port *ap = host->ports[i];
5752 
5753 		if (ap->ops->port_stop)
5754 			ap->ops->port_stop(ap);
5755 	}
5756 
5757 	if (host->ops->host_stop)
5758 		host->ops->host_stop(host);
5759 }
5760 
5761 /**
5762  *	ata_finalize_port_ops - finalize ata_port_operations
5763  *	@ops: ata_port_operations to finalize
5764  *
5765  *	An ata_port_operations can inherit from another ops and that
5766  *	ops can again inherit from another.  This can go on as many
5767  *	times as necessary as long as there is no loop in the
5768  *	inheritance chain.
5769  *
5770  *	Ops tables are finalized when the host is started.  NULL or
5771  *	unspecified entries are inherited from the closest ancestor
5772  *	which has the method and the entry is populated with it.
5773  *	After finalization, the ops table directly points to all the
5774  *	methods and ->inherits is no longer necessary and cleared.
5775  *
5776  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5777  *
5778  *	LOCKING:
5779  *	None.
5780  */
5781 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5782 {
5783 	static DEFINE_SPINLOCK(lock);
5784 	const struct ata_port_operations *cur;
5785 	void **begin = (void **)ops;
5786 	void **end = (void **)&ops->inherits;
5787 	void **pp;
5788 
5789 	if (!ops || !ops->inherits)
5790 		return;
5791 
5792 	spin_lock(&lock);
5793 
5794 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5795 		void **inherit = (void **)cur;
5796 
5797 		for (pp = begin; pp < end; pp++, inherit++)
5798 			if (!*pp)
5799 				*pp = *inherit;
5800 	}
5801 
5802 	for (pp = begin; pp < end; pp++)
5803 		if (IS_ERR(*pp))
5804 			*pp = NULL;
5805 
5806 	ops->inherits = NULL;
5807 
5808 	spin_unlock(&lock);
5809 }
5810 
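/*
 * An inheritance sketch (illustrative): unspecified methods in my_ops
 * are filled in from sata_port_ops (and its ancestors) at host start,
 * while ATA_OP_NULL forces a slot to NULL.  my_hardreset is
 * hypothetical.
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *	};
 */
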
5811 /**
5812  *	ata_host_start - start and freeze ports of an ATA host
5813  *	@host: ATA host to start ports for
5814  *
5815  *	Start and then freeze ports of @host.  Started status is
5816  *	recorded in host->flags, so this function can be called
5817  *	multiple times.  Ports are guaranteed to get started only
5818  *	once.  If host->ops isn't initialized yet, it's set to the
5819  *	first non-dummy port ops.
5820  *
5821  *	LOCKING:
5822  *	Inherited from calling layer (may sleep).
5823  *
5824  *	RETURNS:
5825  *	0 if all ports are started successfully, -errno otherwise.
5826  */
5827 int ata_host_start(struct ata_host *host)
5828 {
5829 	int have_stop = 0;
5830 	void *start_dr = NULL;
5831 	int i, rc;
5832 
5833 	if (host->flags & ATA_HOST_STARTED)
5834 		return 0;
5835 
5836 	ata_finalize_port_ops(host->ops);
5837 
5838 	for (i = 0; i < host->n_ports; i++) {
5839 		struct ata_port *ap = host->ports[i];
5840 
5841 		ata_finalize_port_ops(ap->ops);
5842 
5843 		if (!host->ops && !ata_port_is_dummy(ap))
5844 			host->ops = ap->ops;
5845 
5846 		if (ap->ops->port_stop)
5847 			have_stop = 1;
5848 	}
5849 
5850 	if (host->ops->host_stop)
5851 		have_stop = 1;
5852 
5853 	if (have_stop) {
5854 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5855 		if (!start_dr)
5856 			return -ENOMEM;
5857 	}
5858 
5859 	for (i = 0; i < host->n_ports; i++) {
5860 		struct ata_port *ap = host->ports[i];
5861 
5862 		if (ap->ops->port_start) {
5863 			rc = ap->ops->port_start(ap);
5864 			if (rc) {
5865 				if (rc != -ENODEV)
5866 					dev_printk(KERN_ERR, host->dev,
5867 						"failed to start port %d "
5868 						"(errno=%d)\n", i, rc);
5869 				goto err_out;
5870 			}
5871 		}
5872 		ata_eh_freeze_port(ap);
5873 	}
5874 
5875 	if (start_dr)
5876 		devres_add(host->dev, start_dr);
5877 	host->flags |= ATA_HOST_STARTED;
5878 	return 0;
5879 
5880  err_out:
5881 	while (--i >= 0) {
5882 		struct ata_port *ap = host->ports[i];
5883 
5884 		if (ap->ops->port_stop)
5885 			ap->ops->port_stop(ap);
5886 	}
5887 	devres_free(start_dr);
5888 	return rc;
5889 }
5890 
5891 /**
5892  *	ata_host_init - Initialize a host struct
5893  *	@host:	host to initialize
5894  *	@dev:	device host is attached to
5895  *	@flags:	host flags
5896  *	@ops:	port_ops
5897  *
5898  *	LOCKING:
5899  *	PCI/etc. bus probe sem.
5900  *
5901  */
5902 /* KILLME - the only user left is ipr */
5903 void ata_host_init(struct ata_host *host, struct device *dev,
5904 		   unsigned long flags, struct ata_port_operations *ops)
5905 {
5906 	spin_lock_init(&host->lock);
5907 	host->dev = dev;
5908 	host->flags = flags;
5909 	host->ops = ops;
5910 }
5911 
5912 /**
5913  *	ata_host_register - register initialized ATA host
5914  *	@host: ATA host to register
5915  *	@sht: template for SCSI host
5916  *
5917  *	Register initialized ATA host.  @host is allocated using
5918  *	ata_host_alloc() and fully initialized by the LLD.  This
5919  *	function starts ports, registers @host with the ATA and SCSI
5920  *	layers and probes registered devices.
5921  *
5922  *	LOCKING:
5923  *	Inherited from calling layer (may sleep).
5924  *
5925  *	RETURNS:
5926  *	0 on success, -errno otherwise.
5927  */
5928 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5929 {
5930 	int i, rc;
5931 
5932 	/* host must have been started */
5933 	if (!(host->flags & ATA_HOST_STARTED)) {
5934 		dev_printk(KERN_ERR, host->dev,
5935 			   "BUG: trying to register unstarted host\n");
5936 		WARN_ON(1);
5937 		return -EINVAL;
5938 	}
5939 
5940 	/* Blow away unused ports.  This happens when LLD can't
5941 	 * determine the exact number of ports to allocate at
5942 	 * allocation time.
5943 	 */
5944 	for (i = host->n_ports; host->ports[i]; i++)
5945 		kfree(host->ports[i]);
5946 
5947 	/* give ports names and add SCSI hosts */
5948 	for (i = 0; i < host->n_ports; i++)
5949 		host->ports[i]->print_id = ata_print_id++;
5950 
5951 	rc = ata_scsi_add_hosts(host, sht);
5952 	if (rc)
5953 		return rc;
5954 
5955 	/* associate with ACPI nodes */
5956 	ata_acpi_associate(host);
5957 
5958 	/* set cable, sata_spd_limit and report */
5959 	for (i = 0; i < host->n_ports; i++) {
5960 		struct ata_port *ap = host->ports[i];
5961 		unsigned long xfer_mask;
5962 
5963 		/* set SATA cable type if still unset */
5964 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5965 			ap->cbl = ATA_CBL_SATA;
5966 
5967 		/* init sata_spd_limit to the current value */
5968 		sata_link_init_spd(&ap->link);
5969 		if (ap->slave_link)
5970 			sata_link_init_spd(ap->slave_link);
5971 
5972 		/* print per-port info to dmesg */
5973 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5974 					      ap->udma_mask);
5975 
5976 		if (!ata_port_is_dummy(ap)) {
5977 			ata_port_printk(ap, KERN_INFO,
5978 					"%cATA max %s %s\n",
5979 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5980 					ata_mode_string(xfer_mask),
5981 					ap->link.eh_info.desc);
5982 			ata_ehi_clear_desc(&ap->link.eh_info);
5983 		} else
5984 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5985 	}
5986 
5987 	/* perform each probe synchronously */
5988 	DPRINTK("probe begin\n");
5989 	for (i = 0; i < host->n_ports; i++) {
5990 		struct ata_port *ap = host->ports[i];
5991 
5992 		/* probe */
5993 		if (ap->ops->error_handler) {
5994 			struct ata_eh_info *ehi = &ap->link.eh_info;
5995 			unsigned long flags;
5996 
5997 			ata_port_probe(ap);
5998 
5999 			/* kick EH for boot probing */
6000 			spin_lock_irqsave(ap->lock, flags);
6001 
6002 			ehi->probe_mask |= ATA_ALL_DEVICES;
6003 			ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
6004 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6005 
6006 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6007 			ap->pflags |= ATA_PFLAG_LOADING;
6008 			ata_port_schedule_eh(ap);
6009 
6010 			spin_unlock_irqrestore(ap->lock, flags);
6011 
6012 			/* wait for EH to finish */
6013 			ata_port_wait_eh(ap);
6014 		} else {
6015 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6016 			rc = ata_bus_probe(ap);
6017 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
6018 
6019 			if (rc) {
6020 				/* FIXME: do something useful here?
6021 				 * Current libata behavior will
6022 				 * tear down everything when
6023 				 * the module is removed
6024 				 * or the h/w is unplugged.
6025 				 */
6026 			}
6027 		}
6028 	}
6029 
6030 	/* probes are done, now scan each port's disk(s) */
6031 	DPRINTK("host probe begin\n");
6032 	for (i = 0; i < host->n_ports; i++) {
6033 		struct ata_port *ap = host->ports[i];
6034 
6035 		ata_scsi_scan_host(ap, 1);
6036 	}
6037 
6038 	return 0;
6039 }
6040 
6041 /**
6042  *	ata_host_activate - start host, request IRQ and register it
6043  *	@host: target ATA host
6044  *	@irq: IRQ to request
6045  *	@irq_handler: irq_handler used when requesting IRQ
6046  *	@irq_flags: irq_flags used when requesting IRQ
6047  *	@sht: scsi_host_template to use when registering the host
6048  *
6049  *	After allocating an ATA host and initializing it, most libata
6050  *	LLDs perform three steps to activate the host - start host,
6051  *	request IRQ and register it.  This helper takes the necessary
6052  *	arguments and performs the three steps in one go.
6053  *
6054  *	An invalid IRQ skips the IRQ registration and expects the host to
6055  *	have set polling mode on the port. In this case, @irq_handler
6056  *	should be NULL.
6057  *
6058  *	LOCKING:
6059  *	Inherited from calling layer (may sleep).
6060  *
6061  *	RETURNS:
6062  *	0 on success, -errno otherwise.
6063  */
6064 int ata_host_activate(struct ata_host *host, int irq,
6065 		      irq_handler_t irq_handler, unsigned long irq_flags,
6066 		      struct scsi_host_template *sht)
6067 {
6068 	int i, rc;
6069 
6070 	rc = ata_host_start(host);
6071 	if (rc)
6072 		return rc;
6073 
6074 	/* Special case for polling mode */
6075 	if (!irq) {
6076 		WARN_ON(irq_handler);
6077 		return ata_host_register(host, sht);
6078 	}
6079 
6080 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6081 			      dev_driver_string(host->dev), host);
6082 	if (rc)
6083 		return rc;
6084 
6085 	for (i = 0; i < host->n_ports; i++)
6086 		ata_port_desc(host->ports[i], "irq %d", irq);
6087 
6088 	rc = ata_host_register(host, sht);
6089 	/* if failed, just free the IRQ and leave ports alone */
6090 	if (rc)
6091 		devm_free_irq(host->dev, irq, host);
6092 
6093 	return rc;
6094 }
6095 
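/*
 * A probe-path sketch (illustrative): the typical tail of a PCI LLD's
 * probe routine.  ppi, my_interrupt and my_sht are hypothetical.
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	(... iomap BARs, fill ap->ioaddr, etc. ...)
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */
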
6096 /**
6097  *	ata_port_detach - Detach ATA port in preparation for device removal
6098  *	@ap: ATA port to be detached
6099  *
6100  *	Detach all ATA devices and the associated SCSI devices of @ap;
6101  *	then, remove the associated SCSI host.  @ap is guaranteed to
6102  *	be quiescent on return from this function.
6103  *
6104  *	LOCKING:
6105  *	Kernel thread context (may sleep).
6106  */
6107 static void ata_port_detach(struct ata_port *ap)
6108 {
6109 	unsigned long flags;
6110 
6111 	if (!ap->ops->error_handler)
6112 		goto skip_eh;
6113 
6114 	/* tell EH we're leaving & flush EH */
6115 	spin_lock_irqsave(ap->lock, flags);
6116 	ap->pflags |= ATA_PFLAG_UNLOADING;
6117 	ata_port_schedule_eh(ap);
6118 	spin_unlock_irqrestore(ap->lock, flags);
6119 
6120 	/* wait till EH commits suicide */
6121 	ata_port_wait_eh(ap);
6122 
6123 	/* it better be dead now */
6124 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6125 
6126 	cancel_rearming_delayed_work(&ap->hotplug_task);
6127 
6128  skip_eh:
6129 	/* remove the associated SCSI host */
6130 	scsi_remove_host(ap->scsi_host);
6131 }
6132 
6133 /**
6134  *	ata_host_detach - Detach all ports of an ATA host
6135  *	@host: Host to detach
6136  *
6137  *	Detach all ports of @host.
6138  *
6139  *	LOCKING:
6140  *	Kernel thread context (may sleep).
6141  */
6142 void ata_host_detach(struct ata_host *host)
6143 {
6144 	int i;
6145 
6146 	for (i = 0; i < host->n_ports; i++)
6147 		ata_port_detach(host->ports[i]);
6148 
6149 	/* the host is dead now, dissociate ACPI */
6150 	ata_acpi_dissociate(host);
6151 }
6152 
6153 #ifdef CONFIG_PCI
6154 
6155 /**
6156  *	ata_pci_remove_one - PCI layer callback for device removal
6157  *	@pdev: PCI device that was removed
6158  *
6159  *	The PCI layer indicates to libata via this hook that a
6160  *	hot-unplug or module unload event has occurred.  Detach all
6161  *	ports.  Resource release is handled via devres.
6162  *
6163  *	LOCKING:
6164  *	Inherited from PCI layer (may sleep).
6165  */
6166 void ata_pci_remove_one(struct pci_dev *pdev)
6167 {
6168 	struct device *dev = &pdev->dev;
6169 	struct ata_host *host = dev_get_drvdata(dev);
6170 
6171 	ata_host_detach(host);
6172 }
6173 
6174 /* move to PCI subsystem */
6175 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6176 {
6177 	unsigned long tmp = 0;
6178 
6179 	switch (bits->width) {
6180 	case 1: {
6181 		u8 tmp8 = 0;
6182 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6183 		tmp = tmp8;
6184 		break;
6185 	}
6186 	case 2: {
6187 		u16 tmp16 = 0;
6188 		pci_read_config_word(pdev, bits->reg, &tmp16);
6189 		tmp = tmp16;
6190 		break;
6191 	}
6192 	case 4: {
6193 		u32 tmp32 = 0;
6194 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6195 		tmp = tmp32;
6196 		break;
6197 	}
6198 
6199 	default:
6200 		return -EINVAL;
6201 	}
6202 
6203 	tmp &= bits->mask;
6204 
6205 	return (tmp == bits->val) ? 1 : 0;
6206 }
6207 
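/*
 * A usage sketch (illustrative, modeled on common PATA drivers): test
 * a port-enable bit in PCI config space from a prereset hook.  The
 * register offsets and masks are hypothetical.
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	(port 0: 8-bit read at 0x41)
 *		{ 0x43, 1, 0x80, 0x80 },	(port 1)
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[ap->port_no]))
 *		return -ENOENT;			(port disabled)
 */
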
6208 #ifdef CONFIG_PM
6209 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6210 {
6211 	pci_save_state(pdev);
6212 	pci_disable_device(pdev);
6213 
6214 	if (mesg.event & PM_EVENT_SLEEP)
6215 		pci_set_power_state(pdev, PCI_D3hot);
6216 }
6217 
6218 int ata_pci_device_do_resume(struct pci_dev *pdev)
6219 {
6220 	int rc;
6221 
6222 	pci_set_power_state(pdev, PCI_D0);
6223 	pci_restore_state(pdev);
6224 
6225 	rc = pcim_enable_device(pdev);
6226 	if (rc) {
6227 		dev_printk(KERN_ERR, &pdev->dev,
6228 			   "failed to enable device after resume (%d)\n", rc);
6229 		return rc;
6230 	}
6231 
6232 	pci_set_master(pdev);
6233 	return 0;
6234 }
6235 
6236 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6237 {
6238 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6239 	int rc = 0;
6240 
6241 	rc = ata_host_suspend(host, mesg);
6242 	if (rc)
6243 		return rc;
6244 
6245 	ata_pci_device_do_suspend(pdev, mesg);
6246 
6247 	return 0;
6248 }
6249 
6250 int ata_pci_device_resume(struct pci_dev *pdev)
6251 {
6252 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6253 	int rc;
6254 
6255 	rc = ata_pci_device_do_resume(pdev);
6256 	if (rc == 0)
6257 		ata_host_resume(host);
6258 	return rc;
6259 }
6260 #endif /* CONFIG_PM */
6261 
6262 #endif /* CONFIG_PCI */
6263 
6264 static int __init ata_parse_force_one(char **cur,
6265 				      struct ata_force_ent *force_ent,
6266 				      const char **reason)
6267 {
6268 	/* FIXME: Currently, there's no way to tag init const data and
6269 	 * using __initdata causes build failure on some versions of
6270 	 * gcc.  Once __initdataconst is implemented, add const to the
6271 	 * following structure.
6272 	 */
6273 	static struct ata_force_param force_tbl[] __initdata = {
6274 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6275 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6276 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6277 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6278 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6279 		{ "sata",	.cbl		= ATA_CBL_SATA },
6280 		{ "1.5Gbps",	.spd_limit	= 1 },
6281 		{ "3.0Gbps",	.spd_limit	= 2 },
6282 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6283 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6284 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6285 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6286 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6287 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6288 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6289 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6290 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6291 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6292 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6293 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6294 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6295 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6296 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6297 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6298 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6299 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6300 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6301 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6302 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6303 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6304 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6305 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6306 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6307 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6308 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6309 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6310 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6311 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6312 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6313 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6314 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6315 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6316 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6317 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6318 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6319 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6320 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6321 	};
6322 	char *start = *cur, *p = *cur;
6323 	char *id, *val, *endp;
6324 	const struct ata_force_param *match_fp = NULL;
6325 	int nr_matches = 0, i;
6326 
6327 	/* find where this param ends and update *cur */
6328 	while (*p != '\0' && *p != ',')
6329 		p++;
6330 
6331 	if (*p == '\0')
6332 		*cur = p;
6333 	else
6334 		*cur = p + 1;
6335 
6336 	*p = '\0';
6337 
6338 	/* parse */
6339 	p = strchr(start, ':');
6340 	if (!p) {
6341 		val = strstrip(start);
6342 		goto parse_val;
6343 	}
6344 	*p = '\0';
6345 
6346 	id = strstrip(start);
6347 	val = strstrip(p + 1);
6348 
6349 	/* parse id */
6350 	p = strchr(id, '.');
6351 	if (p) {
6352 		*p++ = '\0';
6353 		force_ent->device = simple_strtoul(p, &endp, 10);
6354 		if (p == endp || *endp != '\0') {
6355 			*reason = "invalid device";
6356 			return -EINVAL;
6357 		}
6358 	}
6359 
6360 	force_ent->port = simple_strtoul(id, &endp, 10);
6361 	if (id == endp || *endp != '\0') {
6362 		*reason = "invalid port/link";
6363 		return -EINVAL;
6364 	}
6365 
6366  parse_val:
6367 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6368 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6369 		const struct ata_force_param *fp = &force_tbl[i];
6370 
6371 		if (strncasecmp(val, fp->name, strlen(val)))
6372 			continue;
6373 
6374 		nr_matches++;
6375 		match_fp = fp;
6376 
6377 		if (strcasecmp(val, fp->name) == 0) {
6378 			nr_matches = 1;
6379 			break;
6380 		}
6381 	}
6382 
6383 	if (!nr_matches) {
6384 		*reason = "unknown value";
6385 		return -EINVAL;
6386 	}
6387 	if (nr_matches > 1) {
6388 		*reason = "ambigious value";
6389 		return -EINVAL;
6390 	}
6391 
6392 	force_ent->param = *match_fp;
6393 
6394 	return 0;
6395 }
6396 
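/*
 * Parameter format (per the libata.force kernel parameter): a comma
 * separated list of [PORT[.DEVICE]:]VALUE entries.  For example,
 *
 *	libata.force=1.5Gbps,2:noncq,3.00:udma4
 *
 * limits every link to 1.5Gbps, turns off NCQ for all devices on port
 * 2 and limits device 0 of port 3 to UDMA/66.
 */
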
6397 static void __init ata_parse_force_param(void)
6398 {
6399 	int idx = 0, size = 1;
6400 	int last_port = -1, last_device = -1;
6401 	char *p, *cur, *next;
6402 
6403 	/* calculate maximum number of params and allocate force_tbl */
6404 	for (p = ata_force_param_buf; *p; p++)
6405 		if (*p == ',')
6406 			size++;
6407 
6408 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6409 	if (!ata_force_tbl) {
6410 		printk(KERN_WARNING "ata: failed to extend force table, "
6411 		       "libata.force ignored\n");
6412 		return;
6413 	}
6414 
6415 	/* parse and populate the table */
6416 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6417 		const char *reason = "";
6418 		struct ata_force_ent te = { .port = -1, .device = -1 };
6419 
6420 		next = cur;
6421 		if (ata_parse_force_one(&next, &te, &reason)) {
6422 			printk(KERN_WARNING "ata: failed to parse force "
6423 			       "parameter \"%s\" (%s)\n",
6424 			       cur, reason);
6425 			continue;
6426 		}
6427 
6428 		if (te.port == -1) {
6429 			te.port = last_port;
6430 			te.device = last_device;
6431 		}
6432 
6433 		ata_force_tbl[idx++] = te;
6434 
6435 		last_port = te.port;
6436 		last_device = te.device;
6437 	}
6438 
6439 	ata_force_tbl_size = idx;
6440 }
6441 
6442 static int __init ata_init(void)
6443 {
6444 	ata_parse_force_param();
6445 
6446 	ata_wq = create_workqueue("ata");
6447 	if (!ata_wq)
6448 		goto free_force_tbl;
6449 
6450 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6451 	if (!ata_aux_wq)
6452 		goto free_wq;
6453 
6454 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6455 	return 0;
6456 
6457 free_wq:
6458 	destroy_workqueue(ata_wq);
6459 free_force_tbl:
6460 	kfree(ata_force_tbl);
6461 	return -ENOMEM;
6462 }
6463 
6464 static void __exit ata_exit(void)
6465 {
6466 	kfree(ata_force_tbl);
6467 	destroy_workqueue(ata_wq);
6468 	destroy_workqueue(ata_aux_wq);
6469 }
6470 
6471 subsys_initcall(ata_init);
6472 module_exit(ata_exit);
6473 
6474 static unsigned long ratelimit_time;
6475 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6476 
6477 int ata_ratelimit(void)
6478 {
6479 	int rc;
6480 	unsigned long flags;
6481 
6482 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6483 
6484 	if (time_after(jiffies, ratelimit_time)) {
6485 		rc = 1;
6486 		ratelimit_time = jiffies + (HZ/5);
6487 	} else
6488 		rc = 0;
6489 
6490 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6491 
6492 	return rc;
6493 }
6494 
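/*
 * A usage sketch (illustrative): throttle a repetitive warning to at
 * most about five messages per second:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */
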
6495 /**
6496  *	ata_wait_register - wait until register value changes
6497  *	@reg: IO-mapped register
6498  *	@mask: Mask to apply to read register value
6499  *	@val: Wait condition
6500  *	@interval: polling interval in milliseconds
6501  *	@timeout: timeout in milliseconds
6502  *
6503  *	Waiting for some bits of a register to change is a common
6504  *	operation for ATA controllers.  This function reads the 32bit
6505  *	LE IO-mapped register @reg and tests for the following condition.
6506  *
6507  *	(*@reg & @mask) != @val
6508  *
6509  *	If the condition is met, it returns; otherwise, the process is
6510  *	repeated after @interval msecs until @timeout msecs pass.
6511  *
6512  *	LOCKING:
6513  *	Kernel thread context (may sleep)
6514  *
6515  *	RETURNS:
6516  *	The final register value.
6517  */
6518 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6519 		      unsigned long interval, unsigned long timeout)
6520 {
6521 	unsigned long deadline;
6522 	u32 tmp;
6523 
6524 	tmp = ioread32(reg);
6525 
6526 	/* Calculate timeout _after_ the first read to make sure
6527 	 * preceding writes reach the controller before starting to
6528 	 * eat away the timeout.
6529 	 */
6530 	deadline = ata_deadline(jiffies, timeout);
6531 
6532 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6533 		msleep(interval);
6534 		tmp = ioread32(reg);
6535 	}
6536 
6537 	return tmp;
6538 }
6539 
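/*
 * A usage sketch (illustrative): poll every 10ms, for up to 1000ms,
 * for a hypothetical MY_BUSY bit to clear in an ioremapped register:
 *
 *	status = ata_wait_register(mmio + MY_STATUS, MY_BUSY, MY_BUSY,
 *				   10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;		(timed out, still busy)
 */
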
6540 /*
6541  * Dummy port_ops
6542  */
6543 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6544 {
6545 	return AC_ERR_SYSTEM;
6546 }
6547 
6548 static void ata_dummy_error_handler(struct ata_port *ap)
6549 {
6550 	/* truly dummy */
6551 }
6552 
6553 struct ata_port_operations ata_dummy_port_ops = {
6554 	.qc_prep		= ata_noop_qc_prep,
6555 	.qc_issue		= ata_dummy_qc_issue,
6556 	.error_handler		= ata_dummy_error_handler,
6557 };
6558 
6559 const struct ata_port_info ata_dummy_port_info = {
6560 	.port_ops		= &ata_dummy_port_ops,
6561 };
6562 
6563 /*
6564  * libata is essentially a library of internal helper functions for
6565  * low-level ATA host controller drivers.  As such, the API/ABI is
6566  * likely to change as new drivers are added and updated.
6567  * Do not depend on ABI/API stability.
6568  */
6569 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6570 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6571 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6572 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6573 EXPORT_SYMBOL_GPL(sata_port_ops);
6574 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6575 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6576 EXPORT_SYMBOL_GPL(ata_link_next);
6577 EXPORT_SYMBOL_GPL(ata_dev_next);
6578 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6579 EXPORT_SYMBOL_GPL(ata_host_init);
6580 EXPORT_SYMBOL_GPL(ata_host_alloc);
6581 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6582 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6583 EXPORT_SYMBOL_GPL(ata_host_start);
6584 EXPORT_SYMBOL_GPL(ata_host_register);
6585 EXPORT_SYMBOL_GPL(ata_host_activate);
6586 EXPORT_SYMBOL_GPL(ata_host_detach);
6587 EXPORT_SYMBOL_GPL(ata_sg_init);
6588 EXPORT_SYMBOL_GPL(ata_qc_complete);
6589 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6590 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6591 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6592 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6593 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6594 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6595 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6596 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6597 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6598 EXPORT_SYMBOL_GPL(ata_mode_string);
6599 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6600 EXPORT_SYMBOL_GPL(ata_port_start);
6601 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6602 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6603 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6604 EXPORT_SYMBOL_GPL(ata_port_probe);
6605 EXPORT_SYMBOL_GPL(ata_dev_disable);
6606 EXPORT_SYMBOL_GPL(sata_set_spd);
6607 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6608 EXPORT_SYMBOL_GPL(sata_link_debounce);
6609 EXPORT_SYMBOL_GPL(sata_link_resume);
6610 EXPORT_SYMBOL_GPL(ata_std_prereset);
6611 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6612 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6613 EXPORT_SYMBOL_GPL(ata_std_postreset);
6614 EXPORT_SYMBOL_GPL(ata_dev_classify);
6615 EXPORT_SYMBOL_GPL(ata_dev_pair);
6616 EXPORT_SYMBOL_GPL(ata_port_disable);
6617 EXPORT_SYMBOL_GPL(ata_ratelimit);
6618 EXPORT_SYMBOL_GPL(ata_wait_register);
6619 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6620 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6621 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6622 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6623 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6624 EXPORT_SYMBOL_GPL(sata_scr_valid);
6625 EXPORT_SYMBOL_GPL(sata_scr_read);
6626 EXPORT_SYMBOL_GPL(sata_scr_write);
6627 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6628 EXPORT_SYMBOL_GPL(ata_link_online);
6629 EXPORT_SYMBOL_GPL(ata_link_offline);
6630 #ifdef CONFIG_PM
6631 EXPORT_SYMBOL_GPL(ata_host_suspend);
6632 EXPORT_SYMBOL_GPL(ata_host_resume);
6633 #endif /* CONFIG_PM */
6634 EXPORT_SYMBOL_GPL(ata_id_string);
6635 EXPORT_SYMBOL_GPL(ata_id_c_string);
6636 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6637 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6638 
6639 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6640 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6641 EXPORT_SYMBOL_GPL(ata_timing_compute);
6642 EXPORT_SYMBOL_GPL(ata_timing_merge);
6643 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6644 
6645 #ifdef CONFIG_PCI
6646 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6647 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6648 #ifdef CONFIG_PM
6649 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6650 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6651 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6652 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6653 #endif /* CONFIG_PM */
6654 #endif /* CONFIG_PCI */
6655 
6656 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6657 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6658 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6659 EXPORT_SYMBOL_GPL(ata_port_desc);
6660 #ifdef CONFIG_PCI
6661 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6662 #endif /* CONFIG_PCI */
6663 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6664 EXPORT_SYMBOL_GPL(ata_link_abort);
6665 EXPORT_SYMBOL_GPL(ata_port_abort);
6666 EXPORT_SYMBOL_GPL(ata_port_freeze);
6667 EXPORT_SYMBOL_GPL(sata_async_notification);
6668 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6669 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6670 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6671 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6672 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6673 EXPORT_SYMBOL_GPL(ata_do_eh);
6674 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6675 
6676 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6677 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6678 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6679 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6680 EXPORT_SYMBOL_GPL(ata_cable_sata);
6681