xref: /linux/drivers/ata/libata-core.c (revision aeb3f46252e26acdc60a1a8e31fb1ca6319d9a07)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
40 #include <linux/mm.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
56 #include <asm/io.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
59 
60 #include "libata.h"
61 
62 #define DRV_VERSION	"2.21"	/* must be exactly four chars */
63 
64 
65 /* debounce timing parameters in msecs { interval, duration, timeout } */
66 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
67 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
68 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
69 
70 static unsigned int ata_dev_init_params(struct ata_device *dev,
71 					u16 heads, u16 sectors);
72 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73 static void ata_dev_xfermask(struct ata_device *dev);
74 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
75 
76 unsigned int ata_print_id = 1;
77 static struct workqueue_struct *ata_wq;
78 
79 struct workqueue_struct *ata_aux_wq;
80 
81 int atapi_enabled = 1;
82 module_param(atapi_enabled, int, 0444);
83 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84 
85 int atapi_dmadir = 0;
86 module_param(atapi_dmadir, int, 0444);
87 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88 
89 int libata_fua = 0;
90 module_param_named(fua, libata_fua, int, 0444);
91 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
92 
93 static int ata_ignore_hpa = 0;
94 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
95 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
96 
97 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
98 module_param(ata_probe_timeout, int, 0444);
99 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
100 
101 int libata_noacpi = 1;
102 module_param_named(noacpi, libata_noacpi, int, 0444);
103 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
104 
105 MODULE_AUTHOR("Jeff Garzik");
106 MODULE_DESCRIPTION("Library module for ATA devices");
107 MODULE_LICENSE("GPL");
108 MODULE_VERSION(DRV_VERSION);
109 
110 
111 /**
112  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
113  *	@tf: Taskfile to convert
114  *	@pmp: Port multiplier port
115  *	@is_cmd: This FIS is for a command
116  *	@fis: Buffer into which data will be output
117  *
118  *	Converts a standard ATA taskfile to a Serial ATA
119  *	FIS structure (Register - Host to Device).
120  *
121  *	LOCKING:
122  *	Inherited from caller.
123  */
124 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
125 {
126 	fis[0] = 0x27;			/* Register - Host to Device FIS */
127 	fis[1] = pmp & 0xf;		/* Port multiplier number */
128 	if (is_cmd)
129 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
130 
131 	fis[2] = tf->command;
132 	fis[3] = tf->feature;
133 
134 	fis[4] = tf->lbal;
135 	fis[5] = tf->lbam;
136 	fis[6] = tf->lbah;
137 	fis[7] = tf->device;
138 
139 	fis[8] = tf->hob_lbal;
140 	fis[9] = tf->hob_lbam;
141 	fis[10] = tf->hob_lbah;
142 	fis[11] = tf->hob_feature;
143 
144 	fis[12] = tf->nsect;
145 	fis[13] = tf->hob_nsect;
146 	fis[14] = 0;
147 	fis[15] = tf->ctl;
148 
149 	fis[16] = 0;
150 	fis[17] = 0;
151 	fis[18] = 0;
152 	fis[19] = 0;
153 }
154 
155 /**
156  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
157  *	@fis: Buffer from which data will be input
158  *	@tf: Taskfile to output
159  *
160  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
161  *
162  *	LOCKING:
163  *	Inherited from caller.
164  */
165 
166 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
167 {
168 	tf->command	= fis[2];	/* status */
169 	tf->feature	= fis[3];	/* error */
170 
171 	tf->lbal	= fis[4];
172 	tf->lbam	= fis[5];
173 	tf->lbah	= fis[6];
174 	tf->device	= fis[7];
175 
176 	tf->hob_lbal	= fis[8];
177 	tf->hob_lbam	= fis[9];
178 	tf->hob_lbah	= fis[10];
179 
180 	tf->nsect	= fis[12];
181 	tf->hob_nsect	= fis[13];
182 }
183 
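/*
 * Illustrative sketch (not part of the original file): how a SATA LLDD
 * might use the two helpers above to serialize a command taskfile into a
 * 20-byte Register - Host to Device FIS and later recover the device's
 * response registers from a received FIS.  The function name and the
 * fis_buf parameter are hypothetical.
 */
#if 0	/* usage sketch only */
static void example_fis_round_trip(struct ata_queued_cmd *qc, u8 *fis_buf)
{
	/* serialize the command taskfile: PMP port 0, Command bit set */
	ata_tf_to_fis(&qc->tf, 0, 1, fis_buf);

	/* ... controller transmits the FIS and receives a D2H FIS ... */

	/* recover status/error and LBA registers from the received FIS */
	ata_tf_from_fis(fis_buf, &qc->result_tf);
}
#endif
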
184 static const u8 ata_rw_cmds[] = {
185 	/* pio multi */
186 	ATA_CMD_READ_MULTI,
187 	ATA_CMD_WRITE_MULTI,
188 	ATA_CMD_READ_MULTI_EXT,
189 	ATA_CMD_WRITE_MULTI_EXT,
190 	0,
191 	0,
192 	0,
193 	ATA_CMD_WRITE_MULTI_FUA_EXT,
194 	/* pio */
195 	ATA_CMD_PIO_READ,
196 	ATA_CMD_PIO_WRITE,
197 	ATA_CMD_PIO_READ_EXT,
198 	ATA_CMD_PIO_WRITE_EXT,
199 	0,
200 	0,
201 	0,
202 	0,
203 	/* dma */
204 	ATA_CMD_READ,
205 	ATA_CMD_WRITE,
206 	ATA_CMD_READ_EXT,
207 	ATA_CMD_WRITE_EXT,
208 	0,
209 	0,
210 	0,
211 	ATA_CMD_WRITE_FUA_EXT
212 };
213 
214 /**
215  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
216  *	@tf: command to examine and configure
217  *	@dev: device tf belongs to
218  *
219  *	Examine the device configuration and tf->flags to calculate
220  *	the proper read/write commands and protocol to use.
221  *
222  *	LOCKING:
223  *	caller.
224  */
225 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
226 {
227 	u8 cmd;
228 
229 	int index, fua, lba48, write;
230 
231 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
232 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
233 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
234 
235 	if (dev->flags & ATA_DFLAG_PIO) {
236 		tf->protocol = ATA_PROT_PIO;
237 		index = dev->multi_count ? 0 : 8;
238 	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
239 		/* Unable to use DMA due to host limitation */
240 		tf->protocol = ATA_PROT_PIO;
241 		index = dev->multi_count ? 0 : 8;
242 	} else {
243 		tf->protocol = ATA_PROT_DMA;
244 		index = 16;
245 	}
246 
247 	cmd = ata_rw_cmds[index + fua + lba48 + write];
248 	if (cmd) {
249 		tf->command = cmd;
250 		return 0;
251 	}
252 	return -1;
253 }
254 
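/*
 * Illustrative note (not in the original source): ata_rw_cmds[] above is
 * indexed in blocks of eight - 0 for PIO multi, 8 for PIO, 16 for DMA -
 * with the low bits encoding FUA (+4), LBA48 (+2) and WRITE (+1), which
 * is exactly the arithmetic ata_rwcmd_protocol() performs.  A minimal
 * sketch with a hypothetical helper name:
 */
#if 0	/* usage sketch only */
static u8 example_dma_lba48_fua_write_opcode(void)
{
	/* 16 (DMA) + 4 (FUA) + 2 (LBA48) + 1 (WRITE) => ATA_CMD_WRITE_FUA_EXT */
	return ata_rw_cmds[16 + 4 + 2 + 1];
}
#endif
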
255 /**
256  *	ata_tf_read_block - Read block address from ATA taskfile
257  *	@tf: ATA taskfile of interest
258  *	@dev: ATA device @tf belongs to
259  *
260  *	LOCKING:
261  *	None.
262  *
263  *	Read block address from @tf.  This function can handle all
264  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
265  *	flags select the address format to use.
266  *
267  *	RETURNS:
268  *	Block address read from @tf.
269  */
270 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
271 {
272 	u64 block = 0;
273 
274 	if (tf->flags & ATA_TFLAG_LBA) {
275 		if (tf->flags & ATA_TFLAG_LBA48) {
276 			block |= (u64)tf->hob_lbah << 40;
277 			block |= (u64)tf->hob_lbam << 32;
278 			block |= tf->hob_lbal << 24;
279 		} else
280 			block |= (tf->device & 0xf) << 24;
281 
282 		block |= tf->lbah << 16;
283 		block |= tf->lbam << 8;
284 		block |= tf->lbal;
285 	} else {
286 		u32 cyl, head, sect;
287 
288 		cyl = tf->lbam | (tf->lbah << 8);
289 		head = tf->device & 0xf;
290 		sect = tf->lbal;
291 
292 		block = (cyl * dev->heads + head) * dev->sectors + sect;
293 	}
294 
295 	return block;
296 }
297 
298 /**
299  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
300  *	@tf: Target ATA taskfile
301  *	@dev: ATA device @tf belongs to
302  *	@block: Block address
303  *	@n_block: Number of blocks
304  *	@tf_flags: RW/FUA etc...
305  *	@tag: tag
306  *
307  *	LOCKING:
308  *	None.
309  *
310  *	Build ATA taskfile @tf for read/write request described by
311  *	@block, @n_block, @tf_flags and @tag on @dev.
312  *
313  *	RETURNS:
314  *
315  *	0 on success, -ERANGE if the request is too large for @dev,
316  *	-EINVAL if the request is invalid.
317  */
318 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
319 		    u64 block, u32 n_block, unsigned int tf_flags,
320 		    unsigned int tag)
321 {
322 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
323 	tf->flags |= tf_flags;
324 
325 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
326 		/* yay, NCQ */
327 		if (!lba_48_ok(block, n_block))
328 			return -ERANGE;
329 
330 		tf->protocol = ATA_PROT_NCQ;
331 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
332 
333 		if (tf->flags & ATA_TFLAG_WRITE)
334 			tf->command = ATA_CMD_FPDMA_WRITE;
335 		else
336 			tf->command = ATA_CMD_FPDMA_READ;
337 
338 		tf->nsect = tag << 3;
339 		tf->hob_feature = (n_block >> 8) & 0xff;
340 		tf->feature = n_block & 0xff;
341 
342 		tf->hob_lbah = (block >> 40) & 0xff;
343 		tf->hob_lbam = (block >> 32) & 0xff;
344 		tf->hob_lbal = (block >> 24) & 0xff;
345 		tf->lbah = (block >> 16) & 0xff;
346 		tf->lbam = (block >> 8) & 0xff;
347 		tf->lbal = block & 0xff;
348 
349 		tf->device = 1 << 6;
350 		if (tf->flags & ATA_TFLAG_FUA)
351 			tf->device |= 1 << 7;
352 	} else if (dev->flags & ATA_DFLAG_LBA) {
353 		tf->flags |= ATA_TFLAG_LBA;
354 
355 		if (lba_28_ok(block, n_block)) {
356 			/* use LBA28 */
357 			tf->device |= (block >> 24) & 0xf;
358 		} else if (lba_48_ok(block, n_block)) {
359 			if (!(dev->flags & ATA_DFLAG_LBA48))
360 				return -ERANGE;
361 
362 			/* use LBA48 */
363 			tf->flags |= ATA_TFLAG_LBA48;
364 
365 			tf->hob_nsect = (n_block >> 8) & 0xff;
366 
367 			tf->hob_lbah = (block >> 40) & 0xff;
368 			tf->hob_lbam = (block >> 32) & 0xff;
369 			tf->hob_lbal = (block >> 24) & 0xff;
370 		} else
371 			/* request too large even for LBA48 */
372 			return -ERANGE;
373 
374 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
375 			return -EINVAL;
376 
377 		tf->nsect = n_block & 0xff;
378 
379 		tf->lbah = (block >> 16) & 0xff;
380 		tf->lbam = (block >> 8) & 0xff;
381 		tf->lbal = block & 0xff;
382 
383 		tf->device |= ATA_LBA;
384 	} else {
385 		/* CHS */
386 		u32 sect, head, cyl, track;
387 
388 		/* The request -may- be too large for CHS addressing. */
389 		if (!lba_28_ok(block, n_block))
390 			return -ERANGE;
391 
392 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
393 			return -EINVAL;
394 
395 		/* Convert LBA to CHS */
396 		track = (u32)block / dev->sectors;
397 		cyl   = track / dev->heads;
398 		head  = track % dev->heads;
399 		sect  = (u32)block % dev->sectors + 1;
400 
401 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
402 			(u32)block, track, cyl, head, sect);
403 
404 		/* Check whether the converted CHS can fit.
405 		   Cylinder: 0-65535
406 		   Head: 0-15
407 		   Sector: 1-255*/
408 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
409 			return -ERANGE;
410 
411 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
412 		tf->lbal = sect;
413 		tf->lbam = cyl;
414 		tf->lbah = cyl >> 8;
415 		tf->device |= head;
416 	}
417 
418 	return 0;
419 }
420 
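/*
 * Illustrative sketch (not part of the original file): a caller such as
 * the SCSI translation layer builds the taskfile for a queued read with
 * ata_build_rw_tf().  NCQ, LBA48, LBA28 or CHS is chosen automatically
 * from the device flags and the supplied tag.  The helper name below is
 * hypothetical.
 */
#if 0	/* usage sketch only */
static int example_build_read_tf(struct ata_queued_cmd *qc, u64 lba,
				 u32 n_blocks)
{
	/* tf_flags 0 => a plain (non-FUA) read */
	return ata_build_rw_tf(&qc->tf, qc->dev, lba, n_blocks, 0, qc->tag);
}
#endif
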
421 /**
422  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
423  *	@pio_mask: pio_mask
424  *	@mwdma_mask: mwdma_mask
425  *	@udma_mask: udma_mask
426  *
427  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
428  *	unsigned int xfer_mask.
429  *
430  *	LOCKING:
431  *	None.
432  *
433  *	RETURNS:
434  *	Packed xfer_mask.
435  */
436 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
437 				      unsigned int mwdma_mask,
438 				      unsigned int udma_mask)
439 {
440 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
441 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
442 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
443 }
444 
445 /**
446  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
447  *	@xfer_mask: xfer_mask to unpack
448  *	@pio_mask: resulting pio_mask
449  *	@mwdma_mask: resulting mwdma_mask
450  *	@udma_mask: resulting udma_mask
451  *
452  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
453  *	Any NULL destination masks will be ignored.
454  */
455 static void ata_unpack_xfermask(unsigned int xfer_mask,
456 				unsigned int *pio_mask,
457 				unsigned int *mwdma_mask,
458 				unsigned int *udma_mask)
459 {
460 	if (pio_mask)
461 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
462 	if (mwdma_mask)
463 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
464 	if (udma_mask)
465 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
466 }
467 
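/*
 * Illustrative sketch (not part of the original file): the packed
 * xfer_mask keeps the PIO, MWDMA and UDMA capability bits in disjoint
 * fields of one unsigned int, so the two helpers above round-trip
 * losslessly.  The mask values used here are purely illustrative.
 */
#if 0	/* usage sketch only */
static void example_xfermask_round_trip(void)
{
	unsigned int pio, mwdma, udma;
	/* PIO0-4, MWDMA0-2 and UDMA0-5 packed into a single mask */
	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);

	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07, udma == 0x3f again */
}
#endif
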
468 static const struct ata_xfer_ent {
469 	int shift, bits;
470 	u8 base;
471 } ata_xfer_tbl[] = {
472 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
473 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
474 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
475 	{ -1, },
476 };
477 
478 /**
479  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
480  *	@xfer_mask: xfer_mask of interest
481  *
482  *	Return matching XFER_* value for @xfer_mask.  Only the highest
483  *	bit of @xfer_mask is considered.
484  *
485  *	LOCKING:
486  *	None.
487  *
488  *	RETURNS:
489  *	Matching XFER_* value, 0 if no match found.
490  */
491 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
492 {
493 	int highbit = fls(xfer_mask) - 1;
494 	const struct ata_xfer_ent *ent;
495 
496 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
497 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
498 			return ent->base + highbit - ent->shift;
499 	return 0;
500 }
501 
502 /**
503  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
504  *	@xfer_mode: XFER_* of interest
505  *
506  *	Return matching xfer_mask for @xfer_mode.
507  *
508  *	LOCKING:
509  *	None.
510  *
511  *	RETURNS:
512  *	Matching xfer_mask, 0 if no match found.
513  */
514 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
515 {
516 	const struct ata_xfer_ent *ent;
517 
518 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
519 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
520 			return 1 << (ent->shift + xfer_mode - ent->base);
521 	return 0;
522 }
523 
524 /**
525  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
526  *	@xfer_mode: XFER_* of interest
527  *
528  *	Return matching xfer_shift for @xfer_mode.
529  *
530  *	LOCKING:
531  *	None.
532  *
533  *	RETURNS:
534  *	Matching xfer_shift, -1 if no match found.
535  */
536 static int ata_xfer_mode2shift(unsigned int xfer_mode)
537 {
538 	const struct ata_xfer_ent *ent;
539 
540 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
541 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
542 			return ent->shift;
543 	return -1;
544 }
545 
546 /**
547  *	ata_mode_string - convert xfer_mask to string
548  *	@xfer_mask: mask of bits supported; only highest bit counts.
549  *
550  *	Determine string which represents the highest speed
551  *	(highest bit in @xfer_mask).
552  *
553  *	LOCKING:
554  *	None.
555  *
556  *	RETURNS:
557  *	Constant C string representing highest speed listed in
558  *	@xfer_mask, or the constant C string "<n/a>".
559  */
560 static const char *ata_mode_string(unsigned int xfer_mask)
561 {
562 	static const char * const xfer_mode_str[] = {
563 		"PIO0",
564 		"PIO1",
565 		"PIO2",
566 		"PIO3",
567 		"PIO4",
568 		"PIO5",
569 		"PIO6",
570 		"MWDMA0",
571 		"MWDMA1",
572 		"MWDMA2",
573 		"MWDMA3",
574 		"MWDMA4",
575 		"UDMA/16",
576 		"UDMA/25",
577 		"UDMA/33",
578 		"UDMA/44",
579 		"UDMA/66",
580 		"UDMA/100",
581 		"UDMA/133",
582 		"UDMA7",
583 	};
584 	int highbit;
585 
586 	highbit = fls(xfer_mask) - 1;
587 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
588 		return xfer_mode_str[highbit];
589 	return "<n/a>";
590 }
591 
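/*
 * Illustrative sketch (not part of the original file): translating a
 * packed xfer_mask into an XFER_* value and a human readable string with
 * the helpers above.  ATA_UDMA5 is the same mode mask used by the bridge
 * limit code later in this file; the helper name is hypothetical.
 */
#if 0	/* usage sketch only */
static void example_mode_reporting(void)
{
	unsigned int mask = ata_pack_xfermask(0, 0, ATA_UDMA5);

	/* only the highest bit counts: this prints "UDMA/100" */
	printk(KERN_DEBUG "fastest mode: %s\n", ata_mode_string(mask));

	/* and the corresponding XFER_* value is XFER_UDMA_5 */
	WARN_ON(ata_xfer_mask2mode(mask) != XFER_UDMA_5);
}
#endif
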
592 static const char *sata_spd_string(unsigned int spd)
593 {
594 	static const char * const spd_str[] = {
595 		"1.5 Gbps",
596 		"3.0 Gbps",
597 	};
598 
599 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
600 		return "<unknown>";
601 	return spd_str[spd - 1];
602 }
603 
604 void ata_dev_disable(struct ata_device *dev)
605 {
606 	if (ata_dev_enabled(dev)) {
607 		if (ata_msg_drv(dev->ap))
608 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
609 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
610 					     ATA_DNXFER_QUIET);
611 		dev->class++;
612 	}
613 }
614 
615 /**
616  *	ata_devchk - PATA device presence detection
617  *	@ap: ATA channel to examine
618  *	@device: Device to examine (starting at zero)
619  *
620  *	This technique was originally described in
621  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
622  *	later found its way into the ATA/ATAPI spec.
623  *
624  *	Write a pattern to the ATA shadow registers,
625  *	and if a device is present, it will respond by
626  *	correctly storing and echoing back the
627  *	ATA shadow register contents.
628  *
629  *	LOCKING:
630  *	caller.
631  */
632 
633 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
634 {
635 	struct ata_ioports *ioaddr = &ap->ioaddr;
636 	u8 nsect, lbal;
637 
638 	ap->ops->dev_select(ap, device);
639 
640 	iowrite8(0x55, ioaddr->nsect_addr);
641 	iowrite8(0xaa, ioaddr->lbal_addr);
642 
643 	iowrite8(0xaa, ioaddr->nsect_addr);
644 	iowrite8(0x55, ioaddr->lbal_addr);
645 
646 	iowrite8(0x55, ioaddr->nsect_addr);
647 	iowrite8(0xaa, ioaddr->lbal_addr);
648 
649 	nsect = ioread8(ioaddr->nsect_addr);
650 	lbal = ioread8(ioaddr->lbal_addr);
651 
652 	if ((nsect == 0x55) && (lbal == 0xaa))
653 		return 1;	/* we found a device */
654 
655 	return 0;		/* nothing found */
656 }
657 
658 /**
659  *	ata_dev_classify - determine device type based on ATA-spec signature
660  *	@tf: ATA taskfile register set for device to be identified
661  *
662  *	Determine from taskfile register contents whether a device is
663  *	ATA or ATAPI, as per "Signature and persistence" section
664  *	of ATA/PI spec (volume 1, sect 5.14).
665  *
666  *	LOCKING:
667  *	None.
668  *
669  *	RETURNS:
670  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
671  *	in the event of failure.
672  */
673 
674 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
675 {
676 	/* Apple's open source Darwin code hints that some devices only
677 	 * put a proper signature into the LBA mid/high registers,
678 	 * so we only check those.  It's sufficient for uniqueness.
679 	 */
680 
681 	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
682 	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
683 		DPRINTK("found ATA device by sig\n");
684 		return ATA_DEV_ATA;
685 	}
686 
687 	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
688 	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
689 		DPRINTK("found ATAPI device by sig\n");
690 		return ATA_DEV_ATAPI;
691 	}
692 
693 	DPRINTK("unknown device\n");
694 	return ATA_DEV_UNKNOWN;
695 }
696 
697 /**
698  *	ata_dev_try_classify - Parse returned ATA device signature
699  *	@ap: ATA channel to examine
700  *	@device: Device to examine (starting at zero)
701  *	@r_err: Value of error register on completion
702  *
703  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
704  *	an ATA/ATAPI-defined set of values is placed in the ATA
705  *	shadow registers, indicating the results of device detection
706  *	and diagnostics.
707  *
708  *	Select the ATA device, and read the values from the ATA shadow
709  *	registers.  Then parse according to the Error register value,
710  *	and the spec-defined values examined by ata_dev_classify().
711  *
712  *	LOCKING:
713  *	caller.
714  *
715  *	RETURNS:
716  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
717  */
718 
719 unsigned int
720 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
721 {
722 	struct ata_taskfile tf;
723 	unsigned int class;
724 	u8 err;
725 
726 	ap->ops->dev_select(ap, device);
727 
728 	memset(&tf, 0, sizeof(tf));
729 
730 	ap->ops->tf_read(ap, &tf);
731 	err = tf.feature;
732 	if (r_err)
733 		*r_err = err;
734 
735 	/* see if device passed diags: if master then continue and warn later */
736 	if (err == 0 && device == 0)
737 		/* diagnostic fail : do nothing _YET_ */
738 		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
739 	else if (err == 1)
740 		/* do nothing */ ;
741 	else if ((device == 0) && (err == 0x81))
742 		/* do nothing */ ;
743 	else
744 		return ATA_DEV_NONE;
745 
746 	/* determine if device is ATA or ATAPI */
747 	class = ata_dev_classify(&tf);
748 
749 	if (class == ATA_DEV_UNKNOWN)
750 		return ATA_DEV_NONE;
751 	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
752 		return ATA_DEV_NONE;
753 	return class;
754 }
755 
756 /**
757  *	ata_id_string - Convert IDENTIFY DEVICE page into string
758  *	@id: IDENTIFY DEVICE results we will examine
759  *	@s: string into which data is output
760  *	@ofs: offset into identify device page
761  *	@len: length of string to return. must be an even number.
762  *
763  *	The strings in the IDENTIFY DEVICE page are broken up into
764  *	16-bit chunks.  Run through the string, and output each
765  *	8-bit chunk linearly, regardless of platform.
766  *
767  *	LOCKING:
768  *	caller.
769  */
770 
771 void ata_id_string(const u16 *id, unsigned char *s,
772 		   unsigned int ofs, unsigned int len)
773 {
774 	unsigned int c;
775 
776 	while (len > 0) {
777 		c = id[ofs] >> 8;
778 		*s = c;
779 		s++;
780 
781 		c = id[ofs] & 0xff;
782 		*s = c;
783 		s++;
784 
785 		ofs++;
786 		len -= 2;
787 	}
788 }
789 
790 /**
791  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
792  *	@id: IDENTIFY DEVICE results we will examine
793  *	@s: string into which data is output
794  *	@ofs: offset into identify device page
795  *	@len: length of string to return. must be an odd number.
796  *
797  *	This function is identical to ata_id_string except that it
798  *	trims trailing spaces and terminates the resulting string with
799  *	null.  @len must be actual maximum length (even number) + 1.
800  *
801  *	LOCKING:
802  *	caller.
803  */
804 void ata_id_c_string(const u16 *id, unsigned char *s,
805 		     unsigned int ofs, unsigned int len)
806 {
807 	unsigned char *p;
808 
809 	WARN_ON(!(len & 1));
810 
811 	ata_id_string(id, s, ofs, len - 1);
812 
813 	p = s + strnlen(s, len - 1);
814 	while (p > s && p[-1] == ' ')
815 		p--;
816 	*p = '\0';
817 }
818 
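/*
 * Illustrative sketch (not part of the original file): extracting the
 * model and firmware revision strings from an IDENTIFY DEVICE buffer.
 * The length passed to ata_id_c_string() is the fixed field width plus
 * one byte for the terminating NUL, hence the "+ 1" buffer sizes; this
 * mirrors what ata_dev_configure() does further down.
 */
#if 0	/* usage sketch only */
static void example_id_strings(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];
	unsigned char fwrev[ATA_ID_FW_REV_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
	printk(KERN_DEBUG "model '%s', firmware '%s'\n", model, fwrev);
}
#endif
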
819 static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
820 {
821 	u64 sectors = 0;
822 
823 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
824 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
825 	sectors |= (tf->hob_lbal & 0xff) << 24;
826 	sectors |= (tf->lbah & 0xff) << 16;
827 	sectors |= (tf->lbam & 0xff) << 8;
828 	sectors |= (tf->lbal & 0xff);
829 
830 	return ++sectors;
831 }
832 
833 static u64 ata_tf_to_lba(struct ata_taskfile *tf)
834 {
835 	u64 sectors = 0;
836 
837 	sectors |= (tf->device & 0x0f) << 24;
838 	sectors |= (tf->lbah & 0xff) << 16;
839 	sectors |= (tf->lbam & 0xff) << 8;
840 	sectors |= (tf->lbal & 0xff);
841 
842 	return ++sectors;
843 }
844 
845 /**
846  *	ata_read_native_max_address_ext	-	LBA48 native max query
847  *	@dev: Device to query
848  *
849  *	Perform an LBA48 size query upon the device in question. Return the
850  *	actual LBA48 size or zero if the command fails.
851  */
852 
853 static u64 ata_read_native_max_address_ext(struct ata_device *dev)
854 {
855 	unsigned int err;
856 	struct ata_taskfile tf;
857 
858 	ata_tf_init(dev, &tf);
859 
860 	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
861 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
862 	tf.protocol |= ATA_PROT_NODATA;
863 	tf.device |= 0x40;
864 
865 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
866 	if (err)
867 		return 0;
868 
869 	return ata_tf_to_lba48(&tf);
870 }
871 
872 /**
873  *	ata_read_native_max_address	-	LBA28 native max query
874  *	@dev: Device to query
875  *
876  *	Perform an LBA28 size query upon the device in question. Return the
877  *	actual LBA28 size or zero if the command fails.
878  */
879 
880 static u64 ata_read_native_max_address(struct ata_device *dev)
881 {
882 	unsigned int err;
883 	struct ata_taskfile tf;
884 
885 	ata_tf_init(dev, &tf);
886 
887 	tf.command = ATA_CMD_READ_NATIVE_MAX;
888 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
889 	tf.protocol |= ATA_PROT_NODATA;
890 	tf.device |= 0x40;
891 
892 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
893 	if (err)
894 		return 0;
895 
896 	return ata_tf_to_lba(&tf);
897 }
898 
899 /**
900  *	ata_set_native_max_address_ext	-	LBA48 native max set
901  *	@dev: Device to query
902  *	@new_sectors: new max sectors value to set for the device
903  *
904  *	Perform an LBA48 set max command upon the device in question. Return the
905  *	actual LBA48 size or zero if the command fails.
906  */
907 
908 static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
909 {
910 	unsigned int err;
911 	struct ata_taskfile tf;
912 
913 	new_sectors--;
914 
915 	ata_tf_init(dev, &tf);
916 
917 	tf.command = ATA_CMD_SET_MAX_EXT;
918 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
919 	tf.protocol |= ATA_PROT_NODATA;
920 	tf.device |= 0x40;
921 
922 	tf.lbal = (new_sectors >> 0) & 0xff;
923 	tf.lbam = (new_sectors >> 8) & 0xff;
924 	tf.lbah = (new_sectors >> 16) & 0xff;
925 
926 	tf.hob_lbal = (new_sectors >> 24) & 0xff;
927 	tf.hob_lbam = (new_sectors >> 32) & 0xff;
928 	tf.hob_lbah = (new_sectors >> 40) & 0xff;
929 
930 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
931 	if (err)
932 		return 0;
933 
934 	return ata_tf_to_lba48(&tf);
935 }
936 
937 /**
938  *	ata_set_native_max_address	-	LBA28 native max set
939  *	@dev: Device to query
940  *	@new_sectors: new max sectors value to set for the device
941  *
942  *	Perform an LBA28 set max command upon the device in question. Return the
943  *	actual LBA28 size or zero if the command fails.
944  */
945 
946 static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
947 {
948 	unsigned int err;
949 	struct ata_taskfile tf;
950 
951 	new_sectors--;
952 
953 	ata_tf_init(dev, &tf);
954 
955 	tf.command = ATA_CMD_SET_MAX;
956 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
957 	tf.protocol |= ATA_PROT_NODATA;
958 
959 	tf.lbal = (new_sectors >> 0) & 0xff;
960 	tf.lbam = (new_sectors >> 8) & 0xff;
961 	tf.lbah = (new_sectors >> 16) & 0xff;
962 	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
963 
964 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
965 	if (err)
966 		return 0;
967 
968 	return ata_tf_to_lba(&tf);
969 }
970 
971 /**
972  *	ata_hpa_resize		-	Resize a device with an HPA set
973  *	@dev: Device to resize
974  *
975  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
976  *	it if required to the full size of the media. The caller must check
977  *	the drive has the HPA feature set enabled.
978  */
979 
980 static u64 ata_hpa_resize(struct ata_device *dev)
981 {
982 	u64 sectors = dev->n_sectors;
983 	u64 hpa_sectors;
984 
985 	if (ata_id_has_lba48(dev->id))
986 		hpa_sectors = ata_read_native_max_address_ext(dev);
987 	else
988 		hpa_sectors = ata_read_native_max_address(dev);
989 
990 	if (hpa_sectors > sectors) {
991 		ata_dev_printk(dev, KERN_INFO,
992 			"Host Protected Area detected:\n"
993 			"\tcurrent size: %lld sectors\n"
994 			"\tnative size: %lld sectors\n",
995 			(long long)sectors, (long long)hpa_sectors);
996 
997 		if (ata_ignore_hpa) {
998 			if (ata_id_has_lba48(dev->id))
999 				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
1000 			else
1001 				hpa_sectors = ata_set_native_max_address(dev,
1002 								hpa_sectors);
1003 
1004 			if (hpa_sectors) {
1005 				ata_dev_printk(dev, KERN_INFO, "native size "
1006 					"increased to %lld sectors\n",
1007 					(long long)hpa_sectors);
1008 				return hpa_sectors;
1009 			}
1010 		}
1011 	} else if (hpa_sectors < sectors)
1012 		ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
1013 			       "is smaller than sectors (%lld)\n", __FUNCTION__,
1014 			       (long long)hpa_sectors, (long long)sectors);
1015 
1016 	return sectors;
1017 }
1018 
1019 static u64 ata_id_n_sectors(const u16 *id)
1020 {
1021 	if (ata_id_has_lba(id)) {
1022 		if (ata_id_has_lba48(id))
1023 			return ata_id_u64(id, 100);
1024 		else
1025 			return ata_id_u32(id, 60);
1026 	} else {
1027 		if (ata_id_current_chs_valid(id))
1028 			return ata_id_u32(id, 57);
1029 		else
1030 			return id[1] * id[3] * id[6];
1031 	}
1032 }
1033 
1034 /**
1035  *	ata_id_to_dma_mode	-	Identify DMA mode from id block
1036  *	@dev: device to identify
1037  *	@unknown: mode to assume if we cannot tell
1038  *
1039  *	Set up the timing values for the device based upon the identify
1040  *	reported values for the DMA mode. This function is used by drivers
1041  *	which rely upon firmware configured modes, but wish to report the
1042  *	mode correctly when possible.
1043  *
1044  *	In addition we emit messages formatted like those of the default
1045  *	ata_dev_set_mode handler, in order to provide consistency of
1046  *	presentation.
1047  */
1048 
1049 void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1050 {
1051 	unsigned int mask;
1052 	u8 mode;
1053 
1054 	/* Pack the DMA modes */
1055 	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1056 	if (dev->id[53] & 0x04)
1057 		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1058 
1059 	/* Select the mode in use */
1060 	mode = ata_xfer_mask2mode(mask);
1061 
1062 	if (mode != 0) {
1063 		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1064 		       ata_mode_string(mask));
1065 	} else {
1066 		/* SWDMA perhaps ? */
1067 		mode = unknown;
1068 		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1069 	}
1070 
1071 	/* Configure the device reporting */
1072 	dev->xfer_mode = mode;
1073 	dev->xfer_shift = ata_xfer_mode2shift(mode);
1074 }
1075 
1076 /**
1077  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1078  *	@ap: ATA channel to manipulate
1079  *	@device: ATA device (numbered from zero) to select
1080  *
1081  *	This function performs no operation.
1082  *
1083  *	May be used as the dev_select() entry in ata_port_operations.
1084  *
1085  *	LOCKING:
1086  *	caller.
1087  */
1088 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1089 {
1090 }
1091 
1092 
1093 /**
1094  *	ata_std_dev_select - Select device 0/1 on ATA bus
1095  *	@ap: ATA channel to manipulate
1096  *	@device: ATA device (numbered from zero) to select
1097  *
1098  *	Use the method defined in the ATA specification to
1099  *	make either device 0, or device 1, active on the
1100  *	ATA channel.  Works with both PIO and MMIO.
1101  *
1102  *	May be used as the dev_select() entry in ata_port_operations.
1103  *
1104  *	LOCKING:
1105  *	caller.
1106  */
1107 
1108 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1109 {
1110 	u8 tmp;
1111 
1112 	if (device == 0)
1113 		tmp = ATA_DEVICE_OBS;
1114 	else
1115 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1116 
1117 	iowrite8(tmp, ap->ioaddr.device_addr);
1118 	ata_pause(ap);		/* needed; also flushes, for mmio */
1119 }
1120 
1121 /**
1122  *	ata_dev_select - Select device 0/1 on ATA bus
1123  *	@ap: ATA channel to manipulate
1124  *	@device: ATA device (numbered from zero) to select
1125  *	@wait: non-zero to wait for Status register BSY bit to clear
1126  *	@can_sleep: non-zero if context allows sleeping
1127  *
1128  *	Use the method defined in the ATA specification to
1129  *	make either device 0, or device 1, active on the
1130  *	ATA channel.
1131  *
1132  *	This is a high-level version of ata_std_dev_select(),
1133  *	which additionally provides the services of inserting
1134  *	the proper pauses and status polling, where needed.
1135  *
1136  *	LOCKING:
1137  *	caller.
1138  */
1139 
1140 void ata_dev_select(struct ata_port *ap, unsigned int device,
1141 			   unsigned int wait, unsigned int can_sleep)
1142 {
1143 	if (ata_msg_probe(ap))
1144 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1145 				"device %u, wait %u\n", device, wait);
1146 
1147 	if (wait)
1148 		ata_wait_idle(ap);
1149 
1150 	ap->ops->dev_select(ap, device);
1151 
1152 	if (wait) {
1153 		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1154 			msleep(150);
1155 		ata_wait_idle(ap);
1156 	}
1157 }
1158 
1159 /**
1160  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1161  *	@id: IDENTIFY DEVICE page to dump
1162  *
1163  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1164  *	page.
1165  *
1166  *	LOCKING:
1167  *	caller.
1168  */
1169 
1170 static inline void ata_dump_id(const u16 *id)
1171 {
1172 	DPRINTK("49==0x%04x  "
1173 		"53==0x%04x  "
1174 		"63==0x%04x  "
1175 		"64==0x%04x  "
1176 		"75==0x%04x  \n",
1177 		id[49],
1178 		id[53],
1179 		id[63],
1180 		id[64],
1181 		id[75]);
1182 	DPRINTK("80==0x%04x  "
1183 		"81==0x%04x  "
1184 		"82==0x%04x  "
1185 		"83==0x%04x  "
1186 		"84==0x%04x  \n",
1187 		id[80],
1188 		id[81],
1189 		id[82],
1190 		id[83],
1191 		id[84]);
1192 	DPRINTK("88==0x%04x  "
1193 		"93==0x%04x\n",
1194 		id[88],
1195 		id[93]);
1196 }
1197 
1198 /**
1199  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1200  *	@id: IDENTIFY data to compute xfer mask from
1201  *
1202  *	Compute the xfermask for this device. This is not as trivial
1203  *	as it seems if we must consider early devices correctly.
1204  *
1205  *	FIXME: pre IDE drive timing (do we care ?).
1206  *
1207  *	LOCKING:
1208  *	None.
1209  *
1210  *	RETURNS:
1211  *	Computed xfermask
1212  */
1213 static unsigned int ata_id_xfermask(const u16 *id)
1214 {
1215 	unsigned int pio_mask, mwdma_mask, udma_mask;
1216 
1217 	/* Usual case. Word 53 indicates word 64 is valid */
1218 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1219 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1220 		pio_mask <<= 3;
1221 		pio_mask |= 0x7;
1222 	} else {
1223 		/* If word 64 isn't valid then Word 51 high byte holds
1224 		 * the PIO timing number for the maximum. Turn it into
1225 		 * a mask.
1226 		 */
1227 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1228 		if (mode < 5)	/* Valid PIO range */
1229 			pio_mask = (2 << mode) - 1;
1230 		else
1231 			pio_mask = 1;
1232 
1233 		/* But wait.. there's more. Design your standards by
1234 		 * committee and you too can get a free iordy field to
1235 		 * process.  However, it's the speeds, not the modes, that
1236 		 * are supported...  Note that drivers using the timing API
1237 		 * will get this right anyway
1238 		 */
1239 	}
1240 
1241 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1242 
1243 	if (ata_id_is_cfa(id)) {
1244 		/*
1245 		 *	Process compact flash extended modes
1246 		 */
1247 		int pio = id[163] & 0x7;
1248 		int dma = (id[163] >> 3) & 7;
1249 
1250 		if (pio)
1251 			pio_mask |= (1 << 5);
1252 		if (pio > 1)
1253 			pio_mask |= (1 << 6);
1254 		if (dma)
1255 			mwdma_mask |= (1 << 3);
1256 		if (dma > 1)
1257 			mwdma_mask |= (1 << 4);
1258 	}
1259 
1260 	udma_mask = 0;
1261 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1262 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1263 
1264 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1265 }
1266 
1267 /**
1268  *	ata_port_queue_task - Queue port_task
1269  *	@ap: The ata_port to queue port_task for
1270  *	@fn: workqueue function to be scheduled
1271  *	@data: data for @fn to use
1272  *	@delay: delay time for workqueue function
1273  *
1274  *	Schedule @fn(@data) for execution after @delay jiffies using
1275  *	port_task.  There is one port_task per port and it's the
1276  *	user's (i.e. the low level driver's) responsibility to make sure that only
1277  *	one task is active at any given time.
1278  *
1279  *	libata core layer takes care of synchronization between
1280  *	port_task and EH.  ata_port_queue_task() may be ignored for EH
1281  *	synchronization.
1282  *
1283  *	LOCKING:
1284  *	Inherited from caller.
1285  */
1286 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1287 			 unsigned long delay)
1288 {
1289 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
1290 	ap->port_task_data = data;
1291 
1292 	/* may fail if ata_port_flush_task() in progress */
1293 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1294 }
1295 
1296 /**
1297  *	ata_port_flush_task - Flush port_task
1298  *	@ap: The ata_port to flush port_task for
1299  *
1300  *	After this function completes, port_task is guaranteed not to
1301  *	be running or scheduled.
1302  *
1303  *	LOCKING:
1304  *	Kernel thread context (may sleep)
1305  */
1306 void ata_port_flush_task(struct ata_port *ap)
1307 {
1308 	DPRINTK("ENTER\n");
1309 
1310 	cancel_rearming_delayed_work(&ap->port_task);
1311 
1312 	if (ata_msg_ctl(ap))
1313 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1314 }
1315 
1316 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1317 {
1318 	struct completion *waiting = qc->private_data;
1319 
1320 	complete(waiting);
1321 }
1322 
1323 /**
1324  *	ata_exec_internal_sg - execute libata internal command
1325  *	@dev: Device to which the command is sent
1326  *	@tf: Taskfile registers for the command and the result
1327  *	@cdb: CDB for packet command
1328  *	@dma_dir: Data transfer direction of the command
1329  *	@sg: sg list for the data buffer of the command
1330  *	@n_elem: Number of sg entries
1331  *
1332  *	Executes libata internal command with timeout.  @tf contains
1333  *	command on entry and result on return.  Timeout and error
1334  *	conditions are reported via return value.  No recovery action
1335  *	is taken after a command times out.  It's caller's duty to
1336  *	is taken after a command times out.  It's the caller's duty to
1337  *	clean up after a timeout.
1338  *	LOCKING:
1339  *	None.  Should be called with kernel context, might sleep.
1340  *
1341  *	RETURNS:
1342  *	Zero on success, AC_ERR_* mask on failure
1343  */
1344 unsigned ata_exec_internal_sg(struct ata_device *dev,
1345 			      struct ata_taskfile *tf, const u8 *cdb,
1346 			      int dma_dir, struct scatterlist *sg,
1347 			      unsigned int n_elem)
1348 {
1349 	struct ata_port *ap = dev->ap;
1350 	u8 command = tf->command;
1351 	struct ata_queued_cmd *qc;
1352 	unsigned int tag, preempted_tag;
1353 	u32 preempted_sactive, preempted_qc_active;
1354 	DECLARE_COMPLETION_ONSTACK(wait);
1355 	unsigned long flags;
1356 	unsigned int err_mask;
1357 	int rc;
1358 
1359 	spin_lock_irqsave(ap->lock, flags);
1360 
1361 	/* no internal command while frozen */
1362 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1363 		spin_unlock_irqrestore(ap->lock, flags);
1364 		return AC_ERR_SYSTEM;
1365 	}
1366 
1367 	/* initialize internal qc */
1368 
1369 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1370 	 * drivers choke if any other tag is given.  This breaks
1371 	 * ata_tag_internal() test for those drivers.  Don't use new
1372 	 * EH stuff without converting to it.
1373 	 */
1374 	if (ap->ops->error_handler)
1375 		tag = ATA_TAG_INTERNAL;
1376 	else
1377 		tag = 0;
1378 
1379 	if (test_and_set_bit(tag, &ap->qc_allocated))
1380 		BUG();
1381 	qc = __ata_qc_from_tag(ap, tag);
1382 
1383 	qc->tag = tag;
1384 	qc->scsicmd = NULL;
1385 	qc->ap = ap;
1386 	qc->dev = dev;
1387 	ata_qc_reinit(qc);
1388 
1389 	preempted_tag = ap->active_tag;
1390 	preempted_sactive = ap->sactive;
1391 	preempted_qc_active = ap->qc_active;
1392 	ap->active_tag = ATA_TAG_POISON;
1393 	ap->sactive = 0;
1394 	ap->qc_active = 0;
1395 
1396 	/* prepare & issue qc */
1397 	qc->tf = *tf;
1398 	if (cdb)
1399 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1400 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1401 	qc->dma_dir = dma_dir;
1402 	if (dma_dir != DMA_NONE) {
1403 		unsigned int i, buflen = 0;
1404 
1405 		for (i = 0; i < n_elem; i++)
1406 			buflen += sg[i].length;
1407 
1408 		ata_sg_init(qc, sg, n_elem);
1409 		qc->nbytes = buflen;
1410 	}
1411 
1412 	qc->private_data = &wait;
1413 	qc->complete_fn = ata_qc_complete_internal;
1414 
1415 	ata_qc_issue(qc);
1416 
1417 	spin_unlock_irqrestore(ap->lock, flags);
1418 
1419 	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1420 
1421 	ata_port_flush_task(ap);
1422 
1423 	if (!rc) {
1424 		spin_lock_irqsave(ap->lock, flags);
1425 
1426 		/* We're racing with irq here.  If we lose, the
1427 		 * following test prevents us from completing the qc
1428 		 * twice.  If we win, the port is frozen and will be
1429 		 * cleaned up by ->post_internal_cmd().
1430 		 */
1431 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1432 			qc->err_mask |= AC_ERR_TIMEOUT;
1433 
1434 			if (ap->ops->error_handler)
1435 				ata_port_freeze(ap);
1436 			else
1437 				ata_qc_complete(qc);
1438 
1439 			if (ata_msg_warn(ap))
1440 				ata_dev_printk(dev, KERN_WARNING,
1441 					"qc timeout (cmd 0x%x)\n", command);
1442 		}
1443 
1444 		spin_unlock_irqrestore(ap->lock, flags);
1445 	}
1446 
1447 	/* do post_internal_cmd */
1448 	if (ap->ops->post_internal_cmd)
1449 		ap->ops->post_internal_cmd(qc);
1450 
1451 	/* perform minimal error analysis */
1452 	if (qc->flags & ATA_QCFLAG_FAILED) {
1453 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1454 			qc->err_mask |= AC_ERR_DEV;
1455 
1456 		if (!qc->err_mask)
1457 			qc->err_mask |= AC_ERR_OTHER;
1458 
1459 		if (qc->err_mask & ~AC_ERR_OTHER)
1460 			qc->err_mask &= ~AC_ERR_OTHER;
1461 	}
1462 
1463 	/* finish up */
1464 	spin_lock_irqsave(ap->lock, flags);
1465 
1466 	*tf = qc->result_tf;
1467 	err_mask = qc->err_mask;
1468 
1469 	ata_qc_free(qc);
1470 	ap->active_tag = preempted_tag;
1471 	ap->sactive = preempted_sactive;
1472 	ap->qc_active = preempted_qc_active;
1473 
1474 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1475 	 * Until those drivers are fixed, we detect the condition
1476 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1477 	 * port.
1478 	 *
1479 	 * Note that this doesn't change any behavior as internal
1480 	 * command failure results in disabling the device in the
1481 	 * higher layer for LLDDs without new reset/EH callbacks.
1482 	 *
1483 	 * Kill the following code as soon as those drivers are fixed.
1484 	 */
1485 	if (ap->flags & ATA_FLAG_DISABLED) {
1486 		err_mask |= AC_ERR_SYSTEM;
1487 		ata_port_probe(ap);
1488 	}
1489 
1490 	spin_unlock_irqrestore(ap->lock, flags);
1491 
1492 	return err_mask;
1493 }
1494 
1495 /**
1496  *	ata_exec_internal - execute libata internal command
1497  *	@dev: Device to which the command is sent
1498  *	@tf: Taskfile registers for the command and the result
1499  *	@cdb: CDB for packet command
1500  *	@dma_dir: Data transfer direction of the command
1501  *	@buf: Data buffer of the command
1502  *	@buflen: Length of data buffer
1503  *
1504  *	Wrapper around ata_exec_internal_sg() which takes simple
1505  *	buffer instead of sg list.
1506  *
1507  *	LOCKING:
1508  *	None.  Should be called with kernel context, might sleep.
1509  *
1510  *	RETURNS:
1511  *	Zero on success, AC_ERR_* mask on failure
1512  */
1513 unsigned ata_exec_internal(struct ata_device *dev,
1514 			   struct ata_taskfile *tf, const u8 *cdb,
1515 			   int dma_dir, void *buf, unsigned int buflen)
1516 {
1517 	struct scatterlist *psg = NULL, sg;
1518 	unsigned int n_elem = 0;
1519 
1520 	if (dma_dir != DMA_NONE) {
1521 		WARN_ON(!buf);
1522 		sg_init_one(&sg, buf, buflen);
1523 		psg = &sg;
1524 		n_elem++;
1525 	}
1526 
1527 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
1528 }
1529 
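/*
 * Illustrative sketch (not part of the original file): issuing a PIO
 * data-in IDENTIFY DEVICE through the ata_exec_internal() wrapper.  This
 * loosely mirrors what ata_dev_read_id() below does and only exists to
 * make the calling convention explicit; the helper name is hypothetical.
 */
#if 0	/* usage sketch only */
static unsigned int example_identify(struct ata_device *dev, u16 *id)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_ID_ATA;
	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	return ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS);
}
#endif
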
1530 /**
1531  *	ata_do_simple_cmd - execute simple internal command
1532  *	@dev: Device to which the command is sent
1533  *	@cmd: Opcode to execute
1534  *
1535  *	Execute a 'simple' command, that only consists of the opcode
1536  *	'cmd' itself, without filling any other registers
1537  *
1538  *	LOCKING:
1539  *	Kernel thread context (may sleep).
1540  *
1541  *	RETURNS:
1542  *	Zero on success, AC_ERR_* mask on failure
1543  */
1544 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1545 {
1546 	struct ata_taskfile tf;
1547 
1548 	ata_tf_init(dev, &tf);
1549 
1550 	tf.command = cmd;
1551 	tf.flags |= ATA_TFLAG_DEVICE;
1552 	tf.protocol = ATA_PROT_NODATA;
1553 
1554 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1555 }
1556 
1557 /**
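/*
 * Illustrative sketch (not part of the original file): register-only
 * commands such as a cache flush reduce to a single opcode, which is
 * exactly what ata_do_simple_cmd() packages up.  ATA_CMD_FLUSH is
 * assumed to come from <linux/ata.h>; the helper name is hypothetical.
 */
#if 0	/* usage sketch only */
static unsigned int example_flush_cache(struct ata_device *dev)
{
	return ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
}
#endif
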
1558  *	ata_pio_need_iordy	-	check if iordy needed
1559  *	@adev: ATA device
1560  *
1561  *	Check if the current speed of the device requires IORDY. Used
1562  *	by various controllers for chip configuration.
1563  */
1564 
1565 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1566 {
1567 	/* Controller doesn't support IORDY. Probably a pointless check
1568 	   as the caller should know this */
1569 	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
1570 		return 0;
1571 	/* PIO3 and higher it is mandatory */
1572 	if (adev->pio_mode > XFER_PIO_2)
1573 		return 1;
1574 	/* We turn it on when possible */
1575 	if (ata_id_has_iordy(adev->id))
1576 		return 1;
1577 	return 0;
1578 }
1579 
1580 /**
1581  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1582  *	@adev: ATA device
1583  *
1584  *	Compute the highest mode possible if we are not using iordy. Return
1585  *	-1 if no iordy mode is available.
1586  */
1587 
1588 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1589 {
1590 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1591 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1592 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1593 		/* Is the speed faster than the drive allows non IORDY ? */
1594 		if (pio) {
1595 			/* This is cycle times not frequency - watch the logic! */
1596 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1597 				return 3 << ATA_SHIFT_PIO;
1598 			return 7 << ATA_SHIFT_PIO;
1599 		}
1600 	}
1601 	return 3 << ATA_SHIFT_PIO;
1602 }
1603 
1604 /**
1605  *	ata_dev_read_id - Read ID data from the specified device
1606  *	@dev: target device
1607  *	@p_class: pointer to class of the target device (may be changed)
1608  *	@flags: ATA_READID_* flags
1609  *	@id: buffer to read IDENTIFY data into
1610  *
1611  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1612  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1613  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1614  *	for pre-ATA4 drives.
1615  *
1616  *	LOCKING:
1617  *	Kernel thread context (may sleep)
1618  *
1619  *	RETURNS:
1620  *	0 on success, -errno otherwise.
1621  */
1622 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1623 		    unsigned int flags, u16 *id)
1624 {
1625 	struct ata_port *ap = dev->ap;
1626 	unsigned int class = *p_class;
1627 	struct ata_taskfile tf;
1628 	unsigned int err_mask = 0;
1629 	const char *reason;
1630 	int may_fallback = 1, tried_spinup = 0;
1631 	int rc;
1632 
1633 	if (ata_msg_ctl(ap))
1634 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1635 
1636 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1637  retry:
1638 	ata_tf_init(dev, &tf);
1639 
1640 	switch (class) {
1641 	case ATA_DEV_ATA:
1642 		tf.command = ATA_CMD_ID_ATA;
1643 		break;
1644 	case ATA_DEV_ATAPI:
1645 		tf.command = ATA_CMD_ID_ATAPI;
1646 		break;
1647 	default:
1648 		rc = -ENODEV;
1649 		reason = "unsupported class";
1650 		goto err_out;
1651 	}
1652 
1653 	tf.protocol = ATA_PROT_PIO;
1654 
1655 	/* Some devices choke if TF registers contain garbage.  Make
1656 	 * sure those are properly initialized.
1657 	 */
1658 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1659 
1660 	/* Device presence detection is unreliable on some
1661 	 * controllers.  Always poll IDENTIFY if available.
1662 	 */
1663 	tf.flags |= ATA_TFLAG_POLLING;
1664 
1665 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1666 				     id, sizeof(id[0]) * ATA_ID_WORDS);
1667 	if (err_mask) {
1668 		if (err_mask & AC_ERR_NODEV_HINT) {
1669 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
1670 				ap->print_id, dev->devno);
1671 			return -ENOENT;
1672 		}
1673 
1674 		/* Device or controller might have reported the wrong
1675 		 * device class.  Give a shot at the other IDENTIFY if
1676 		 * the current one is aborted by the device.
1677 		 */
1678 		if (may_fallback &&
1679 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1680 			may_fallback = 0;
1681 
1682 			if (class == ATA_DEV_ATA)
1683 				class = ATA_DEV_ATAPI;
1684 			else
1685 				class = ATA_DEV_ATA;
1686 			goto retry;
1687 		}
1688 
1689 		rc = -EIO;
1690 		reason = "I/O error";
1691 		goto err_out;
1692 	}
1693 
1694 	/* Falling back doesn't make sense if ID data was read
1695 	 * successfully at least once.
1696 	 */
1697 	may_fallback = 0;
1698 
1699 	swap_buf_le16(id, ATA_ID_WORDS);
1700 
1701 	/* sanity check */
1702 	rc = -EINVAL;
1703 	reason = "device reports invalid type";
1704 
1705 	if (class == ATA_DEV_ATA) {
1706 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1707 			goto err_out;
1708 	} else {
1709 		if (ata_id_is_ata(id))
1710 			goto err_out;
1711 	}
1712 
1713 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1714 		tried_spinup = 1;
1715 		/*
1716 		 * Drive powered-up in standby mode, and requires a specific
1717 		 * SET_FEATURES spin-up subcommand before it will accept
1718 		 * anything other than the original IDENTIFY command.
1719 		 */
1720 		ata_tf_init(dev, &tf);
1721 		tf.command = ATA_CMD_SET_FEATURES;
1722 		tf.feature = SETFEATURES_SPINUP;
1723 		tf.protocol = ATA_PROT_NODATA;
1724 		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1725 		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1726 		if (err_mask) {
1727 			rc = -EIO;
1728 			reason = "SPINUP failed";
1729 			goto err_out;
1730 		}
1731 		/*
1732 		 * If the drive initially returned incomplete IDENTIFY info,
1733 		 * we now must reissue the IDENTIFY command.
1734 		 */
1735 		if (id[2] == 0x37c8)
1736 			goto retry;
1737 	}
1738 
1739 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1740 		/*
1741 		 * The exact sequence expected by certain pre-ATA4 drives is:
1742 		 * SRST RESET
1743 		 * IDENTIFY
1744 		 * INITIALIZE DEVICE PARAMETERS
1745 		 * anything else..
1746 		 * Some drives were very specific about that exact sequence.
1747 		 */
1748 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1749 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1750 			if (err_mask) {
1751 				rc = -EIO;
1752 				reason = "INIT_DEV_PARAMS failed";
1753 				goto err_out;
1754 			}
1755 
1756 			/* current CHS translation info (id[53-58]) might be
1757 			 * changed. reread the identify device info.
1758 			 */
1759 			flags &= ~ATA_READID_POSTRESET;
1760 			goto retry;
1761 		}
1762 	}
1763 
1764 	*p_class = class;
1765 
1766 	return 0;
1767 
1768  err_out:
1769 	if (ata_msg_warn(ap))
1770 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1771 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
1772 	return rc;
1773 }
1774 
1775 static inline u8 ata_dev_knobble(struct ata_device *dev)
1776 {
1777 	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1778 }
1779 
1780 static void ata_dev_config_ncq(struct ata_device *dev,
1781 			       char *desc, size_t desc_sz)
1782 {
1783 	struct ata_port *ap = dev->ap;
1784 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1785 
1786 	if (!ata_id_has_ncq(dev->id)) {
1787 		desc[0] = '\0';
1788 		return;
1789 	}
1790 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
1791 		snprintf(desc, desc_sz, "NCQ (not used)");
1792 		return;
1793 	}
1794 	if (ap->flags & ATA_FLAG_NCQ) {
1795 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1796 		dev->flags |= ATA_DFLAG_NCQ;
1797 	}
1798 
1799 	if (hdepth >= ddepth)
1800 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1801 	else
1802 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1803 }
1804 
1805 /**
1806  *	ata_dev_configure - Configure the specified ATA/ATAPI device
1807  *	@dev: Target device to configure
1808  *
1809  *	Configure @dev according to @dev->id.  Generic and low-level
1810  *	driver specific fixups are also applied.
1811  *
1812  *	LOCKING:
1813  *	Kernel thread context (may sleep)
1814  *
1815  *	RETURNS:
1816  *	0 on success, -errno otherwise
1817  */
1818 int ata_dev_configure(struct ata_device *dev)
1819 {
1820 	struct ata_port *ap = dev->ap;
1821 	struct ata_eh_context *ehc = &ap->eh_context;
1822 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1823 	const u16 *id = dev->id;
1824 	unsigned int xfer_mask;
1825 	char revbuf[7];		/* XYZ-99\0 */
1826 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1827 	char modelbuf[ATA_ID_PROD_LEN+1];
1828 	int rc;
1829 
1830 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1831 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1832 			       __FUNCTION__);
1833 		return 0;
1834 	}
1835 
1836 	if (ata_msg_probe(ap))
1837 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1838 
1839 	/* set horkage */
1840 	dev->horkage |= ata_dev_blacklisted(dev);
1841 
1842 	/* let ACPI work its magic */
1843 	rc = ata_acpi_on_devcfg(dev);
1844 	if (rc)
1845 		return rc;
1846 
1847 	/* print device capabilities */
1848 	if (ata_msg_probe(ap))
1849 		ata_dev_printk(dev, KERN_DEBUG,
1850 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1851 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
1852 			       __FUNCTION__,
1853 			       id[49], id[82], id[83], id[84],
1854 			       id[85], id[86], id[87], id[88]);
1855 
1856 	/* initialize to-be-configured parameters */
1857 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
1858 	dev->max_sectors = 0;
1859 	dev->cdb_len = 0;
1860 	dev->n_sectors = 0;
1861 	dev->cylinders = 0;
1862 	dev->heads = 0;
1863 	dev->sectors = 0;
1864 
1865 	/*
1866 	 * common ATA, ATAPI feature tests
1867 	 */
1868 
1869 	/* find max transfer mode; for printk only */
1870 	xfer_mask = ata_id_xfermask(id);
1871 
1872 	if (ata_msg_probe(ap))
1873 		ata_dump_id(id);
1874 
1875 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1876 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1877 			sizeof(fwrevbuf));
1878 
1879 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1880 			sizeof(modelbuf));
1881 
1882 	/* ATA-specific feature tests */
1883 	if (dev->class == ATA_DEV_ATA) {
1884 		if (ata_id_is_cfa(id)) {
1885 			if (id[162] & 1) /* CPRM may make this media unusable */
1886 				ata_dev_printk(dev, KERN_WARNING,
1887 					       "supports DRM functions and may "
1888 					       "not be fully accessible.\n");
1889 			snprintf(revbuf, 7, "CFA");
1890 		}
1891 		else
1892 			snprintf(revbuf, 7, "ATA-%d",  ata_id_major_version(id));
1893 
1894 		dev->n_sectors = ata_id_n_sectors(id);
1895 
1896 		if (dev->id[59] & 0x100)
1897 			dev->multi_count = dev->id[59] & 0xff;
1898 
1899 		if (ata_id_has_lba(id)) {
1900 			const char *lba_desc;
1901 			char ncq_desc[20];
1902 
1903 			lba_desc = "LBA";
1904 			dev->flags |= ATA_DFLAG_LBA;
1905 			if (ata_id_has_lba48(id)) {
1906 				dev->flags |= ATA_DFLAG_LBA48;
1907 				lba_desc = "LBA48";
1908 
1909 				if (dev->n_sectors >= (1UL << 28) &&
1910 				    ata_id_has_flush_ext(id))
1911 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
1912 			}
1913 
1914 			if (ata_id_hpa_enabled(dev->id))
1915 				dev->n_sectors = ata_hpa_resize(dev);
1916 
1917 			/* config NCQ */
1918 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1919 
1920 			/* print device info to dmesg */
1921 			if (ata_msg_drv(ap) && print_info) {
1922 				ata_dev_printk(dev, KERN_INFO,
1923 					"%s: %s, %s, max %s\n",
1924 					revbuf, modelbuf, fwrevbuf,
1925 					ata_mode_string(xfer_mask));
1926 				ata_dev_printk(dev, KERN_INFO,
1927 					"%Lu sectors, multi %u: %s %s\n",
1928 					(unsigned long long)dev->n_sectors,
1929 					dev->multi_count, lba_desc, ncq_desc);
1930 			}
1931 		} else {
1932 			/* CHS */
1933 
1934 			/* Default translation */
1935 			dev->cylinders	= id[1];
1936 			dev->heads	= id[3];
1937 			dev->sectors	= id[6];
1938 
1939 			if (ata_id_current_chs_valid(id)) {
1940 				/* Current CHS translation is valid. */
1941 				dev->cylinders = id[54];
1942 				dev->heads     = id[55];
1943 				dev->sectors   = id[56];
1944 			}
1945 
1946 			/* print device info to dmesg */
1947 			if (ata_msg_drv(ap) && print_info) {
1948 				ata_dev_printk(dev, KERN_INFO,
1949 					"%s: %s, %s, max %s\n",
1950 					revbuf,	modelbuf, fwrevbuf,
1951 					ata_mode_string(xfer_mask));
1952 				ata_dev_printk(dev, KERN_INFO,
1953 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
1954 					(unsigned long long)dev->n_sectors,
1955 					dev->multi_count, dev->cylinders,
1956 					dev->heads, dev->sectors);
1957 			}
1958 		}
1959 
1960 		dev->cdb_len = 16;
1961 	}
1962 
1963 	/* ATAPI-specific feature tests */
1964 	else if (dev->class == ATA_DEV_ATAPI) {
1965 		char *cdb_intr_string = "";
1966 
1967 		rc = atapi_cdb_len(id);
1968 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1969 			if (ata_msg_warn(ap))
1970 				ata_dev_printk(dev, KERN_WARNING,
1971 					       "unsupported CDB len\n");
1972 			rc = -EINVAL;
1973 			goto err_out_nosup;
1974 		}
1975 		dev->cdb_len = (unsigned int) rc;
1976 
1977 		if (ata_id_cdb_intr(dev->id)) {
1978 			dev->flags |= ATA_DFLAG_CDB_INTR;
1979 			cdb_intr_string = ", CDB intr";
1980 		}
1981 
1982 		/* print device info to dmesg */
1983 		if (ata_msg_drv(ap) && print_info)
1984 			ata_dev_printk(dev, KERN_INFO,
1985 				       "ATAPI: %s, %s, max %s%s\n",
1986 				       modelbuf, fwrevbuf,
1987 				       ata_mode_string(xfer_mask),
1988 				       cdb_intr_string);
1989 	}
1990 
1991 	/* determine max_sectors */
1992 	dev->max_sectors = ATA_MAX_SECTORS;
1993 	if (dev->flags & ATA_DFLAG_LBA48)
1994 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1995 
1996 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1997 		/* Let the user know. We don't want to disallow opens for
1998 		   rescue purposes, or in case the vendor is just a blithering
1999 		   idiot */
2000 		if (print_info) {
2001 			ata_dev_printk(dev, KERN_WARNING,
2002 "Drive reports diagnostics failure. This may indicate a drive\n");
2003 			ata_dev_printk(dev, KERN_WARNING,
2004 "fault or invalid emulation. Contact drive vendor for information.\n");
2005 		}
2006 	}
2007 
2008 	/* limit bridge transfers to udma5 and ATA_MAX_SECTORS */
2009 	if (ata_dev_knobble(dev)) {
2010 		if (ata_msg_drv(ap) && print_info)
2011 			ata_dev_printk(dev, KERN_INFO,
2012 				       "applying bridge limits\n");
2013 		dev->udma_mask &= ATA_UDMA5;
2014 		dev->max_sectors = ATA_MAX_SECTORS;
2015 	}
2016 
2017 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2018 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2019 					 dev->max_sectors);
2020 
2021 	if (ap->ops->dev_config)
2022 		ap->ops->dev_config(dev);
2023 
2024 	if (ata_msg_probe(ap))
2025 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2026 			__FUNCTION__, ata_chk_status(ap));
2027 	return 0;
2028 
2029 err_out_nosup:
2030 	if (ata_msg_probe(ap))
2031 		ata_dev_printk(dev, KERN_DEBUG,
2032 			       "%s: EXIT, err\n", __FUNCTION__);
2033 	return rc;
2034 }
2035 
2036 /**
2037  *	ata_cable_40wire	-	return 40 wire cable type
2038  *	@ap: port
2039  *
2040  *	Helper method for drivers which want to hardwire 40 wire cable
2041  *	detection.
2042  */
2043 
2044 int ata_cable_40wire(struct ata_port *ap)
2045 {
2046 	return ATA_CBL_PATA40;
2047 }
2048 
2049 /**
2050  *	ata_cable_80wire	-	return 80 wire cable type
2051  *	@ap: port
2052  *
2053  *	Helper method for drivers which want to hardwire 80 wire cable
2054  *	detection.
2055  */
2056 
2057 int ata_cable_80wire(struct ata_port *ap)
2058 {
2059 	return ATA_CBL_PATA80;
2060 }
2061 
2062 /**
2063  *	ata_cable_unknown	-	return unknown PATA cable.
2064  *	@ap: port
2065  *
2066  *	Helper method for drivers which have no PATA cable detection.
2067  */
2068 
2069 int ata_cable_unknown(struct ata_port *ap)
2070 {
2071 	return ATA_CBL_PATA_UNK;
2072 }
2073 
2074 /**
2075  *	ata_cable_sata	-	return SATA cable type
2076  *	@ap: port
2077  *
2078  *	Helper method for drivers which have SATA cables
2079  */
2080 
2081 int ata_cable_sata(struct ata_port *ap)
2082 {
2083 	return ATA_CBL_SATA;
2084 }
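
/*
 * Illustrative usage (not part of this file's code paths): a PATA LLD
 * that cannot sense its cable simply wires ->cable_detect to one of the
 * helpers above.  The ops structure below is a hypothetical example.
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */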
2085 
2086 /**
2087  *	ata_bus_probe - Reset and probe ATA bus
2088  *	@ap: Bus to probe
2089  *
2090  *	Master ATA bus probing function.  Initiates a hardware-dependent
2091  *	bus reset, then attempts to identify any devices found on
2092  *	the bus.
2093  *
2094  *	LOCKING:
2095  *	PCI/etc. bus probe sem.
2096  *
2097  *	RETURNS:
2098  *	Zero on success, negative errno otherwise.
2099  */
2100 
2101 int ata_bus_probe(struct ata_port *ap)
2102 {
2103 	unsigned int classes[ATA_MAX_DEVICES];
2104 	int tries[ATA_MAX_DEVICES];
2105 	int i, rc;
2106 	struct ata_device *dev;
2107 
2108 	ata_port_probe(ap);
2109 
2110 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2111 		tries[i] = ATA_PROBE_MAX_TRIES;
2112 
2113  retry:
2114 	/* reset and determine device classes */
2115 	ap->ops->phy_reset(ap);
2116 
2117 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2118 		dev = &ap->device[i];
2119 
2120 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2121 		    dev->class != ATA_DEV_UNKNOWN)
2122 			classes[dev->devno] = dev->class;
2123 		else
2124 			classes[dev->devno] = ATA_DEV_NONE;
2125 
2126 		dev->class = ATA_DEV_UNKNOWN;
2127 	}
2128 
2129 	ata_port_probe(ap);
2130 
2131 	/* after the reset the device state is PIO 0 and the controller
2132 	   state is undefined. Record the mode */
2133 
2134 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2135 		ap->device[i].pio_mode = XFER_PIO_0;
2136 
2137 	/* read IDENTIFY page and configure devices. We have to do the identify
2138 	   specific sequence bass-ackwards so that PDIAG- is released by
2139 	   the slave device */
2140 
2141 	for (i = ATA_MAX_DEVICES - 1; i >=  0; i--) {
2142 		dev = &ap->device[i];
2143 
2144 		if (tries[i])
2145 			dev->class = classes[i];
2146 
2147 		if (!ata_dev_enabled(dev))
2148 			continue;
2149 
2150 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2151 				     dev->id);
2152 		if (rc)
2153 			goto fail;
2154 	}
2155 
2156 	/* Now ask for the cable type as PDIAG- should have been released */
2157 	if (ap->ops->cable_detect)
2158 		ap->cbl = ap->ops->cable_detect(ap);
2159 
2160 	/* After the identify sequence we can now set up the devices. We do
2161 	   this in the normal order so that the user doesn't get confused */
2162 
2163 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2164 		dev = &ap->device[i];
2165 		if (!ata_dev_enabled(dev))
2166 			continue;
2167 
2168 		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2169 		rc = ata_dev_configure(dev);
2170 		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2171 		if (rc)
2172 			goto fail;
2173 	}
2174 
2175 	/* configure transfer mode */
2176 	rc = ata_set_mode(ap, &dev);
2177 	if (rc)
2178 		goto fail;
2179 
2180 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2181 		if (ata_dev_enabled(&ap->device[i]))
2182 			return 0;
2183 
2184 	/* no device present, disable port */
2185 	ata_port_disable(ap);
2186 	ap->ops->port_disable(ap);
2187 	return -ENODEV;
2188 
2189  fail:
2190 	tries[dev->devno]--;
2191 
2192 	switch (rc) {
2193 	case -EINVAL:
2194 		/* eeek, something went very wrong, give up */
2195 		tries[dev->devno] = 0;
2196 		break;
2197 
2198 	case -ENODEV:
2199 		/* give it just one more chance */
2200 		tries[dev->devno] = min(tries[dev->devno], 1);
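		/* fall through */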
2201 	case -EIO:
2202 		if (tries[dev->devno] == 1) {
2203 			/* This is the last chance, better to slow
2204 			 * down than lose it.
2205 			 */
2206 			sata_down_spd_limit(ap);
2207 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2208 		}
2209 	}
2210 
2211 	if (!tries[dev->devno])
2212 		ata_dev_disable(dev);
2213 
2214 	goto retry;
2215 }
2216 
2217 /**
2218  *	ata_port_probe - Mark port as enabled
2219  *	@ap: Port for which we indicate enablement
2220  *
2221  *	Modify @ap data structure such that the system
2222  *	thinks that the entire port is enabled.
2223  *
2224  *	LOCKING: host lock, or some other form of
2225  *	serialization.
2226  */
2227 
2228 void ata_port_probe(struct ata_port *ap)
2229 {
2230 	ap->flags &= ~ATA_FLAG_DISABLED;
2231 }
2232 
2233 /**
2234  *	sata_print_link_status - Print SATA link status
2235  *	@ap: SATA port to printk link status about
2236  *
2237  *	This function prints link speed and status of a SATA link.
2238  *
2239  *	LOCKING:
2240  *	None.
2241  */
2242 void sata_print_link_status(struct ata_port *ap)
2243 {
2244 	u32 sstatus, scontrol, tmp;
2245 
2246 	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
2247 		return;
2248 	sata_scr_read(ap, SCR_CONTROL, &scontrol);
2249 
2250 	if (ata_port_online(ap)) {
2251 		tmp = (sstatus >> 4) & 0xf;
2252 		ata_port_printk(ap, KERN_INFO,
2253 				"SATA link up %s (SStatus %X SControl %X)\n",
2254 				sata_spd_string(tmp), sstatus, scontrol);
2255 	} else {
2256 		ata_port_printk(ap, KERN_INFO,
2257 				"SATA link down (SStatus %X SControl %X)\n",
2258 				sstatus, scontrol);
2259 	}
2260 }
2261 
2262 /**
2263  *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2264  *	@ap: SATA port associated with target SATA PHY.
2265  *
2266  *	This function issues commands to standard SATA Sxxx
2267  *	PHY registers, to wake up the phy (and device), and
2268  *	clear any reset condition.
2269  *
2270  *	LOCKING:
2271  *	PCI/etc. bus probe sem.
2272  *
2273  */
2274 void __sata_phy_reset(struct ata_port *ap)
2275 {
2276 	u32 sstatus;
2277 	unsigned long timeout = jiffies + (HZ * 5);
2278 
2279 	if (ap->flags & ATA_FLAG_SATA_RESET) {
2280 		/* issue phy wake/reset */
2281 		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2282 		/* Couldn't find anything in SATA I/II specs, but
2283 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2284 		mdelay(1);
2285 	}
2286 	/* phy wake/clear reset */
2287 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2288 
2289 	/* wait for phy to become ready, if necessary */
2290 	do {
2291 		msleep(200);
2292 		sata_scr_read(ap, SCR_STATUS, &sstatus);
2293 		if ((sstatus & 0xf) != 1)
2294 			break;
2295 	} while (time_before(jiffies, timeout));
2296 
2297 	/* print link status */
2298 	sata_print_link_status(ap);
2299 
2300 	/* TODO: phy layer with polling, timeouts, etc. */
2301 	if (!ata_port_offline(ap))
2302 		ata_port_probe(ap);
2303 	else
2304 		ata_port_disable(ap);
2305 
2306 	if (ap->flags & ATA_FLAG_DISABLED)
2307 		return;
2308 
2309 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2310 		ata_port_disable(ap);
2311 		return;
2312 	}
2313 
2314 	ap->cbl = ATA_CBL_SATA;
2315 }
2316 
2317 /**
2318  *	sata_phy_reset - Reset SATA bus.
2319  *	@ap: SATA port associated with target SATA PHY.
2320  *
2321  *	This function resets the SATA bus, and then probes
2322  *	the bus for devices.
2323  *
2324  *	LOCKING:
2325  *	PCI/etc. bus probe sem.
2326  *
2327  */
2328 void sata_phy_reset(struct ata_port *ap)
2329 {
2330 	__sata_phy_reset(ap);
2331 	if (ap->flags & ATA_FLAG_DISABLED)
2332 		return;
2333 	ata_bus_reset(ap);
2334 }
2335 
2336 /**
2337  *	ata_dev_pair		-	return other device on cable
2338  *	@adev: device
2339  *
2340  *	Obtain the other device on the same cable, or NULL if no
2341  *	other device is present.
2342  */
2343 
2344 struct ata_device *ata_dev_pair(struct ata_device *adev)
2345 {
2346 	struct ata_port *ap = adev->ap;
2347 	struct ata_device *pair = &ap->device[1 - adev->devno];
2348 	if (!ata_dev_enabled(pair))
2349 		return NULL;
2350 	return pair;
2351 }
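
/*
 * Illustrative sketch: a controller driver can use ata_dev_pair() to see
 * whether the cable is shared before programming per-device timings, for
 * example from its ->set_piomode() hook (the handling below is a
 * hypothetical fragment):
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *
 *	if (pair)
 *		account for the slower of the two devices when timing
 */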
2352 
2353 /**
2354  *	ata_port_disable - Disable port.
2355  *	@ap: Port to be disabled.
2356  *
2357  *	Modify @ap data structure such that the system
2358  *	thinks that the entire port is disabled, and should
2359  *	never attempt to probe or communicate with devices
2360  *	on this port.
2361  *
2362  *	LOCKING: host lock, or some other form of
2363  *	serialization.
2364  */
2365 
2366 void ata_port_disable(struct ata_port *ap)
2367 {
2368 	ap->device[0].class = ATA_DEV_NONE;
2369 	ap->device[1].class = ATA_DEV_NONE;
2370 	ap->flags |= ATA_FLAG_DISABLED;
2371 }
2372 
2373 /**
2374  *	sata_down_spd_limit - adjust SATA spd limit downward
2375  *	@ap: Port to adjust SATA spd limit for
2376  *
2377  *	Adjust SATA spd limit of @ap downward.  Note that this
2378  *	function only adjusts the limit.  The change must be applied
2379  *	using sata_set_spd().
2380  *
2381  *	LOCKING:
2382  *	Inherited from caller.
2383  *
2384  *	RETURNS:
2385  *	0 on success, negative errno on failure
2386  */
2387 int sata_down_spd_limit(struct ata_port *ap)
2388 {
2389 	u32 sstatus, spd, mask;
2390 	int rc, highbit;
2391 
2392 	if (!sata_scr_valid(ap))
2393 		return -EOPNOTSUPP;
2394 
2395 	/* If SCR can be read, use it to determine the current SPD.
2396 	 * If not, use cached value in ap->sata_spd.
2397 	 */
2398 	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2399 	if (rc == 0)
2400 		spd = (sstatus >> 4) & 0xf;
2401 	else
2402 		spd = ap->sata_spd;
2403 
2404 	mask = ap->sata_spd_limit;
2405 	if (mask <= 1)
2406 		return -EINVAL;
2407 
2408 	/* unconditionally mask off the highest bit */
2409 	highbit = fls(mask) - 1;
2410 	mask &= ~(1 << highbit);
2411 
2412 	/* Mask off all speeds higher than or equal to the current
2413 	 * one.  Force 1.5Gbps if current SPD is not available.
2414 	 */
2415 	if (spd > 1)
2416 		mask &= (1 << (spd - 1)) - 1;
2417 	else
2418 		mask &= 1;
2419 
2420 	/* were we already at the bottom? */
2421 	if (!mask)
2422 		return -EINVAL;
2423 
2424 	ap->sata_spd_limit = mask;
2425 
2426 	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2427 			sata_spd_string(fls(mask)));
2428 
2429 	return 0;
2430 }
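
/*
 * Illustrative sketch: lowering the link speed is only half of the job;
 * the new limit still has to be written to SControl and renegotiated by
 * a hardreset.  A hypothetical fragment of that sequence (the real
 * sequencing lives in the EH and reset paths):
 *
 *	if (sata_down_spd_limit(ap) == 0 && sata_set_spd_needed(ap))
 *		request a hardreset, which calls sata_set_spd() before
 *		waking the phy (see sata_port_hardreset() below)
 */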
2431 
2432 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2433 {
2434 	u32 spd, limit;
2435 
2436 	if (ap->sata_spd_limit == UINT_MAX)
2437 		limit = 0;
2438 	else
2439 		limit = fls(ap->sata_spd_limit);
2440 
2441 	spd = (*scontrol >> 4) & 0xf;
2442 	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2443 
2444 	return spd != limit;
2445 }
2446 
2447 /**
2448  *	sata_set_spd_needed - is SATA spd configuration needed
2449  *	@ap: Port in question
2450  *
2451  *	Test whether the spd limit in SControl matches
2452  *	@ap->sata_spd_limit.  This function is used to determine
2453  *	whether hardreset is necessary to apply SATA spd
2454  *	configuration.
2455  *
2456  *	LOCKING:
2457  *	Inherited from caller.
2458  *
2459  *	RETURNS:
2460  *	1 if SATA spd configuration is needed, 0 otherwise.
2461  */
2462 int sata_set_spd_needed(struct ata_port *ap)
2463 {
2464 	u32 scontrol;
2465 
2466 	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
2467 		return 0;
2468 
2469 	return __sata_set_spd_needed(ap, &scontrol);
2470 }
2471 
2472 /**
2473  *	sata_set_spd - set SATA spd according to spd limit
2474  *	@ap: Port to set SATA spd for
2475  *
2476  *	Set SATA spd of @ap according to sata_spd_limit.
2477  *
2478  *	LOCKING:
2479  *	Inherited from caller.
2480  *
2481  *	RETURNS:
2482  *	0 if spd doesn't need to be changed, 1 if spd has been
2483  *	changed.  Negative errno if SCR registers are inaccessible.
2484  */
2485 int sata_set_spd(struct ata_port *ap)
2486 {
2487 	u32 scontrol;
2488 	int rc;
2489 
2490 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2491 		return rc;
2492 
2493 	if (!__sata_set_spd_needed(ap, &scontrol))
2494 		return 0;
2495 
2496 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2497 		return rc;
2498 
2499 	return 1;
2500 }
2501 
2502 /*
2503  * This mode timing computation functionality is ported over from
2504  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2505  */
2506 /*
2507  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2508  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2509  * for UDMA6, which is currently supported only by Maxtor drives.
2510  *
2511  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2512  */
2513 
2514 static const struct ata_timing ata_timing[] = {
2515 
2516 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2517 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2518 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2519 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2520 
2521 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2522 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2523 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2524 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2525 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2526 
2527 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2528 
2529 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2530 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2531 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2532 
2533 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2534 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2535 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2536 
2537 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2538 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2539 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2540 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2541 
2542 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2543 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2544 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2545 
2546 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2547 
2548 	{ 0xFF }
2549 };
2550 
2551 #define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
2552 #define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
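
/*
 * ENOUGH() rounds a duration up to a whole number of clock periods and
 * EZ() additionally maps 0 ("not specified") to 0.  For example, with a
 * 33 MHz bus clock (T = 1000000000 / 33333 ~ 30000 ps) a 70 ns setup
 * requirement quantizes to EZ(70 * 1000, 30000) = 3 clocks.
 */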
2553 
2554 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2555 {
2556 	q->setup   = EZ(t->setup   * 1000,  T);
2557 	q->act8b   = EZ(t->act8b   * 1000,  T);
2558 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2559 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2560 	q->active  = EZ(t->active  * 1000,  T);
2561 	q->recover = EZ(t->recover * 1000,  T);
2562 	q->cycle   = EZ(t->cycle   * 1000,  T);
2563 	q->udma    = EZ(t->udma    * 1000, UT);
2564 }
2565 
2566 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2567 		      struct ata_timing *m, unsigned int what)
2568 {
2569 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2570 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2571 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2572 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2573 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2574 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2575 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2576 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2577 }
2578 
2579 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2580 {
2581 	const struct ata_timing *t;
2582 
2583 	for (t = ata_timing; t->mode != speed; t++)
2584 		if (t->mode == 0xFF)
2585 			return NULL;
2586 	return t;
2587 }
2588 
2589 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2590 		       struct ata_timing *t, int T, int UT)
2591 {
2592 	const struct ata_timing *s;
2593 	struct ata_timing p;
2594 
2595 	/*
2596 	 * Find the mode.
2597 	 */
2598 
2599 	if (!(s = ata_timing_find_mode(speed)))
2600 		return -EINVAL;
2601 
2602 	memcpy(t, s, sizeof(*s));
2603 
2604 	/*
2605 	 * If the drive is an EIDE drive, it can tell us it needs extended
2606 	 * PIO/MW_DMA cycle timing.
2607 	 */
2608 
2609 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2610 		memset(&p, 0, sizeof(p));
2611 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2612 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2613 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2614 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2615 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2616 		}
2617 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2618 	}
2619 
2620 	/*
2621 	 * Convert the timing to bus clock counts.
2622 	 */
2623 
2624 	ata_timing_quantize(t, t, T, UT);
2625 
2626 	/*
2627 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2628 	 * S.M.A.R.T. and some other commands.  We have to ensure that the
2629 	 * DMA cycle timing is no faster than the fastest PIO timing.
2630 	 */
2631 
2632 	if (speed > XFER_PIO_6) {
2633 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2634 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2635 	}
2636 
2637 	/*
2638 	 * Lengthen active & recovery time so that cycle time is correct.
2639 	 */
2640 
2641 	if (t->act8b + t->rec8b < t->cyc8b) {
2642 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2643 		t->rec8b = t->cyc8b - t->act8b;
2644 	}
2645 
2646 	if (t->active + t->recover < t->cycle) {
2647 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2648 		t->recover = t->cycle - t->active;
2649 	}
2650 
2651 	/* In a few cases quantisation may produce enough error to
2652 	   leave t->cycle too low for the sum of active and recovery;
2653 	   if so, correct it. */
2654 	if (t->active + t->recover > t->cycle)
2655 		t->cycle = t->active + t->recover;
2656 
2657 	return 0;
2658 }
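
/*
 * Illustrative sketch of a caller: a PATA ->set_piomode() converts the
 * selected mode into clock counts of its own bus clock.  The clock value
 * and the final register programming are hypothetical; only the
 * ata_timing_compute() call itself is real API.
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	bus clock period in ps (33 MHz)
 *	int UT = T;			UDMA clock, controller specific
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, T, UT))
 *		program t.setup, t.active and t.recover into the chip
 */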
2659 
2660 /**
2661  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2662  *	@dev: Device to adjust xfer masks
2663  *	@sel: ATA_DNXFER_* selector
2664  *
2665  *	Adjust xfer masks of @dev downward.  Note that this function
2666  *	does not apply the change.  Invoking ata_set_mode() afterwards
2667  *	will apply the limit.
2668  *
2669  *	LOCKING:
2670  *	Inherited from caller.
2671  *
2672  *	RETURNS:
2673  *	0 on success, negative errno on failure
2674  */
2675 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2676 {
2677 	char buf[32];
2678 	unsigned int orig_mask, xfer_mask;
2679 	unsigned int pio_mask, mwdma_mask, udma_mask;
2680 	int quiet, highbit;
2681 
2682 	quiet = !!(sel & ATA_DNXFER_QUIET);
2683 	sel &= ~ATA_DNXFER_QUIET;
2684 
2685 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2686 						  dev->mwdma_mask,
2687 						  dev->udma_mask);
2688 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2689 
2690 	switch (sel) {
2691 	case ATA_DNXFER_PIO:
2692 		highbit = fls(pio_mask) - 1;
2693 		pio_mask &= ~(1 << highbit);
2694 		break;
2695 
2696 	case ATA_DNXFER_DMA:
2697 		if (udma_mask) {
2698 			highbit = fls(udma_mask) - 1;
2699 			udma_mask &= ~(1 << highbit);
2700 			if (!udma_mask)
2701 				return -ENOENT;
2702 		} else if (mwdma_mask) {
2703 			highbit = fls(mwdma_mask) - 1;
2704 			mwdma_mask &= ~(1 << highbit);
2705 			if (!mwdma_mask)
2706 				return -ENOENT;
2707 		}
2708 		break;
2709 
2710 	case ATA_DNXFER_40C:
2711 		udma_mask &= ATA_UDMA_MASK_40C;
2712 		break;
2713 
2714 	case ATA_DNXFER_FORCE_PIO0:
2715 		pio_mask &= 1;
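		/* fall through */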
2716 	case ATA_DNXFER_FORCE_PIO:
2717 		mwdma_mask = 0;
2718 		udma_mask = 0;
2719 		break;
2720 
2721 	default:
2722 		BUG();
2723 	}
2724 
2725 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2726 
2727 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2728 		return -ENOENT;
2729 
2730 	if (!quiet) {
2731 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2732 			snprintf(buf, sizeof(buf), "%s:%s",
2733 				 ata_mode_string(xfer_mask),
2734 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2735 		else
2736 			snprintf(buf, sizeof(buf), "%s",
2737 				 ata_mode_string(xfer_mask));
2738 
2739 		ata_dev_printk(dev, KERN_WARNING,
2740 			       "limiting speed to %s\n", buf);
2741 	}
2742 
2743 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2744 			    &dev->udma_mask);
2745 
2746 	return 0;
2747 }
2748 
2749 static int ata_dev_set_mode(struct ata_device *dev)
2750 {
2751 	struct ata_eh_context *ehc = &dev->ap->eh_context;
2752 	unsigned int err_mask;
2753 	int rc;
2754 
2755 	dev->flags &= ~ATA_DFLAG_PIO;
2756 	if (dev->xfer_shift == ATA_SHIFT_PIO)
2757 		dev->flags |= ATA_DFLAG_PIO;
2758 
2759 	err_mask = ata_dev_set_xfermode(dev);
2760 	/* Old CFA may refuse this command, which is just fine */
2761 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2762 		err_mask &= ~AC_ERR_DEV;
2763 
2764 	if (err_mask) {
2765 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2766 			       "(err_mask=0x%x)\n", err_mask);
2767 		return -EIO;
2768 	}
2769 
2770 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
2771 	rc = ata_dev_revalidate(dev, 0);
2772 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2773 	if (rc)
2774 		return rc;
2775 
2776 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2777 		dev->xfer_shift, (int)dev->xfer_mode);
2778 
2779 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2780 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2781 	return 0;
2782 }
2783 
2784 /**
2785  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2786  *	@ap: port on which timings will be programmed
2787  *	@r_failed_dev: out parameter for failed device
2788  *
2789  *	Standard implementation of the function used to tune and set
2790  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
2791  *	ata_dev_set_mode() fails, pointer to the failing device is
2792  *	returned in @r_failed_dev.
2793  *
2794  *	LOCKING:
2795  *	PCI/etc. bus probe sem.
2796  *
2797  *	RETURNS:
2798  *	0 on success, negative errno otherwise
2799  */
2800 
2801 int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2802 {
2803 	struct ata_device *dev;
2804 	int i, rc = 0, used_dma = 0, found = 0;
2805 
2806 
2807 	/* step 1: calculate xfer_mask */
2808 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2809 		unsigned int pio_mask, dma_mask;
2810 
2811 		dev = &ap->device[i];
2812 
2813 		if (!ata_dev_enabled(dev))
2814 			continue;
2815 
2816 		ata_dev_xfermask(dev);
2817 
2818 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2819 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2820 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2821 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2822 
2823 		found = 1;
2824 		if (dev->dma_mode)
2825 			used_dma = 1;
2826 	}
2827 	if (!found)
2828 		goto out;
2829 
2830 	/* step 2: always set host PIO timings */
2831 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2832 		dev = &ap->device[i];
2833 		if (!ata_dev_enabled(dev))
2834 			continue;
2835 
2836 		if (!dev->pio_mode) {
2837 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2838 			rc = -EINVAL;
2839 			goto out;
2840 		}
2841 
2842 		dev->xfer_mode = dev->pio_mode;
2843 		dev->xfer_shift = ATA_SHIFT_PIO;
2844 		if (ap->ops->set_piomode)
2845 			ap->ops->set_piomode(ap, dev);
2846 	}
2847 
2848 	/* step 3: set host DMA timings */
2849 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2850 		dev = &ap->device[i];
2851 
2852 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
2853 			continue;
2854 
2855 		dev->xfer_mode = dev->dma_mode;
2856 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2857 		if (ap->ops->set_dmamode)
2858 			ap->ops->set_dmamode(ap, dev);
2859 	}
2860 
2861 	/* step 4: update devices' xfer mode */
2862 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2863 		dev = &ap->device[i];
2864 
2865 		/* skip disabled devices */
2866 		if (!ata_dev_enabled(dev))
2867 			continue;
2868 
2869 		rc = ata_dev_set_mode(dev);
2870 		if (rc)
2871 			goto out;
2872 	}
2873 
2874 	/* Record simplex status. If we selected DMA then the other
2875 	 * host channels are not permitted to do so.
2876 	 */
2877 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2878 		ap->host->simplex_claimed = ap;
2879 
2880  out:
2881 	if (rc)
2882 		*r_failed_dev = dev;
2883 	return rc;
2884 }
2885 
2886 /**
2887  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
2888  *	@ap: port on which timings will be programmed
2889  *	@r_failed_dev: out parameter for failed device
2890  *
2891  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
2892  *	ata_set_mode() fails, pointer to the failing device is
2893  *	returned in @r_failed_dev.
2894  *
2895  *	LOCKING:
2896  *	PCI/etc. bus probe sem.
2897  *
2898  *	RETURNS:
2899  *	0 on success, negative errno otherwise
2900  */
2901 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2902 {
2903 	/* has private set_mode? */
2904 	if (ap->ops->set_mode)
2905 		return ap->ops->set_mode(ap, r_failed_dev);
2906 	return ata_do_set_mode(ap, r_failed_dev);
2907 }
2908 
2909 /**
2910  *	ata_tf_to_host - issue ATA taskfile to host controller
2911  *	@ap: port to which command is being issued
2912  *	@tf: ATA taskfile register set
2913  *
2914  *	Issues ATA taskfile register set to ATA host controller,
2915  *	with proper synchronization with interrupt handler and
2916  *	other threads.
2917  *
2918  *	LOCKING:
2919  *	spin_lock_irqsave(host lock)
2920  */
2921 
2922 static inline void ata_tf_to_host(struct ata_port *ap,
2923 				  const struct ata_taskfile *tf)
2924 {
2925 	ap->ops->tf_load(ap, tf);
2926 	ap->ops->exec_command(ap, tf);
2927 }
2928 
2929 /**
2930  *	ata_busy_sleep - sleep until BSY clears, or timeout
2931  *	@ap: port containing status register to be polled
2932  *	@tmout_pat: impatience timeout (in jiffies)
2933  *	@tmout: overall timeout (in jiffies)
2934  *
2935  *	Sleep until ATA Status register bit BSY clears,
2936  *	or a timeout occurs.
2937  *
2938  *	LOCKING:
2939  *	Kernel thread context (may sleep).
2940  *
2941  *	RETURNS:
2942  *	0 on success, -errno otherwise.
2943  */
2944 int ata_busy_sleep(struct ata_port *ap,
2945 		   unsigned long tmout_pat, unsigned long tmout)
2946 {
2947 	unsigned long timer_start, timeout;
2948 	u8 status;
2949 
2950 	status = ata_busy_wait(ap, ATA_BUSY, 300);
2951 	timer_start = jiffies;
2952 	timeout = timer_start + tmout_pat;
2953 	while (status != 0xff && (status & ATA_BUSY) &&
2954 	       time_before(jiffies, timeout)) {
2955 		msleep(50);
2956 		status = ata_busy_wait(ap, ATA_BUSY, 3);
2957 	}
2958 
2959 	if (status != 0xff && (status & ATA_BUSY))
2960 		ata_port_printk(ap, KERN_WARNING,
2961 				"port is slow to respond, please be patient "
2962 				"(Status 0x%x)\n", status);
2963 
2964 	timeout = timer_start + tmout;
2965 	while (status != 0xff && (status & ATA_BUSY) &&
2966 	       time_before(jiffies, timeout)) {
2967 		msleep(50);
2968 		status = ata_chk_status(ap);
2969 	}
2970 
2971 	if (status == 0xff)
2972 		return -ENODEV;
2973 
2974 	if (status & ATA_BUSY) {
2975 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
2976 				"(%lu secs, Status 0x%x)\n",
2977 				tmout / HZ, status);
2978 		return -EBUSY;
2979 	}
2980 
2981 	return 0;
2982 }
2983 
2984 /**
2985  *	ata_wait_ready - sleep until BSY clears, or timeout
2986  *	@ap: port containing status register to be polled
2987  *	@deadline: deadline jiffies for the operation
2988  *
2989  *	Sleep until ATA Status register bit BSY clears, or timeout
2990  *	occurs.
2991  *
2992  *	LOCKING:
2993  *	Kernel thread context (may sleep).
2994  *
2995  *	RETURNS:
2996  *	0 on success, -errno otherwise.
2997  */
2998 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
2999 {
3000 	unsigned long start = jiffies;
3001 	int warned = 0;
3002 
3003 	while (1) {
3004 		u8 status = ata_chk_status(ap);
3005 		unsigned long now = jiffies;
3006 
3007 		if (!(status & ATA_BUSY))
3008 			return 0;
3009 		if (!ata_port_online(ap) && status == 0xff)
3010 			return -ENODEV;
3011 		if (time_after(now, deadline))
3012 			return -EBUSY;
3013 
3014 		if (!warned && time_after(now, start + 5 * HZ) &&
3015 		    (deadline - now > 3 * HZ)) {
3016 			ata_port_printk(ap, KERN_WARNING,
3017 				"port is slow to respond, please be patient "
3018 				"(Status 0x%x)\n", status);
3019 			warned = 1;
3020 		}
3021 
3022 		msleep(50);
3023 	}
3024 }
3025 
3026 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3027 			      unsigned long deadline)
3028 {
3029 	struct ata_ioports *ioaddr = &ap->ioaddr;
3030 	unsigned int dev0 = devmask & (1 << 0);
3031 	unsigned int dev1 = devmask & (1 << 1);
3032 	int rc, ret = 0;
3033 
3034 	/* if device 0 was found in ata_devchk, wait for its
3035 	 * BSY bit to clear
3036 	 */
3037 	if (dev0) {
3038 		rc = ata_wait_ready(ap, deadline);
3039 		if (rc) {
3040 			if (rc != -ENODEV)
3041 				return rc;
3042 			ret = rc;
3043 		}
3044 	}
3045 
3046 	/* if device 1 was found in ata_devchk, wait for register
3047 	 * access briefly, then wait for BSY to clear.
3048 	 */
3049 	if (dev1) {
3050 		int i;
3051 
3052 		ap->ops->dev_select(ap, 1);
3053 
3054 		/* Wait for register access.  Some ATAPI devices fail
3055 		 * to set nsect/lbal after reset, so don't waste too
3056 		 * much time on it.  We're gonna wait for !BSY anyway.
3057 		 */
3058 		for (i = 0; i < 2; i++) {
3059 			u8 nsect, lbal;
3060 
3061 			nsect = ioread8(ioaddr->nsect_addr);
3062 			lbal = ioread8(ioaddr->lbal_addr);
3063 			if ((nsect == 1) && (lbal == 1))
3064 				break;
3065 			msleep(50);	/* give drive a breather */
3066 		}
3067 
3068 		rc = ata_wait_ready(ap, deadline);
3069 		if (rc) {
3070 			if (rc != -ENODEV)
3071 				return rc;
3072 			ret = rc;
3073 		}
3074 	}
3075 
3076 	/* is all this really necessary? */
3077 	ap->ops->dev_select(ap, 0);
3078 	if (dev1)
3079 		ap->ops->dev_select(ap, 1);
3080 	if (dev0)
3081 		ap->ops->dev_select(ap, 0);
3082 
3083 	return ret;
3084 }
3085 
3086 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3087 			     unsigned long deadline)
3088 {
3089 	struct ata_ioports *ioaddr = &ap->ioaddr;
3090 
3091 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3092 
3093 	/* software reset.  causes dev0 to be selected */
3094 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3095 	udelay(20);	/* FIXME: flush */
3096 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3097 	udelay(20);	/* FIXME: flush */
3098 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3099 
3100 	/* spec mandates ">= 2ms" before checking status.
3101 	 * We wait 150ms, because that was the magic delay used for
3102 	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3103 	 * between when the ATA command register is written, and then
3104 	 * status is checked.  Because waiting for "a while" before
3105 	 * checking status is fine, post SRST, we perform this magic
3106 	 * delay here as well.
3107 	 *
3108 	 * Old drivers/ide uses the 2mS rule and then waits for ready
3109 	 */
3110 	msleep(150);
3111 
3112 	/* Before we perform post reset processing we want to see if
3113 	 * the bus shows 0xFF because the odd clown forgets the D7
3114 	 * pulldown resistor.
3115 	 */
3116 	if (ata_check_status(ap) == 0xFF)
3117 		return -ENODEV;
3118 
3119 	return ata_bus_post_reset(ap, devmask, deadline);
3120 }
3121 
3122 /**
3123  *	ata_bus_reset - reset host port and associated ATA channel
3124  *	@ap: port to reset
3125  *
3126  *	This is typically the first time we actually start issuing
3127  *	commands to the ATA channel.  We wait for BSY to clear, then
3128  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3129  *	result.  Determine what devices, if any, are on the channel
3130  *	by looking at the device 0/1 error register.  Look at the signature
3131  *	stored in each device's taskfile registers, to determine if
3132  *	the device is ATA or ATAPI.
3133  *
3134  *	LOCKING:
3135  *	PCI/etc. bus probe sem.
3136  *	Obtains host lock.
3137  *
3138  *	SIDE EFFECTS:
3139  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3140  */
3141 
3142 void ata_bus_reset(struct ata_port *ap)
3143 {
3144 	struct ata_ioports *ioaddr = &ap->ioaddr;
3145 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3146 	u8 err;
3147 	unsigned int dev0, dev1 = 0, devmask = 0;
3148 	int rc;
3149 
3150 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3151 
3152 	/* determine if device 0/1 are present */
3153 	if (ap->flags & ATA_FLAG_SATA_RESET)
3154 		dev0 = 1;
3155 	else {
3156 		dev0 = ata_devchk(ap, 0);
3157 		if (slave_possible)
3158 			dev1 = ata_devchk(ap, 1);
3159 	}
3160 
3161 	if (dev0)
3162 		devmask |= (1 << 0);
3163 	if (dev1)
3164 		devmask |= (1 << 1);
3165 
3166 	/* select device 0 again */
3167 	ap->ops->dev_select(ap, 0);
3168 
3169 	/* issue bus reset */
3170 	if (ap->flags & ATA_FLAG_SRST) {
3171 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3172 		if (rc && rc != -ENODEV)
3173 			goto err_out;
3174 	}
3175 
3176 	/*
3177 	 * determine by signature whether we have ATA or ATAPI devices
3178 	 */
3179 	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
3180 	if ((slave_possible) && (err != 0x81))
3181 		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
3182 
3183 	/* is double-select really necessary? */
3184 	if (ap->device[1].class != ATA_DEV_NONE)
3185 		ap->ops->dev_select(ap, 1);
3186 	if (ap->device[0].class != ATA_DEV_NONE)
3187 		ap->ops->dev_select(ap, 0);
3188 
3189 	/* if no devices were detected, disable this port */
3190 	if ((ap->device[0].class == ATA_DEV_NONE) &&
3191 	    (ap->device[1].class == ATA_DEV_NONE))
3192 		goto err_out;
3193 
3194 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3195 		/* set up device control for ATA_FLAG_SATA_RESET */
3196 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3197 	}
3198 
3199 	DPRINTK("EXIT\n");
3200 	return;
3201 
3202 err_out:
3203 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3204 	ap->ops->port_disable(ap);
3205 
3206 	DPRINTK("EXIT\n");
3207 }
3208 
3209 /**
3210  *	sata_phy_debounce - debounce SATA phy status
3211  *	@ap: ATA port to debounce SATA phy status for
3212  *	@params: timing parameters { interval, duration, timeout } in msec
3213  *	@deadline: deadline jiffies for the operation
3214  *
3215  *	Make sure SStatus of @ap reaches stable state, determined by
3216  *	holding the same value where DET is not 1 for @duration polled
3217  *	every @interval, before @timeout.  The timeout constrains the
3218  *	beginning of the stable state.  Because DET gets stuck at 1 on
3219  *	some controllers after hot unplugging, this function waits
3220  *	until the timeout and then returns 0 if DET is stable at 1.
3221  *
3222  *	@timeout is further limited by @deadline.  The sooner of the
3223  *	two is used.
3224  *
3225  *	LOCKING:
3226  *	Kernel thread context (may sleep)
3227  *
3228  *	RETURNS:
3229  *	0 on success, -errno on failure.
3230  */
3231 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3232 		      unsigned long deadline)
3233 {
3234 	unsigned long interval_msec = params[0];
3235 	unsigned long duration = msecs_to_jiffies(params[1]);
3236 	unsigned long last_jiffies, t;
3237 	u32 last, cur;
3238 	int rc;
3239 
3240 	t = jiffies + msecs_to_jiffies(params[2]);
3241 	if (time_before(t, deadline))
3242 		deadline = t;
3243 
3244 	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3245 		return rc;
3246 	cur &= 0xf;
3247 
3248 	last = cur;
3249 	last_jiffies = jiffies;
3250 
3251 	while (1) {
3252 		msleep(interval_msec);
3253 		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3254 			return rc;
3255 		cur &= 0xf;
3256 
3257 		/* DET stable? */
3258 		if (cur == last) {
3259 			if (cur == 1 && time_before(jiffies, deadline))
3260 				continue;
3261 			if (time_after(jiffies, last_jiffies + duration))
3262 				return 0;
3263 			continue;
3264 		}
3265 
3266 		/* unstable, start over */
3267 		last = cur;
3268 		last_jiffies = jiffies;
3269 
3270 		/* Check deadline.  If debouncing failed, return
3271 		 * -EPIPE to tell upper layer to lower link speed.
3272 		 */
3273 		if (time_after(jiffies, deadline))
3274 			return -EPIPE;
3275 	}
3276 }
3277 
3278 /**
3279  *	sata_phy_resume - resume SATA phy
3280  *	@ap: ATA port to resume SATA phy for
3281  *	@params: timing parameters { interval, duration, timeout } in msec
3282  *	@deadline: deadline jiffies for the operation
3283  *
3284  *	Resume SATA phy of @ap and debounce it.
3285  *
3286  *	LOCKING:
3287  *	Kernel thread context (may sleep)
3288  *
3289  *	RETURNS:
3290  *	0 on success, -errno on failure.
3291  */
3292 int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3293 		    unsigned long deadline)
3294 {
3295 	u32 scontrol;
3296 	int rc;
3297 
3298 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3299 		return rc;
3300 
3301 	scontrol = (scontrol & 0x0f0) | 0x300;
3302 
3303 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3304 		return rc;
3305 
3306 	/* Some PHYs react badly if SStatus is pounded immediately
3307 	 * after resuming.  Delay 200ms before debouncing.
3308 	 */
3309 	msleep(200);
3310 
3311 	return sata_phy_debounce(ap, params, deadline);
3312 }
3313 
3314 /**
3315  *	ata_std_prereset - prepare for reset
3316  *	@ap: ATA port to be reset
3317  *	@deadline: deadline jiffies for the operation
3318  *
3319  *	@ap is about to be reset.  Initialize it.  Failure from
3320  *	prereset makes libata abort the whole reset sequence and give up
3321  *	that port, so prereset should be best-effort.  It does its
3322  *	best to prepare for the reset sequence, but if things go wrong
3323  *	it should just whine, not fail.
3324  *
3325  *	LOCKING:
3326  *	Kernel thread context (may sleep)
3327  *
3328  *	RETURNS:
3329  *	0 on success, -errno otherwise.
3330  */
3331 int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
3332 {
3333 	struct ata_eh_context *ehc = &ap->eh_context;
3334 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3335 	int rc;
3336 
3337 	/* handle link resume */
3338 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3339 	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3340 		ehc->i.action |= ATA_EH_HARDRESET;
3341 
3342 	/* if we're about to do hardreset, nothing more to do */
3343 	if (ehc->i.action & ATA_EH_HARDRESET)
3344 		return 0;
3345 
3346 	/* if SATA, resume phy */
3347 	if (ap->flags & ATA_FLAG_SATA) {
3348 		rc = sata_phy_resume(ap, timing, deadline);
3349 		/* whine about phy resume failure but proceed */
3350 		if (rc && rc != -EOPNOTSUPP)
3351 			ata_port_printk(ap, KERN_WARNING, "failed to resume "
3352 					"link for reset (errno=%d)\n", rc);
3353 	}
3354 
3355 	/* Wait for !BSY if the controller can wait for the first D2H
3356 	 * Reg FIS and we don't know that no device is attached.
3357 	 */
3358 	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3359 		rc = ata_wait_ready(ap, deadline);
3360 		if (rc && rc != -ENODEV) {
3361 			ata_port_printk(ap, KERN_WARNING, "device not ready "
3362 					"(errno=%d), forcing hardreset\n", rc);
3363 			ehc->i.action |= ATA_EH_HARDRESET;
3364 		}
3365 	}
3366 
3367 	return 0;
3368 }
3369 
3370 /**
3371  *	ata_std_softreset - reset host port via ATA SRST
3372  *	@ap: port to reset
3373  *	@classes: resulting classes of attached devices
3374  *	@deadline: deadline jiffies for the operation
3375  *
3376  *	Reset host port using ATA SRST.
3377  *
3378  *	LOCKING:
3379  *	Kernel thread context (may sleep)
3380  *
3381  *	RETURNS:
3382  *	0 on success, -errno otherwise.
3383  */
3384 int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3385 		      unsigned long deadline)
3386 {
3387 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3388 	unsigned int devmask = 0;
3389 	int rc;
3390 	u8 err;
3391 
3392 	DPRINTK("ENTER\n");
3393 
3394 	if (ata_port_offline(ap)) {
3395 		classes[0] = ATA_DEV_NONE;
3396 		goto out;
3397 	}
3398 
3399 	/* determine if device 0/1 are present */
3400 	if (ata_devchk(ap, 0))
3401 		devmask |= (1 << 0);
3402 	if (slave_possible && ata_devchk(ap, 1))
3403 		devmask |= (1 << 1);
3404 
3405 	/* select device 0 again */
3406 	ap->ops->dev_select(ap, 0);
3407 
3408 	/* issue bus reset */
3409 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3410 	rc = ata_bus_softreset(ap, devmask, deadline);
3411 	/* if link is occupied, -ENODEV too is an error */
3412 	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
3413 		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3414 		return rc;
3415 	}
3416 
3417 	/* determine by signature whether we have ATA or ATAPI devices */
3418 	classes[0] = ata_dev_try_classify(ap, 0, &err);
3419 	if (slave_possible && err != 0x81)
3420 		classes[1] = ata_dev_try_classify(ap, 1, &err);
3421 
3422  out:
3423 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3424 	return 0;
3425 }
3426 
3427 /**
3428  *	sata_port_hardreset - reset port via SATA phy reset
3429  *	@ap: port to reset
3430  *	@timing: timing parameters { interval, duration, timeout } in msec
3431  *	@deadline: deadline jiffies for the operation
3432  *
3433  *	SATA phy-reset host port using DET bits of SControl register.
3434  *
3435  *	LOCKING:
3436  *	Kernel thread context (may sleep)
3437  *
3438  *	RETURNS:
3439  *	0 on success, -errno otherwise.
3440  */
3441 int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3442 			unsigned long deadline)
3443 {
3444 	u32 scontrol;
3445 	int rc;
3446 
3447 	DPRINTK("ENTER\n");
3448 
3449 	if (sata_set_spd_needed(ap)) {
3450 		/* SATA spec says nothing about how to reconfigure
3451 		 * spd.  To be on the safe side, turn off phy during
3452 		 * reconfiguration.  This works for at least ICH7 AHCI
3453 		 * and Sil3124.
3454 		 */
3455 		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3456 			goto out;
3457 
3458 		scontrol = (scontrol & 0x0f0) | 0x304;
3459 
3460 		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3461 			goto out;
3462 
3463 		sata_set_spd(ap);
3464 	}
3465 
3466 	/* issue phy wake/reset */
3467 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3468 		goto out;
3469 
3470 	scontrol = (scontrol & 0x0f0) | 0x301;
3471 
3472 	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3473 		goto out;
3474 
3475 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3476 	 * 10.4.2 says at least 1 ms.
3477 	 */
3478 	msleep(1);
3479 
3480 	/* bring phy back */
3481 	rc = sata_phy_resume(ap, timing, deadline);
3482  out:
3483 	DPRINTK("EXIT, rc=%d\n", rc);
3484 	return rc;
3485 }
3486 
3487 /**
3488  *	sata_std_hardreset - reset host port via SATA phy reset
3489  *	@ap: port to reset
3490  *	@class: resulting class of attached device
3491  *	@deadline: deadline jiffies for the operation
3492  *
3493  *	SATA phy-reset host port using DET bits of SControl register,
3494  *	wait for !BSY and classify the attached device.
3495  *
3496  *	LOCKING:
3497  *	Kernel thread context (may sleep)
3498  *
3499  *	RETURNS:
3500  *	0 on success, -errno otherwise.
3501  */
3502 int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3503 		       unsigned long deadline)
3504 {
3505 	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3506 	int rc;
3507 
3508 	DPRINTK("ENTER\n");
3509 
3510 	/* do hardreset */
3511 	rc = sata_port_hardreset(ap, timing, deadline);
3512 	if (rc) {
3513 		ata_port_printk(ap, KERN_ERR,
3514 				"COMRESET failed (errno=%d)\n", rc);
3515 		return rc;
3516 	}
3517 
3518 	/* TODO: phy layer with polling, timeouts, etc. */
3519 	if (ata_port_offline(ap)) {
3520 		*class = ATA_DEV_NONE;
3521 		DPRINTK("EXIT, link offline\n");
3522 		return 0;
3523 	}
3524 
3525 	/* wait a while before checking status, see SRST for more info */
3526 	msleep(150);
3527 
3528 	rc = ata_wait_ready(ap, deadline);
3529 	/* link occupied, -ENODEV too is an error */
3530 	if (rc) {
3531 		ata_port_printk(ap, KERN_ERR,
3532 				"COMRESET failed (errno=%d)\n", rc);
3533 		return rc;
3534 	}
3535 
3536 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3537 
3538 	*class = ata_dev_try_classify(ap, 0, NULL);
3539 
3540 	DPRINTK("EXIT, class=%u\n", *class);
3541 	return 0;
3542 }
3543 
3544 /**
3545  *	ata_std_postreset - standard postreset callback
3546  *	@ap: the target ata_port
3547  *	@classes: classes of attached devices
3548  *
3549  *	This function is invoked after a successful reset.  Note that
3550  *	the device might have been reset more than once using
3551  *	different reset methods before postreset is invoked.
3552  *
3553  *	LOCKING:
3554  *	Kernel thread context (may sleep)
3555  */
3556 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3557 {
3558 	u32 serror;
3559 
3560 	DPRINTK("ENTER\n");
3561 
3562 	/* print link status */
3563 	sata_print_link_status(ap);
3564 
3565 	/* clear SError */
3566 	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3567 		sata_scr_write(ap, SCR_ERROR, serror);
3568 
3569 	/* is double-select really necessary? */
3570 	if (classes[0] != ATA_DEV_NONE)
3571 		ap->ops->dev_select(ap, 1);
3572 	if (classes[1] != ATA_DEV_NONE)
3573 		ap->ops->dev_select(ap, 0);
3574 
3575 	/* bail out if no device is present */
3576 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3577 		DPRINTK("EXIT, no device\n");
3578 		return;
3579 	}
3580 
3581 	/* set up device control */
3582 	if (ap->ioaddr.ctl_addr)
3583 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3584 
3585 	DPRINTK("EXIT\n");
3586 }
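
/*
 * Illustrative sketch: a driver with no special reset requirements feeds
 * the standard helpers above to the EH core from its ->error_handler()
 * hook.  my_error_handler() is hypothetical; ata_do_eh() is the libata
 * EH entry point taking prereset/softreset/hardreset/postreset callbacks.
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 */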
3587 
3588 /**
3589  *	ata_dev_same_device - Determine whether new ID matches configured device
3590  *	@dev: device to compare against
3591  *	@new_class: class of the new device
3592  *	@new_id: IDENTIFY page of the new device
3593  *
3594  *	Compare @new_class and @new_id against @dev and determine
3595  *	whether @dev is the device indicated by @new_class and
3596  *	@new_id.
3597  *
3598  *	LOCKING:
3599  *	None.
3600  *
3601  *	RETURNS:
3602  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3603  */
3604 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3605 			       const u16 *new_id)
3606 {
3607 	const u16 *old_id = dev->id;
3608 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3609 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3610 
3611 	if (dev->class != new_class) {
3612 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3613 			       dev->class, new_class);
3614 		return 0;
3615 	}
3616 
3617 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3618 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3619 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3620 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3621 
3622 	if (strcmp(model[0], model[1])) {
3623 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3624 			       "'%s' != '%s'\n", model[0], model[1]);
3625 		return 0;
3626 	}
3627 
3628 	if (strcmp(serial[0], serial[1])) {
3629 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3630 			       "'%s' != '%s'\n", serial[0], serial[1]);
3631 		return 0;
3632 	}
3633 
3634 	return 1;
3635 }
3636 
3637 /**
3638  *	ata_dev_reread_id - Re-read IDENTIFY data
3639  *	@dev: target ATA device
3640  *	@readid_flags: read ID flags
3641  *
3642  *	Re-read IDENTIFY page and make sure @dev is still attached to
3643  *	the port.
3644  *
3645  *	LOCKING:
3646  *	Kernel thread context (may sleep)
3647  *
3648  *	RETURNS:
3649  *	0 on success, negative errno otherwise
3650  */
3651 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3652 {
3653 	unsigned int class = dev->class;
3654 	u16 *id = (void *)dev->ap->sector_buf;
3655 	int rc;
3656 
3657 	/* read ID data */
3658 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3659 	if (rc)
3660 		return rc;
3661 
3662 	/* is the device still there? */
3663 	if (!ata_dev_same_device(dev, class, id))
3664 		return -ENODEV;
3665 
3666 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3667 	return 0;
3668 }
3669 
3670 /**
3671  *	ata_dev_revalidate - Revalidate ATA device
3672  *	@dev: device to revalidate
3673  *	@readid_flags: read ID flags
3674  *
3675  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3676  *	port and reconfigure it according to the new IDENTIFY page.
3677  *
3678  *	LOCKING:
3679  *	Kernel thread context (may sleep)
3680  *
3681  *	RETURNS:
3682  *	0 on success, negative errno otherwise
3683  */
3684 int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3685 {
3686 	u64 n_sectors = dev->n_sectors;
3687 	int rc;
3688 
3689 	if (!ata_dev_enabled(dev))
3690 		return -ENODEV;
3691 
3692 	/* re-read ID */
3693 	rc = ata_dev_reread_id(dev, readid_flags);
3694 	if (rc)
3695 		goto fail;
3696 
3697 	/* configure device according to the new ID */
3698 	rc = ata_dev_configure(dev);
3699 	if (rc)
3700 		goto fail;
3701 
3702 	/* verify n_sectors hasn't changed */
3703 	if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
3704 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3705 			       "%llu != %llu\n",
3706 			       (unsigned long long)n_sectors,
3707 			       (unsigned long long)dev->n_sectors);
3708 		rc = -ENODEV;
3709 		goto fail;
3710 	}
3711 
3712 	return 0;
3713 
3714  fail:
3715 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3716 	return rc;
3717 }
3718 
3719 struct ata_blacklist_entry {
3720 	const char *model_num;
3721 	const char *model_rev;
3722 	unsigned long horkage;
3723 };
3724 
3725 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3726 	/* Devices with DMA related problems under Linux */
3727 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
3728 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
3729 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
3730 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
3731 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
3732 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
3733 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
3734 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
3735 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
3736 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
3737 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
3738 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
3739 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
3740 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
3741 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
3742 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
3743 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
3744 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
3745 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
3746 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
3747 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
3748 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
3749 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
3750 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
3751 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
3752 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
3753 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3754 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
3755 	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
3756 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
3757 	{ "IOMEGA  ZIP 250       ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
3758 	{ "IOMEGA  ZIP 250       ATAPI       Floppy",
3759 				NULL,		ATA_HORKAGE_NODMA },
3760 
3761 	/* Weird ATAPI devices */
3762 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
3763 
3764 	/* Devices we expect to fail diagnostics */
3765 
3766 	/* Devices where NCQ should be avoided */
3767 	/* NCQ is slow */
3768 	{ "WDC WD740ADFD-00",   NULL,		ATA_HORKAGE_NONCQ },
3769 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
3770 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
3771 	/* NCQ is broken */
3772 	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
3773 	{ "Maxtor 6B200M0",	"BANC1BM0",	ATA_HORKAGE_NONCQ },
3774 	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
3775 	{ "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
3776 	 ATA_HORKAGE_NONCQ },
3777 	/* NCQ hard hangs device under heavier load, needs hard power cycle */
3778 	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
3779 	/* Blacklist entries taken from Silicon Image 3124/3132
3780 	   Windows driver .inf file - also several Linux problem reports */
3781 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
3782 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
3783 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3784 	/* Drives which do spurious command completion */
3785 	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
3786 	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
3787 	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
3788 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
3789 	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
3790 	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
3791 
3792 	/* Devices with NCQ limits */
3793 
3794 	/* End Marker */
3795 	{ }
3796 };
3797 
3798 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3799 {
3800 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
3801 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3802 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
3803 
3804 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3805 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3806 
3807 	while (ad->model_num) {
3808 		if (!strcmp(ad->model_num, model_num)) {
3809 			if (ad->model_rev == NULL)
3810 				return ad->horkage;
3811 			if (!strcmp(ad->model_rev, model_rev))
3812 				return ad->horkage;
3813 		}
3814 		ad++;
3815 	}
3816 	return 0;
3817 }
3818 
3819 static int ata_dma_blacklisted(const struct ata_device *dev)
3820 {
3821 	/* We don't support polling DMA.  Blacklist DMA (and fall back
3822 	 * to PIO) for ATAPI devices with CDB-intr if the LLDD handles
3823 	 * interrupts only in the HSM_ST_LAST state.
3824 	 */
3825 	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3826 	    (dev->flags & ATA_DFLAG_CDB_INTR))
3827 		return 1;
3828 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
3829 }
3830 
3831 /**
3832  *	ata_dev_xfermask - Compute supported xfermask of the given device
3833  *	@dev: Device to compute xfermask for
3834  *
3835  *	Compute supported xfermask of @dev and store it in
3836  *	dev->*_mask.  This function is responsible for applying all
3837  *	known limits including host controller limits, device
3838  *	blacklist, etc...
3839  *
3840  *	LOCKING:
3841  *	None.
3842  */
3843 static void ata_dev_xfermask(struct ata_device *dev)
3844 {
3845 	struct ata_port *ap = dev->ap;
3846 	struct ata_host *host = ap->host;
3847 	unsigned long xfer_mask;
3848 
3849 	/* controller modes available */
3850 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
3851 				      ap->mwdma_mask, ap->udma_mask);
3852 
3853 	/* drive modes available */
3854 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3855 				       dev->mwdma_mask, dev->udma_mask);
3856 	xfer_mask &= ata_id_xfermask(dev->id);
3857 
3858 	/*
3859 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
3860 	 *	cable
3861 	 */
3862 	if (ata_dev_pair(dev)) {
3863 		/* No PIO5 or PIO6 */
3864 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3865 		/* No MWDMA3 or MWDMA4 */
3866 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3867 	}
3868 
3869 	if (ata_dma_blacklisted(dev)) {
3870 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3871 		ata_dev_printk(dev, KERN_WARNING,
3872 			       "device is on DMA blacklist, disabling DMA\n");
3873 	}
3874 
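	/* on a simplex host only one port may use DMA at a time; if
	 * another port has already claimed it, fall back to PIO here
	 */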
3875 	if ((host->flags & ATA_HOST_SIMPLEX) &&
3876 	    host->simplex_claimed && host->simplex_claimed != ap) {
3877 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3878 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3879 			       "other device, disabling DMA\n");
3880 	}
3881 
3882 	if (ap->flags & ATA_FLAG_NO_IORDY)
3883 		xfer_mask &= ata_pio_mask_no_iordy(dev);
3884 
3885 	if (ap->ops->mode_filter)
3886 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3887 
3888 	/* Apply cable rule here.  Don't apply it early because when
3889 	 * we handle hot plug the cable type can itself change.
3890 	 * Check this last so that we know if the transfer rate was
3891 	 * solely limited by the cable.
3892 	 * Unknown or 80-wire cables reported on the host side are also
3893 	 * checked on the drive side. Cases where we know a 40-wire cable
3894 	 * is safely being used for 80-wire operation are not checked here.
3895 	 */
3896 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3897 		/* UDMA/44 or higher would be available */
3898 		if ((ap->cbl == ATA_CBL_PATA40) ||
3899 		    (ata_drive_40wire(dev->id) &&
3900 		     (ap->cbl == ATA_CBL_PATA_UNK ||
3901 		      ap->cbl == ATA_CBL_PATA80))) {
3902 			ata_dev_printk(dev, KERN_WARNING,
3903 				"limited to UDMA/33 due to 40-wire cable\n");
3904 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3905 		}
3906 
3907 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3908 			    &dev->mwdma_mask, &dev->udma_mask);
3909 }
3910 
3911 /**
3912  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3913  *	@dev: Device to which command will be sent
3914  *
3915  *	Issue SET FEATURES - XFER MODE command to device @dev
3916  *	on its port.
3917  *
3918  *	LOCKING:
3919  *	PCI/etc. bus probe sem.
3920  *
3921  *	RETURNS:
3922  *	0 on success, AC_ERR_* mask otherwise.
3923  */
3924 
3925 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3926 {
3927 	struct ata_taskfile tf;
3928 	unsigned int err_mask;
3929 
3930 	/* set up set-features taskfile */
3931 	DPRINTK("set features - xfer mode\n");
3932 
3933 	/* Some controllers and ATAPI devices show flaky interrupt
3934 	 * behavior after setting xfer mode.  Use polling instead.
3935 	 */
3936 	ata_tf_init(dev, &tf);
3937 	tf.command = ATA_CMD_SET_FEATURES;
3938 	tf.feature = SETFEATURES_XFER;
3939 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
3940 	tf.protocol = ATA_PROT_NODATA;
3941 	tf.nsect = dev->xfer_mode;
3942 
3943 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3944 
3945 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3946 	return err_mask;
3947 }
3948 
3949 /**
3950  *	ata_dev_init_params - Issue INIT DEV PARAMS command
3951  *	@dev: Device to which command will be sent
3952  *	@heads: Number of heads (taskfile parameter)
3953  *	@sectors: Number of sectors (taskfile parameter)
3954  *
3955  *	LOCKING:
3956  *	Kernel thread context (may sleep)
3957  *
3958  *	RETURNS:
3959  *	0 on success, AC_ERR_* mask otherwise.
3960  */
3961 static unsigned int ata_dev_init_params(struct ata_device *dev,
3962 					u16 heads, u16 sectors)
3963 {
3964 	struct ata_taskfile tf;
3965 	unsigned int err_mask;
3966 
3967 	/* Number of sectors per track 1-255. Number of heads 1-16 */
3968 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3969 		return AC_ERR_INVALID;
3970 
3971 	/* set up init dev params taskfile */
3972 	DPRINTK("init dev params\n");
3973 
3974 	ata_tf_init(dev, &tf);
3975 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
3976 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3977 	tf.protocol = ATA_PROT_NODATA;
3978 	tf.nsect = sectors;
3979 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3980 
3981 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3982 
3983 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3984 	return err_mask;
3985 }
3986 
3987 /**
3988  *	ata_sg_clean - Unmap DMA memory associated with command
3989  *	@qc: Command containing DMA memory to be released
3990  *
3991  *	Unmap all mapped DMA memory associated with this command.
3992  *
3993  *	LOCKING:
3994  *	spin_lock_irqsave(host lock)
3995  */
3996 void ata_sg_clean(struct ata_queued_cmd *qc)
3997 {
3998 	struct ata_port *ap = qc->ap;
3999 	struct scatterlist *sg = qc->__sg;
4000 	int dir = qc->dma_dir;
4001 	void *pad_buf = NULL;
4002 
4003 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4004 	WARN_ON(sg == NULL);
4005 
4006 	if (qc->flags & ATA_QCFLAG_SINGLE)
4007 		WARN_ON(qc->n_elem > 1);
4008 
4009 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4010 
4011 	/* if we padded the buffer out to a 32-bit boundary, and the data
4012 	 * xfer direction is from-device, we must copy from the
4013 	 * pad buffer back into the supplied buffer
4014 	 */
4015 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4016 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4017 
4018 	if (qc->flags & ATA_QCFLAG_SG) {
4019 		if (qc->n_elem)
4020 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4021 		/* restore last sg */
4022 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
4023 		if (pad_buf) {
4024 			struct scatterlist *psg = &qc->pad_sgent;
4025 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4026 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4027 			kunmap_atomic(addr, KM_IRQ0);
4028 		}
4029 	} else {
4030 		if (qc->n_elem)
4031 			dma_unmap_single(ap->dev,
4032 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4033 				dir);
4034 		/* restore sg */
4035 		sg->length += qc->pad_len;
4036 		if (pad_buf)
4037 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4038 			       pad_buf, qc->pad_len);
4039 	}
4040 
4041 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4042 	qc->__sg = NULL;
4043 }
4044 
4045 /**
4046  *	ata_fill_sg - Fill PCI IDE PRD table
4047  *	@qc: Metadata associated with taskfile to be transferred
4048  *
4049  *	Fill PCI IDE PRD (scatter-gather) table with segments
4050  *	associated with the current disk command.
4051  *
4052  *	LOCKING:
4053  *	spin_lock_irqsave(host lock)
4054  *
4055  */
4056 static void ata_fill_sg(struct ata_queued_cmd *qc)
4057 {
4058 	struct ata_port *ap = qc->ap;
4059 	struct scatterlist *sg;
4060 	unsigned int idx;
4061 
4062 	WARN_ON(qc->__sg == NULL);
4063 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4064 
4065 	idx = 0;
4066 	ata_for_each_sg(sg, qc) {
4067 		u32 addr, offset;
4068 		u32 sg_len, len;
4069 
4070 		/* determine if physical DMA addr spans 64K boundary.
4071 		 * Note h/w doesn't support 64-bit, so we unconditionally
4072 		 * truncate dma_addr_t to u32.
4073 		 */
4074 		addr = (u32) sg_dma_address(sg);
4075 		sg_len = sg_dma_len(sg);
4076 
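		/* split the area so that no single PRD entry crosses a
		 * 64K boundary
		 */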
4077 		while (sg_len) {
4078 			offset = addr & 0xffff;
4079 			len = sg_len;
4080 			if ((offset + sg_len) > 0x10000)
4081 				len = 0x10000 - offset;
4082 
4083 			ap->prd[idx].addr = cpu_to_le32(addr);
4084 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4085 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4086 
4087 			idx++;
4088 			sg_len -= len;
4089 			addr += len;
4090 		}
4091 	}
4092 
4093 	if (idx)
4094 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4095 }
4096 
4097 /**
4098  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4099  *	@qc: Metadata associated with taskfile to be transferred
4100  *
4101  *	Fill PCI IDE PRD (scatter-gather) table with segments
4102  *	associated with the current disk command. Perform the fill
4103  *	so that we avoid writing any length 64K records for
4104  *	so that we avoid writing any 64K-length records, for
4105  *
4106  *	LOCKING:
4107  *	spin_lock_irqsave(host lock)
4108  *
4109  */
4110 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4111 {
4112 	struct ata_port *ap = qc->ap;
4113 	struct scatterlist *sg;
4114 	unsigned int idx;
4115 
4116 	WARN_ON(qc->__sg == NULL);
4117 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4118 
4119 	idx = 0;
4120 	ata_for_each_sg(sg, qc) {
4121 		u32 addr, offset;
4122 		u32 sg_len, len, blen;
4123 
4124 		/* determine if physical DMA addr spans 64K boundary.
4125 		 * Note h/w doesn't support 64-bit, so we unconditionally
4126 		 * truncate dma_addr_t to u32.
4127 		 */
4128 		addr = (u32) sg_dma_address(sg);
4129 		sg_len = sg_dma_len(sg);
4130 
4131 		while (sg_len) {
4132 			offset = addr & 0xffff;
4133 			len = sg_len;
4134 			if ((offset + sg_len) > 0x10000)
4135 				len = 0x10000 - offset;
4136 
4137 			blen = len & 0xffff;
4138 			ap->prd[idx].addr = cpu_to_le32(addr);
4139 			if (blen == 0) {
4140 			   /* Some PATA chipsets like the CS5530 can't
4141 			      cope with 0x0000 meaning 64K as the spec says */
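			   /* split it into two 32K PRD entries instead */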
4142 				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4143 				blen = 0x8000;
4144 				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4145 			}
4146 			ap->prd[idx].flags_len = cpu_to_le32(blen);
4147 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4148 
4149 			idx++;
4150 			sg_len -= len;
4151 			addr += len;
4152 		}
4153 	}
4154 
4155 	if (idx)
4156 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4157 }
4158 
4159 /**
4160  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4161  *	@qc: Metadata associated with taskfile to check
4162  *
4163  *	Allow low-level driver to filter ATA PACKET commands, returning
4164  *	a status indicating whether or not it is OK to use DMA for the
4165  *	supplied PACKET command.
4166  *
4167  *	LOCKING:
4168  *	spin_lock_irqsave(host lock)
4169  *
4170  *	RETURNS: 0 when ATAPI DMA can be used
4171  *               nonzero otherwise
4172  */
4173 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4174 {
4175 	struct ata_port *ap = qc->ap;
4176 
4177 	/* Don't allow DMA if the request isn't a multiple of 16 bytes.
4178 	 * Quite a few ATAPI devices choke on such DMA requests.
4179 	 */
4180 	if (unlikely(qc->nbytes & 15))
4181 		return 1;
4182 
4183 	if (ap->ops->check_atapi_dma)
4184 		return ap->ops->check_atapi_dma(qc);
4185 
4186 	return 0;
4187 }
4188 
4189 /**
4190  *	ata_qc_prep - Prepare taskfile for submission
4191  *	@qc: Metadata associated with taskfile to be prepared
4192  *
4193  *	Prepare ATA taskfile for submission.
4194  *
4195  *	LOCKING:
4196  *	spin_lock_irqsave(host lock)
4197  */
4198 void ata_qc_prep(struct ata_queued_cmd *qc)
4199 {
4200 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4201 		return;
4202 
4203 	ata_fill_sg(qc);
4204 }
4205 
4206 /**
4207  *	ata_dumb_qc_prep - Prepare taskfile for submission
4208  *	@qc: Metadata associated with taskfile to be prepared
4209  *
4210  *	Prepare ATA taskfile for submission.
4211  *
4212  *	LOCKING:
4213  *	spin_lock_irqsave(host lock)
4214  */
4215 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4216 {
4217 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4218 		return;
4219 
4220 	ata_fill_sg_dumb(qc);
4221 }
4222 
4223 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4224 
4225 /**
4226  *	ata_sg_init_one - Associate command with memory buffer
4227  *	@qc: Command to be associated
4228  *	@buf: Memory buffer
4229  *	@buflen: Length of memory buffer, in bytes.
4230  *
4231  *	Initialize the data-related elements of queued_cmd @qc
4232  *	to point to a single memory buffer, @buf of byte length @buflen.
4233  *
4234  *	LOCKING:
4235  *	spin_lock_irqsave(host lock)
4236  */
4237 
4238 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4239 {
4240 	qc->flags |= ATA_QCFLAG_SINGLE;
4241 
4242 	qc->__sg = &qc->sgent;
4243 	qc->n_elem = 1;
4244 	qc->orig_n_elem = 1;
4245 	qc->buf_virt = buf;
4246 	qc->nbytes = buflen;
4247 
4248 	sg_init_one(&qc->sgent, buf, buflen);
4249 }
4250 
4251 /**
4252  *	ata_sg_init - Associate command with scatter-gather table.
4253  *	@qc: Command to be associated
4254  *	@sg: Scatter-gather table.
4255  *	@n_elem: Number of elements in s/g table.
4256  *
4257  *	Initialize the data-related elements of queued_cmd @qc
4258  *	to point to a scatter-gather table @sg, containing @n_elem
4259  *	elements.
4260  *
4261  *	LOCKING:
4262  *	spin_lock_irqsave(host lock)
4263  */
4264 
4265 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4266 		 unsigned int n_elem)
4267 {
4268 	qc->flags |= ATA_QCFLAG_SG;
4269 	qc->__sg = sg;
4270 	qc->n_elem = n_elem;
4271 	qc->orig_n_elem = n_elem;
4272 }
4273 
4274 /**
4275  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4276  *	@qc: Command with memory buffer to be mapped.
4277  *
4278  *	DMA-map the memory buffer associated with queued_cmd @qc.
4279  *
4280  *	LOCKING:
4281  *	spin_lock_irqsave(host lock)
4282  *
4283  *	RETURNS:
4284  *	Zero on success, negative on error.
4285  */
4286 
4287 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4288 {
4289 	struct ata_port *ap = qc->ap;
4290 	int dir = qc->dma_dir;
4291 	struct scatterlist *sg = qc->__sg;
4292 	dma_addr_t dma_address;
4293 	int trim_sg = 0;
4294 
4295 	/* we must lengthen transfers to end on a 32-bit boundary */
4296 	qc->pad_len = sg->length & 3;
4297 	if (qc->pad_len) {
4298 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4299 		struct scatterlist *psg = &qc->pad_sgent;
4300 
4301 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4302 
4303 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4304 
4305 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4306 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4307 			       qc->pad_len);
4308 
4309 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4310 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4311 		/* trim sg */
4312 		sg->length -= qc->pad_len;
4313 		if (sg->length == 0)
4314 			trim_sg = 1;
4315 
4316 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4317 			sg->length, qc->pad_len);
4318 	}
4319 
4320 	if (trim_sg) {
4321 		qc->n_elem--;
4322 		goto skip_map;
4323 	}
4324 
4325 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4326 				     sg->length, dir);
4327 	if (dma_mapping_error(dma_address)) {
4328 		/* restore sg */
4329 		sg->length += qc->pad_len;
4330 		return -1;
4331 	}
4332 
4333 	sg_dma_address(sg) = dma_address;
4334 	sg_dma_len(sg) = sg->length;
4335 
4336 skip_map:
4337 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4338 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4339 
4340 	return 0;
4341 }
4342 
4343 /**
4344  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4345  *	@qc: Command with scatter-gather table to be mapped.
4346  *
4347  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4348  *
4349  *	LOCKING:
4350  *	spin_lock_irqsave(host lock)
4351  *
4352  *	RETURNS:
4353  *	Zero on success, negative on error.
4354  *
4355  */
4356 
4357 static int ata_sg_setup(struct ata_queued_cmd *qc)
4358 {
4359 	struct ata_port *ap = qc->ap;
4360 	struct scatterlist *sg = qc->__sg;
4361 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
4362 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4363 
4364 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4365 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4366 
4367 	/* we must lengthen transfers to end on a 32-bit boundary */
4368 	qc->pad_len = lsg->length & 3;
4369 	if (qc->pad_len) {
4370 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4371 		struct scatterlist *psg = &qc->pad_sgent;
4372 		unsigned int offset;
4373 
4374 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4375 
4376 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4377 
4378 		/*
4379 		 * psg->page/offset are used to copy to-be-written
4380 		 * data in this function or read data in ata_sg_clean.
4381 		 */
4382 		offset = lsg->offset + lsg->length - qc->pad_len;
4383 		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4384 		psg->offset = offset_in_page(offset);
4385 
4386 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
4387 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4388 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4389 			kunmap_atomic(addr, KM_IRQ0);
4390 		}
4391 
4392 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4393 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4394 		/* trim last sg */
4395 		lsg->length -= qc->pad_len;
4396 		if (lsg->length == 0)
4397 			trim_sg = 1;
4398 
4399 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4400 			qc->n_elem - 1, lsg->length, qc->pad_len);
4401 	}
4402 
4403 	pre_n_elem = qc->n_elem;
4404 	if (trim_sg && pre_n_elem)
4405 		pre_n_elem--;
4406 
4407 	if (!pre_n_elem) {
4408 		n_elem = 0;
4409 		goto skip_map;
4410 	}
4411 
4412 	dir = qc->dma_dir;
4413 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4414 	if (n_elem < 1) {
4415 		/* restore last sg */
4416 		lsg->length += qc->pad_len;
4417 		return -1;
4418 	}
4419 
4420 	DPRINTK("%d sg elements mapped\n", n_elem);
4421 
4422 skip_map:
4423 	qc->n_elem = n_elem;
4424 
4425 	return 0;
4426 }
4427 
4428 /**
4429  *	swap_buf_le16 - swap halves of 16-bit words in place
4430  *	@buf:  Buffer to swap
4431  *	@buf_words:  Number of 16-bit words in buffer.
4432  *
4433  *	Swap halves of 16-bit words if needed to convert from
4434  *	little-endian byte order to native cpu byte order, or
4435  *	vice-versa.
4436  *
4437  *	LOCKING:
4438  *	Inherited from caller.
4439  */
4440 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4441 {
4442 #ifdef __BIG_ENDIAN
4443 	unsigned int i;
4444 
4445 	for (i = 0; i < buf_words; i++)
4446 		buf[i] = le16_to_cpu(buf[i]);
4447 #endif /* __BIG_ENDIAN */
4448 }
4449 
4450 /**
4451  *	ata_data_xfer - Transfer data by PIO
4452  *	@adev: device to target
4453  *	@buf: data buffer
4454  *	@buflen: buffer length
4455  *	@write_data: read/write
4456  *
4457  *	Transfer data from/to the device data register by PIO.
4458  *
4459  *	LOCKING:
4460  *	Inherited from caller.
4461  */
4462 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4463 		   unsigned int buflen, int write_data)
4464 {
4465 	struct ata_port *ap = adev->ap;
4466 	unsigned int words = buflen >> 1;
4467 
4468 	/* Transfer multiple of 2 bytes */
4469 	if (write_data)
4470 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4471 	else
4472 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4473 
4474 	/* Transfer trailing 1 byte, if any. */
4475 	if (unlikely(buflen & 0x01)) {
4476 		u16 align_buf[1] = { 0 };
4477 		unsigned char *trailing_buf = buf + buflen - 1;
4478 
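		/* the data register is 16 bits wide; merge the odd
		 * trailing byte into a full 16-bit word for the transfer
		 */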
4479 		if (write_data) {
4480 			memcpy(align_buf, trailing_buf, 1);
4481 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4482 		} else {
4483 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4484 			memcpy(trailing_buf, align_buf, 1);
4485 		}
4486 	}
4487 }
4488 
4489 /**
4490  *	ata_data_xfer_noirq - Transfer data by PIO
4491  *	@adev: device to target
4492  *	@buf: data buffer
4493  *	@buflen: buffer length
4494  *	@write_data: read/write
4495  *
4496  *	Transfer data from/to the device data register by PIO. Do the
4497  *	transfer with interrupts disabled.
4498  *
4499  *	LOCKING:
4500  *	Inherited from caller.
4501  */
4502 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4503 			 unsigned int buflen, int write_data)
4504 {
4505 	unsigned long flags;
4506 	local_irq_save(flags);
4507 	ata_data_xfer(adev, buf, buflen, write_data);
4508 	local_irq_restore(flags);
4509 }
4510 
4511 
4512 /**
4513  *	ata_pio_sector - Transfer a sector of data.
4514  *	@qc: Command in progress
4515  *
4516  *	Transfer qc->sect_size bytes of data from/to the ATA device.
4517  *
4518  *	LOCKING:
4519  *	Inherited from caller.
4520  */
4521 
4522 static void ata_pio_sector(struct ata_queued_cmd *qc)
4523 {
4524 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4525 	struct scatterlist *sg = qc->__sg;
4526 	struct ata_port *ap = qc->ap;
4527 	struct page *page;
4528 	unsigned int offset;
4529 	unsigned char *buf;
4530 
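	/* if this is the last sector of the request, let the HSM
	 * finish up (HSM_ST_LAST) once this sector has been transferred
	 */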
4531 	if (qc->curbytes == qc->nbytes - qc->sect_size)
4532 		ap->hsm_task_state = HSM_ST_LAST;
4533 
4534 	page = sg[qc->cursg].page;
4535 	offset = sg[qc->cursg].offset + qc->cursg_ofs;
4536 
4537 	/* get the current page and offset */
4538 	page = nth_page(page, (offset >> PAGE_SHIFT));
4539 	offset %= PAGE_SIZE;
4540 
4541 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4542 
4543 	if (PageHighMem(page)) {
4544 		unsigned long flags;
4545 
4546 		/* FIXME: use a bounce buffer */
4547 		local_irq_save(flags);
4548 		buf = kmap_atomic(page, KM_IRQ0);
4549 
4550 		/* do the actual data transfer */
4551 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4552 
4553 		kunmap_atomic(buf, KM_IRQ0);
4554 		local_irq_restore(flags);
4555 	} else {
4556 		buf = page_address(page);
4557 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4558 	}
4559 
4560 	qc->curbytes += qc->sect_size;
4561 	qc->cursg_ofs += qc->sect_size;
4562 
4563 	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4564 		qc->cursg++;
4565 		qc->cursg_ofs = 0;
4566 	}
4567 }
4568 
4569 /**
4570  *	ata_pio_sectors - Transfer one or many sectors.
4571  *	@qc: Command in progress
4572  *
4573  *	Transfer one or many sectors of data from/to the
4574  *	ATA device for the DRQ request.
4575  *
4576  *	LOCKING:
4577  *	Inherited from caller.
4578  */
4579 
4580 static void ata_pio_sectors(struct ata_queued_cmd *qc)
4581 {
4582 	if (is_multi_taskfile(&qc->tf)) {
4583 		/* READ/WRITE MULTIPLE */
4584 		unsigned int nsect;
4585 
4586 		WARN_ON(qc->dev->multi_count == 0);
4587 
4588 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4589 			    qc->dev->multi_count);
4590 		while (nsect--)
4591 			ata_pio_sector(qc);
4592 	} else
4593 		ata_pio_sector(qc);
4594 }
4595 
4596 /**
4597  *	atapi_send_cdb - Write CDB bytes to hardware
4598  *	@ap: Port to which ATAPI device is attached.
4599  *	@qc: Taskfile currently active
4600  *
4601  *	When the device has indicated its readiness to accept
4602  *	a CDB, this function is called.  Send the CDB.
4603  *
4604  *	LOCKING:
4605  *	caller.
4606  */
4607 
4608 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4609 {
4610 	/* send SCSI cdb */
4611 	DPRINTK("send cdb\n");
4612 	WARN_ON(qc->dev->cdb_len < 12);
4613 
4614 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4615 	ata_altstatus(ap); /* flush */
4616 
4617 	switch (qc->tf.protocol) {
4618 	case ATA_PROT_ATAPI:
4619 		ap->hsm_task_state = HSM_ST;
4620 		break;
4621 	case ATA_PROT_ATAPI_NODATA:
4622 		ap->hsm_task_state = HSM_ST_LAST;
4623 		break;
4624 	case ATA_PROT_ATAPI_DMA:
4625 		ap->hsm_task_state = HSM_ST_LAST;
4626 		/* initiate bmdma */
4627 		ap->ops->bmdma_start(qc);
4628 		break;
4629 	}
4630 }
4631 
4632 /**
4633  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
4634  *	@qc: Command in progress
4635  *	@bytes: number of bytes
4636  *
4637  *	Transfer data from/to the ATAPI device.
4638  *
4639  *	LOCKING:
4640  *	Inherited from caller.
4641  *
4642  */
4643 
4644 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4645 {
4646 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4647 	struct scatterlist *sg = qc->__sg;
4648 	struct ata_port *ap = qc->ap;
4649 	struct page *page;
4650 	unsigned char *buf;
4651 	unsigned int offset, count;
4652 
4653 	if (qc->curbytes + bytes >= qc->nbytes)
4654 		ap->hsm_task_state = HSM_ST_LAST;
4655 
4656 next_sg:
4657 	if (unlikely(qc->cursg >= qc->n_elem)) {
4658 		/*
4659 		 * The end of qc->sg is reached and the device expects
4660 		 * more data to transfer.  In order not to overrun qc->sg and
4661 		 * to fulfill the length specified in the byte count register,
4662 		 *    - for the read case, discard trailing data from the device
4663 		 *    - for the write case, pad the device with zero data
4664 		 */
4665 		u16 pad_buf[1] = { 0 };
4666 		unsigned int words = bytes >> 1;
4667 		unsigned int i;
4668 
4669 		if (words) /* warning if bytes > 1 */
4670 			ata_dev_printk(qc->dev, KERN_WARNING,
4671 				       "%u bytes trailing data\n", bytes);
4672 
4673 		for (i = 0; i < words; i++)
4674 			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4675 
4676 		ap->hsm_task_state = HSM_ST_LAST;
4677 		return;
4678 	}
4679 
4680 	sg = &qc->__sg[qc->cursg];
4681 
4682 	page = sg->page;
4683 	offset = sg->offset + qc->cursg_ofs;
4684 
4685 	/* get the current page and offset */
4686 	page = nth_page(page, (offset >> PAGE_SHIFT));
4687 	offset %= PAGE_SIZE;
4688 
4689 	/* don't overrun current sg */
4690 	count = min(sg->length - qc->cursg_ofs, bytes);
4691 
4692 	/* don't cross page boundaries */
4693 	count = min(count, (unsigned int)PAGE_SIZE - offset);
4694 
4695 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4696 
4697 	if (PageHighMem(page)) {
4698 		unsigned long flags;
4699 
4700 		/* FIXME: use bounce buffer */
4701 		local_irq_save(flags);
4702 		buf = kmap_atomic(page, KM_IRQ0);
4703 
4704 		/* do the actual data transfer */
4705 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4706 
4707 		kunmap_atomic(buf, KM_IRQ0);
4708 		local_irq_restore(flags);
4709 	} else {
4710 		buf = page_address(page);
4711 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4712 	}
4713 
4714 	bytes -= count;
4715 	qc->curbytes += count;
4716 	qc->cursg_ofs += count;
4717 
4718 	if (qc->cursg_ofs == sg->length) {
4719 		qc->cursg++;
4720 		qc->cursg_ofs = 0;
4721 	}
4722 
4723 	if (bytes)
4724 		goto next_sg;
4725 }
4726 
4727 /**
4728  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
4729  *	@qc: Command in progress
4730  *
4731  *	Transfer data from/to the ATAPI device.
4732  *
4733  *	LOCKING:
4734  *	Inherited from caller.
4735  */
4736 
4737 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4738 {
4739 	struct ata_port *ap = qc->ap;
4740 	struct ata_device *dev = qc->dev;
4741 	unsigned int ireason, bc_lo, bc_hi, bytes;
4742 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4743 
4744 	/* Abuse qc->result_tf for temp storage of intermediate TF
4745 	 * here to save some kernel stack usage.
4746 	 * For normal completion, qc->result_tf is not relevant. For
4747 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
4748 	 * So, the correctness of qc->result_tf is not affected.
4749 	 */
4750 	ap->ops->tf_read(ap, &qc->result_tf);
4751 	ireason = qc->result_tf.nsect;
4752 	bc_lo = qc->result_tf.lbam;
4753 	bc_hi = qc->result_tf.lbah;
4754 	bytes = (bc_hi << 8) | bc_lo;
4755 
4756 	/* shall be cleared to zero, indicating xfer of data */
4757 	if (ireason & (1 << 0))
4758 		goto err_out;
4759 
4760 	/* make sure transfer direction matches expected */
4761 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4762 	if (do_write != i_write)
4763 		goto err_out;
4764 
4765 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4766 
4767 	__atapi_pio_bytes(qc, bytes);
4768 
4769 	return;
4770 
4771 err_out:
4772 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4773 	qc->err_mask |= AC_ERR_HSM;
4774 	ap->hsm_task_state = HSM_ST_ERR;
4775 }
4776 
4777 /**
4778  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4779  *	@ap: the target ata_port
4780  *	@qc: qc in progress
4781  *
4782  *	RETURNS:
4783  *	1 if ok in workqueue, 0 otherwise.
4784  */
4785 
4786 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4787 {
4788 	if (qc->tf.flags & ATA_TFLAG_POLLING)
4789 		return 1;
4790 
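	/* in HSM_ST_FIRST, the first data block of a PIO write and the
	 * CDB of a non-CDB-intr ATAPI command are sent from the
	 * workqueue (see ata_qc_issue_prot())
	 */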
4791 	if (ap->hsm_task_state == HSM_ST_FIRST) {
4792 		if (qc->tf.protocol == ATA_PROT_PIO &&
4793 		    (qc->tf.flags & ATA_TFLAG_WRITE))
4794 		    return 1;
4795 
4796 		if (is_atapi_taskfile(&qc->tf) &&
4797 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4798 			return 1;
4799 	}
4800 
4801 	return 0;
4802 }
4803 
4804 /**
4805  *	ata_hsm_qc_complete - finish a qc running on standard HSM
4806  *	@qc: Command to complete
4807  *	@in_wq: 1 if called from workqueue, 0 otherwise
4808  *
4809  *	Finish @qc which is running on standard HSM.
4810  *
4811  *	LOCKING:
4812  *	If @in_wq is zero, spin_lock_irqsave(host lock).
4813  *	Otherwise, none on entry and grabs host lock.
4814  */
4815 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4816 {
4817 	struct ata_port *ap = qc->ap;
4818 	unsigned long flags;
4819 
4820 	if (ap->ops->error_handler) {
4821 		if (in_wq) {
4822 			spin_lock_irqsave(ap->lock, flags);
4823 
4824 			/* EH might have kicked in while host lock is
4825 			 * released.
4826 			 */
4827 			qc = ata_qc_from_tag(ap, qc->tag);
4828 			if (qc) {
4829 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4830 					ap->ops->irq_on(ap);
4831 					ata_qc_complete(qc);
4832 				} else
4833 					ata_port_freeze(ap);
4834 			}
4835 
4836 			spin_unlock_irqrestore(ap->lock, flags);
4837 		} else {
4838 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
4839 				ata_qc_complete(qc);
4840 			else
4841 				ata_port_freeze(ap);
4842 		}
4843 	} else {
4844 		if (in_wq) {
4845 			spin_lock_irqsave(ap->lock, flags);
4846 			ap->ops->irq_on(ap);
4847 			ata_qc_complete(qc);
4848 			spin_unlock_irqrestore(ap->lock, flags);
4849 		} else
4850 			ata_qc_complete(qc);
4851 	}
4852 }
4853 
4854 /**
4855  *	ata_hsm_move - move the HSM to the next state.
4856  *	@ap: the target ata_port
4857  *	@qc: qc in progress
4858  *	@status: current device status
4859  *	@in_wq: 1 if called from workqueue, 0 otherwise
4860  *
4861  *	RETURNS:
4862  *	1 when poll next status needed, 0 otherwise.
4863  */
4864 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4865 		 u8 status, int in_wq)
4866 {
4867 	unsigned long flags = 0;
4868 	int poll_next;
4869 
4870 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4871 
4872 	/* Make sure ata_qc_issue_prot() does not throw things
4873 	 * like DMA polling into the workqueue. Notice that
4874 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4875 	 */
4876 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4877 
4878 fsm_start:
4879 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4880 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4881 
4882 	switch (ap->hsm_task_state) {
4883 	case HSM_ST_FIRST:
4884 		/* Send first data block or PACKET CDB */
4885 
4886 		/* If polling, we will stay in the work queue after
4887 		 * sending the data. Otherwise, interrupt handler
4888 		 * takes over after sending the data.
4889 		 */
4890 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4891 
4892 		/* check device status */
4893 		if (unlikely((status & ATA_DRQ) == 0)) {
4894 			/* handle BSY=0, DRQ=0 as error */
4895 			if (likely(status & (ATA_ERR | ATA_DF)))
4896 				/* device stops HSM for abort/error */
4897 				qc->err_mask |= AC_ERR_DEV;
4898 			else
4899 				/* HSM violation. Let EH handle this */
4900 				qc->err_mask |= AC_ERR_HSM;
4901 
4902 			ap->hsm_task_state = HSM_ST_ERR;
4903 			goto fsm_start;
4904 		}
4905 
4906 		/* Device should not ask for data transfer (DRQ=1)
4907 		 * when it finds something wrong.
4908 		 * We ignore DRQ here and stop the HSM by
4909 		 * changing hsm_task_state to HSM_ST_ERR and
4910 		 * let the EH abort the command or reset the device.
4911 		 */
4912 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
4913 			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4914 					"error, dev_stat 0x%X\n", status);
4915 			qc->err_mask |= AC_ERR_HSM;
4916 			ap->hsm_task_state = HSM_ST_ERR;
4917 			goto fsm_start;
4918 		}
4919 
4920 		/* Send the CDB (atapi) or the first data block (ata pio out).
4921 		 * During the state transition, interrupt handler shouldn't
4922 		 * be invoked before the data transfer is complete and
4923 		 * hsm_task_state is changed. Hence, the following locking.
4924 		 */
4925 		if (in_wq)
4926 			spin_lock_irqsave(ap->lock, flags);
4927 
4928 		if (qc->tf.protocol == ATA_PROT_PIO) {
4929 			/* PIO data out protocol.
4930 			 * send first data block.
4931 			 */
4932 
4933 			/* ata_pio_sectors() might change the state
4934 			 * to HSM_ST_LAST. so, the state is changed here
4935 			 * before ata_pio_sectors().
4936 			 */
4937 			ap->hsm_task_state = HSM_ST;
4938 			ata_pio_sectors(qc);
4939 			ata_altstatus(ap); /* flush */
4940 		} else
4941 			/* send CDB */
4942 			atapi_send_cdb(ap, qc);
4943 
4944 		if (in_wq)
4945 			spin_unlock_irqrestore(ap->lock, flags);
4946 
4947 		/* if polling, ata_pio_task() handles the rest.
4948 		 * otherwise, interrupt handler takes over from here.
4949 		 */
4950 		break;
4951 
4952 	case HSM_ST:
4953 		/* complete command or read/write the data register */
4954 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
4955 			/* ATAPI PIO protocol */
4956 			if ((status & ATA_DRQ) == 0) {
4957 				/* No more data to transfer or device error.
4958 				 * Device error will be tagged in HSM_ST_LAST.
4959 				 */
4960 				ap->hsm_task_state = HSM_ST_LAST;
4961 				goto fsm_start;
4962 			}
4963 
4964 			/* Device should not ask for data transfer (DRQ=1)
4965 			 * when it finds something wrong.
4966 			 * We ignore DRQ here and stop the HSM by
4967 			 * changing hsm_task_state to HSM_ST_ERR and
4968 			 * let the EH abort the command or reset the device.
4969 			 */
4970 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
4971 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4972 						"device error, dev_stat 0x%X\n",
4973 						status);
4974 				qc->err_mask |= AC_ERR_HSM;
4975 				ap->hsm_task_state = HSM_ST_ERR;
4976 				goto fsm_start;
4977 			}
4978 
4979 			atapi_pio_bytes(qc);
4980 
4981 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4982 				/* bad ireason reported by device */
4983 				goto fsm_start;
4984 
4985 		} else {
4986 			/* ATA PIO protocol */
4987 			if (unlikely((status & ATA_DRQ) == 0)) {
4988 				/* handle BSY=0, DRQ=0 as error */
4989 				if (likely(status & (ATA_ERR | ATA_DF)))
4990 					/* device stops HSM for abort/error */
4991 					qc->err_mask |= AC_ERR_DEV;
4992 				else
4993 					/* HSM violation. Let EH handle this.
4994 					 * Phantom devices also trigger this
4995 					 * condition.  Mark hint.
4996 					 */
4997 					qc->err_mask |= AC_ERR_HSM |
4998 							AC_ERR_NODEV_HINT;
4999 
5000 				ap->hsm_task_state = HSM_ST_ERR;
5001 				goto fsm_start;
5002 			}
5003 
5004 			/* For PIO reads, some devices may ask for
5005 			 * data transfer (DRQ=1) along with ERR=1.
5006 			 * We respect DRQ here and transfer one
5007 			 * block of junk data before changing the
5008 			 * hsm_task_state to HSM_ST_ERR.
5009 			 *
5010 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
5011 			 * sense since the data block has been
5012 			 * transferred to the device.
5013 			 */
5014 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5015 				/* data might be corrupted */
5016 				qc->err_mask |= AC_ERR_DEV;
5017 
5018 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5019 					ata_pio_sectors(qc);
5020 					ata_altstatus(ap);
5021 					status = ata_wait_idle(ap);
5022 				}
5023 
5024 				if (status & (ATA_BUSY | ATA_DRQ))
5025 					qc->err_mask |= AC_ERR_HSM;
5026 
5027 				/* ata_pio_sectors() might change the
5028 				 * state to HSM_ST_LAST. so, the state
5029 				 * is changed after ata_pio_sectors().
5030 				 */
5031 				ap->hsm_task_state = HSM_ST_ERR;
5032 				goto fsm_start;
5033 			}
5034 
5035 			ata_pio_sectors(qc);
5036 
5037 			if (ap->hsm_task_state == HSM_ST_LAST &&
5038 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5039 				/* all data read */
5040 				ata_altstatus(ap);
5041 				status = ata_wait_idle(ap);
5042 				goto fsm_start;
5043 			}
5044 		}
5045 
5046 		ata_altstatus(ap); /* flush */
5047 		poll_next = 1;
5048 		break;
5049 
5050 	case HSM_ST_LAST:
5051 		if (unlikely(!ata_ok(status))) {
5052 			qc->err_mask |= __ac_err_mask(status);
5053 			ap->hsm_task_state = HSM_ST_ERR;
5054 			goto fsm_start;
5055 		}
5056 
5057 		/* no more data to transfer */
5058 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5059 			ap->print_id, qc->dev->devno, status);
5060 
5061 		WARN_ON(qc->err_mask);
5062 
5063 		ap->hsm_task_state = HSM_ST_IDLE;
5064 
5065 		/* complete taskfile transaction */
5066 		ata_hsm_qc_complete(qc, in_wq);
5067 
5068 		poll_next = 0;
5069 		break;
5070 
5071 	case HSM_ST_ERR:
5072 		/* make sure qc->err_mask is available to
5073 		 * know what's wrong and recover
5074 		 */
5075 		WARN_ON(qc->err_mask == 0);
5076 
5077 		ap->hsm_task_state = HSM_ST_IDLE;
5078 
5079 		/* complete taskfile transaction */
5080 		ata_hsm_qc_complete(qc, in_wq);
5081 
5082 		poll_next = 0;
5083 		break;
5084 	default:
5085 		poll_next = 0;
5086 		BUG();
5087 	}
5088 
5089 	return poll_next;
5090 }
5091 
5092 static void ata_pio_task(struct work_struct *work)
5093 {
5094 	struct ata_port *ap =
5095 		container_of(work, struct ata_port, port_task.work);
5096 	struct ata_queued_cmd *qc = ap->port_task_data;
5097 	u8 status;
5098 	int poll_next;
5099 
5100 fsm_start:
5101 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5102 
5103 	/*
5104 	 * This is purely heuristic.  This is a fast path.
5105 	 * Sometimes when we enter, BSY will be cleared in
5106 	 * a chk-status or two.  If not, the drive is probably seeking
5107 	 * or something.  Snooze for a couple msecs, then
5108 	 * chk-status again.  If still busy, queue delayed work.
5109 	 */
5110 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5111 	if (status & ATA_BUSY) {
5112 		msleep(2);
5113 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5114 		if (status & ATA_BUSY) {
5115 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5116 			return;
5117 		}
5118 	}
5119 
5120 	/* move the HSM */
5121 	poll_next = ata_hsm_move(ap, qc, status, 1);
5122 
5123 	/* another command or interrupt handler
5124 	 * may be running at this point.
5125 	 */
5126 	if (poll_next)
5127 		goto fsm_start;
5128 }
5129 
5130 /**
5131  *	ata_qc_new - Request an available ATA command, for queueing
5132  *	@ap: Port from which we request an available command
5133  *	structure
5134  *
5135  *	LOCKING:
5136  *	None.
5137  */
5138 
5139 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5140 {
5141 	struct ata_queued_cmd *qc = NULL;
5142 	unsigned int i;
5143 
5144 	/* no command while frozen */
5145 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5146 		return NULL;
5147 
5148 	/* the last tag is reserved for internal command. */
5149 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5150 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5151 			qc = __ata_qc_from_tag(ap, i);
5152 			break;
5153 		}
5154 
5155 	if (qc)
5156 		qc->tag = i;
5157 
5158 	return qc;
5159 }
5160 
5161 /**
5162  *	ata_qc_new_init - Request an available ATA command, and initialize it
5163  *	@dev: Device from which we request an available command structure
5164  *
5165  *	LOCKING:
5166  *	None.
5167  */
5168 
5169 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5170 {
5171 	struct ata_port *ap = dev->ap;
5172 	struct ata_queued_cmd *qc;
5173 
5174 	qc = ata_qc_new(ap);
5175 	if (qc) {
5176 		qc->scsicmd = NULL;
5177 		qc->ap = ap;
5178 		qc->dev = dev;
5179 
5180 		ata_qc_reinit(qc);
5181 	}
5182 
5183 	return qc;
5184 }
5185 
5186 /**
5187  *	ata_qc_free - free unused ata_queued_cmd
5188  *	@qc: Command to complete
5189  *
5190  *	Designed to free unused ata_queued_cmd object
5191  *	in case something prevents using it.
5192  *
5193  *	LOCKING:
5194  *	spin_lock_irqsave(host lock)
5195  */
5196 void ata_qc_free(struct ata_queued_cmd *qc)
5197 {
5198 	struct ata_port *ap = qc->ap;
5199 	unsigned int tag;
5200 
5201 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5202 
5203 	qc->flags = 0;
5204 	tag = qc->tag;
5205 	if (likely(ata_tag_valid(tag))) {
5206 		qc->tag = ATA_TAG_POISON;
5207 		clear_bit(tag, &ap->qc_allocated);
5208 	}
5209 }
5210 
5211 void __ata_qc_complete(struct ata_queued_cmd *qc)
5212 {
5213 	struct ata_port *ap = qc->ap;
5214 
5215 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5216 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5217 
5218 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5219 		ata_sg_clean(qc);
5220 
5221 	/* command should be marked inactive atomically with qc completion */
5222 	if (qc->tf.protocol == ATA_PROT_NCQ)
5223 		ap->sactive &= ~(1 << qc->tag);
5224 	else
5225 		ap->active_tag = ATA_TAG_POISON;
5226 
5227 	/* atapi: mark qc as inactive to prevent the interrupt handler
5228 	 * from completing the command twice later, before the error handler
5229 	 * is called. (when rc != 0 and atapi request sense is needed)
5230 	 */
5231 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5232 	ap->qc_active &= ~(1 << qc->tag);
5233 
5234 	/* call completion callback */
5235 	qc->complete_fn(qc);
5236 }
5237 
5238 static void fill_result_tf(struct ata_queued_cmd *qc)
5239 {
5240 	struct ata_port *ap = qc->ap;
5241 
5242 	qc->result_tf.flags = qc->tf.flags;
5243 	ap->ops->tf_read(ap, &qc->result_tf);
5244 }
5245 
5246 /**
5247  *	ata_qc_complete - Complete an active ATA command
5248  *	@qc: Command to complete
5250  *
5251  *	Indicate to the mid and upper layers that an ATA
5252  *	command has completed, with either an ok or not-ok status.
5253  *
5254  *	LOCKING:
5255  *	spin_lock_irqsave(host lock)
5256  */
5257 void ata_qc_complete(struct ata_queued_cmd *qc)
5258 {
5259 	struct ata_port *ap = qc->ap;
5260 
5261 	/* XXX: New EH and old EH use different mechanisms to
5262 	 * synchronize EH with regular execution path.
5263 	 *
5264 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5265 	 * Normal execution path is responsible for not accessing a
5266 	 * failed qc.  libata core enforces the rule by returning NULL
5267 	 * from ata_qc_from_tag() for failed qcs.
5268 	 *
5269 	 * Old EH depends on ata_qc_complete() nullifying completion
5270 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5271 	 * not synchronize with interrupt handler.  Only PIO task is
5272 	 * taken care of.
5273 	 */
5274 	if (ap->ops->error_handler) {
5275 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5276 
5277 		if (unlikely(qc->err_mask))
5278 			qc->flags |= ATA_QCFLAG_FAILED;
5279 
5280 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5281 			if (!ata_tag_internal(qc->tag)) {
5282 				/* always fill result TF for failed qc */
5283 				fill_result_tf(qc);
5284 				ata_qc_schedule_eh(qc);
5285 				return;
5286 			}
5287 		}
5288 
5289 		/* read result TF if requested */
5290 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5291 			fill_result_tf(qc);
5292 
5293 		__ata_qc_complete(qc);
5294 	} else {
5295 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5296 			return;
5297 
5298 		/* read result TF if failed or requested */
5299 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5300 			fill_result_tf(qc);
5301 
5302 		__ata_qc_complete(qc);
5303 	}
5304 }
5305 
5306 /**
5307  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5308  *	@ap: port in question
5309  *	@qc_active: new qc_active mask
5310  *	@finish_qc: LLDD callback invoked before completing a qc
5311  *
5312  *	Complete in-flight commands.  This function is meant to be
5313  *	called from the low-level driver's interrupt routine to complete
5314  *	requests normally.  ap->qc_active and @qc_active are compared
5315  *	and commands are completed accordingly.
5316  *
5317  *	LOCKING:
5318  *	spin_lock_irqsave(host lock)
5319  *
5320  *	RETURNS:
5321  *	Number of completed commands on success, -errno otherwise.
5322  */
5323 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5324 			     void (*finish_qc)(struct ata_queued_cmd *))
5325 {
5326 	int nr_done = 0;
5327 	u32 done_mask;
5328 	int i;
5329 
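	/* bits that went from 1 to 0 are commands which have just
	 * completed; a 0 -> 1 transition in @qc_active is illegal
	 */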
5330 	done_mask = ap->qc_active ^ qc_active;
5331 
5332 	if (unlikely(done_mask & qc_active)) {
5333 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5334 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5335 		return -EINVAL;
5336 	}
5337 
5338 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5339 		struct ata_queued_cmd *qc;
5340 
5341 		if (!(done_mask & (1 << i)))
5342 			continue;
5343 
5344 		if ((qc = ata_qc_from_tag(ap, i))) {
5345 			if (finish_qc)
5346 				finish_qc(qc);
5347 			ata_qc_complete(qc);
5348 			nr_done++;
5349 		}
5350 	}
5351 
5352 	return nr_done;
5353 }
5354 
5355 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5356 {
5357 	struct ata_port *ap = qc->ap;
5358 
5359 	switch (qc->tf.protocol) {
5360 	case ATA_PROT_NCQ:
5361 	case ATA_PROT_DMA:
5362 	case ATA_PROT_ATAPI_DMA:
5363 		return 1;
5364 
5365 	case ATA_PROT_ATAPI:
5366 	case ATA_PROT_PIO:
5367 		if (ap->flags & ATA_FLAG_PIO_DMA)
5368 			return 1;
5369 
5370 		/* fall through */
5371 
5372 	default:
5373 		return 0;
5374 	}
5375 
5376 	/* never reached */
5377 }
5378 
5379 /**
5380  *	ata_qc_issue - issue taskfile to device
5381  *	@qc: command to issue to device
5382  *
5383  *	Prepare an ATA command for submission to the device.
5384  *	This includes mapping the data into a DMA-able
5385  *	area, filling in the S/G table, and finally
5386  *	writing the taskfile to hardware, starting the command.
5387  *
5388  *	LOCKING:
5389  *	spin_lock_irqsave(host lock)
5390  */
5391 void ata_qc_issue(struct ata_queued_cmd *qc)
5392 {
5393 	struct ata_port *ap = qc->ap;
5394 
5395 	/* Make sure only one non-NCQ command is outstanding.  The
5396 	 * check is skipped for old EH because it reuses active qc to
5397 	 * request ATAPI sense.
5398 	 */
5399 	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5400 
5401 	if (qc->tf.protocol == ATA_PROT_NCQ) {
5402 		WARN_ON(ap->sactive & (1 << qc->tag));
5403 		ap->sactive |= 1 << qc->tag;
5404 	} else {
5405 		WARN_ON(ap->sactive);
5406 		ap->active_tag = qc->tag;
5407 	}
5408 
5409 	qc->flags |= ATA_QCFLAG_ACTIVE;
5410 	ap->qc_active |= 1 << qc->tag;
5411 
5412 	if (ata_should_dma_map(qc)) {
5413 		if (qc->flags & ATA_QCFLAG_SG) {
5414 			if (ata_sg_setup(qc))
5415 				goto sg_err;
5416 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5417 			if (ata_sg_setup_one(qc))
5418 				goto sg_err;
5419 		}
5420 	} else {
5421 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5422 	}
5423 
5424 	ap->ops->qc_prep(qc);
5425 
5426 	qc->err_mask |= ap->ops->qc_issue(qc);
5427 	if (unlikely(qc->err_mask))
5428 		goto err;
5429 	return;
5430 
5431 sg_err:
5432 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5433 	qc->err_mask |= AC_ERR_SYSTEM;
5434 err:
5435 	ata_qc_complete(qc);
5436 }
5437 
5438 /**
5439  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5440  *	@qc: command to issue to device
5441  *
5442  *	Using various libata functions and hooks, this function
5443  *	starts an ATA command.  ATA commands are grouped into
5444  *	classes called "protocols", and issuing each type of protocol
5445  *	is slightly different.
5446  *
5447  *	May be used as the qc_issue() entry in ata_port_operations.
5448  *
5449  *	LOCKING:
5450  *	spin_lock_irqsave(host lock)
5451  *
5452  *	RETURNS:
5453  *	Zero on success, AC_ERR_* mask on failure
5454  */
5455 
5456 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5457 {
5458 	struct ata_port *ap = qc->ap;
5459 
5460 	/* Use polling PIO if the LLD doesn't handle
5461 	 * interrupt-driven PIO and ATAPI CDB interrupts.
5462 	 */
5463 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
5464 		switch (qc->tf.protocol) {
5465 		case ATA_PROT_PIO:
5466 		case ATA_PROT_NODATA:
5467 		case ATA_PROT_ATAPI:
5468 		case ATA_PROT_ATAPI_NODATA:
5469 			qc->tf.flags |= ATA_TFLAG_POLLING;
5470 			break;
5471 		case ATA_PROT_ATAPI_DMA:
5472 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5473 				/* see ata_dma_blacklisted() */
5474 				BUG();
5475 			break;
5476 		default:
5477 			break;
5478 		}
5479 	}
5480 
5481 	/* select the device */
5482 	ata_dev_select(ap, qc->dev->devno, 1, 0);
5483 
5484 	/* start the command */
5485 	switch (qc->tf.protocol) {
5486 	case ATA_PROT_NODATA:
5487 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5488 			ata_qc_set_polling(qc);
5489 
5490 		ata_tf_to_host(ap, &qc->tf);
5491 		ap->hsm_task_state = HSM_ST_LAST;
5492 
5493 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5494 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5495 
5496 		break;
5497 
5498 	case ATA_PROT_DMA:
5499 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5500 
5501 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5502 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5503 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
5504 		ap->hsm_task_state = HSM_ST_LAST;
5505 		break;
5506 
5507 	case ATA_PROT_PIO:
5508 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5509 			ata_qc_set_polling(qc);
5510 
5511 		ata_tf_to_host(ap, &qc->tf);
5512 
5513 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
5514 			/* PIO data out protocol */
5515 			ap->hsm_task_state = HSM_ST_FIRST;
5516 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5517 
5518 			/* always send first data block using
5519 			 * the ata_pio_task() codepath.
5520 			 */
5521 		} else {
5522 			/* PIO data in protocol */
5523 			ap->hsm_task_state = HSM_ST;
5524 
5525 			if (qc->tf.flags & ATA_TFLAG_POLLING)
5526 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
5527 
5528 			/* if polling, ata_pio_task() handles the rest.
5529 			 * otherwise, interrupt handler takes over from here.
5530 			 */
5531 		}
5532 
5533 		break;
5534 
5535 	case ATA_PROT_ATAPI:
5536 	case ATA_PROT_ATAPI_NODATA:
5537 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5538 			ata_qc_set_polling(qc);
5539 
5540 		ata_tf_to_host(ap, &qc->tf);
5541 
5542 		ap->hsm_task_state = HSM_ST_FIRST;
5543 
5544 		/* send cdb by polling if no cdb interrupt */
5545 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5546 		    (qc->tf.flags & ATA_TFLAG_POLLING))
5547 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5548 		break;
5549 
5550 	case ATA_PROT_ATAPI_DMA:
5551 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5552 
5553 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5554 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5555 		ap->hsm_task_state = HSM_ST_FIRST;
5556 
5557 		/* send cdb by polling if no cdb interrupt */
5558 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5559 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5560 		break;
5561 
5562 	default:
5563 		WARN_ON(1);
5564 		return AC_ERR_SYSTEM;
5565 	}
5566 
5567 	return 0;
5568 }
5569 
5570 /**
5571  *	ata_host_intr - Handle host interrupt for given (port, task)
5572  *	@ap: Port on which interrupt arrived (possibly...)
5573  *	@qc: Taskfile currently active in engine
5574  *
5575  *	Handle host interrupt for given queued command.  Currently,
5576  *	only DMA interrupts are handled.  All other commands are
5577  *	handled via polling with interrupts disabled (nIEN bit).
5578  *
5579  *	LOCKING:
5580  *	spin_lock_irqsave(host lock)
5581  *
5582  *	RETURNS:
5583  *	One if interrupt was handled, zero if not (shared irq).
5584  */
5585 
5586 inline unsigned int ata_host_intr (struct ata_port *ap,
5587 				   struct ata_queued_cmd *qc)
5588 {
5589 	struct ata_eh_info *ehi = &ap->eh_info;
5590 	u8 status, host_stat = 0;
5591 
5592 	VPRINTK("ata%u: protocol %d task_state %d\n",
5593 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5594 
5595 	/* Check whether we are expecting interrupt in this state */
5596 	switch (ap->hsm_task_state) {
5597 	case HSM_ST_FIRST:
5598 		/* Some pre-ATAPI-4 devices assert INTRQ
5599 		 * at this state when ready to receive CDB.
5600 		 */
5601 
5602 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5603 		 * The flag is set only for ATAPI devices, so there is no
5604 		 * need to check is_atapi_taskfile(&qc->tf) again.
5605 		 */
5606 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5607 			goto idle_irq;
5608 		break;
5609 	case HSM_ST_LAST:
5610 		if (qc->tf.protocol == ATA_PROT_DMA ||
5611 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5612 			/* check status of DMA engine */
5613 			host_stat = ap->ops->bmdma_status(ap);
5614 			VPRINTK("ata%u: host_stat 0x%X\n",
5615 				ap->print_id, host_stat);
5616 
5617 			/* if it's not our irq... */
5618 			if (!(host_stat & ATA_DMA_INTR))
5619 				goto idle_irq;
5620 
5621 			/* before we do anything else, clear DMA-Start bit */
5622 			ap->ops->bmdma_stop(qc);
5623 
5624 			if (unlikely(host_stat & ATA_DMA_ERR)) {
5625 				/* error when transferring data to/from memory */
5626 				qc->err_mask |= AC_ERR_HOST_BUS;
5627 				ap->hsm_task_state = HSM_ST_ERR;
5628 			}
5629 		}
5630 		break;
5631 	case HSM_ST:
5632 		break;
5633 	default:
5634 		goto idle_irq;
5635 	}
5636 
5637 	/* check altstatus */
5638 	status = ata_altstatus(ap);
5639 	if (status & ATA_BUSY)
5640 		goto idle_irq;
5641 
5642 	/* check main status, clearing INTRQ */
5643 	status = ata_chk_status(ap);
5644 	if (unlikely(status & ATA_BUSY))
5645 		goto idle_irq;
5646 
5647 	/* ack bmdma irq events */
5648 	ap->ops->irq_clear(ap);
5649 
5650 	ata_hsm_move(ap, qc, status, 0);
5651 
5652 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5653 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5654 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5655 
5656 	return 1;	/* irq handled */
5657 
5658 idle_irq:
5659 	ap->stats.idle_irq++;
5660 
5661 #ifdef ATA_IRQ_TRAP
5662 	if ((ap->stats.idle_irq % 1000) == 0) {
5663 		ap->ops->irq_ack(ap, 0); /* debug trap */
5664 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5665 		return 1;
5666 	}
5667 #endif
5668 	return 0;	/* irq not handled */
5669 }
5670 
5671 /**
5672  *	ata_interrupt - Default ATA host interrupt handler
5673  *	@irq: irq line (unused)
5674  *	@dev_instance: pointer to our ata_host information structure
5675  *
5676  *	Default interrupt handler for PCI IDE devices.  Calls
5677  *	ata_host_intr() for each port that is not disabled.
5678  *
5679  *	LOCKING:
5680  *	Obtains host lock during operation.
5681  *
5682  *	RETURNS:
5683  *	IRQ_NONE or IRQ_HANDLED.
5684  */
5685 
5686 irqreturn_t ata_interrupt (int irq, void *dev_instance)
5687 {
5688 	struct ata_host *host = dev_instance;
5689 	unsigned int i;
5690 	unsigned int handled = 0;
5691 	unsigned long flags;
5692 
5693 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5694 	spin_lock_irqsave(&host->lock, flags);
5695 
5696 	for (i = 0; i < host->n_ports; i++) {
5697 		struct ata_port *ap;
5698 
5699 		ap = host->ports[i];
5700 		if (ap &&
5701 		    !(ap->flags & ATA_FLAG_DISABLED)) {
5702 			struct ata_queued_cmd *qc;
5703 
5704 			qc = ata_qc_from_tag(ap, ap->active_tag);
5705 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5706 			    (qc->flags & ATA_QCFLAG_ACTIVE))
5707 				handled |= ata_host_intr(ap, qc);
5708 		}
5709 	}
5710 
5711 	spin_unlock_irqrestore(&host->lock, flags);
5712 
5713 	return IRQ_RETVAL(handled);
5714 }
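
/*
 * Example (illustrative sketch, not part of the original source): an LLD
 * with its own controller-level interrupt status register can still reuse
 * ata_host_intr() for the per-port SFF handling and keep only the
 * controller-specific dispatch in its handler.  All my_drv_* names below
 * are hypothetical.
 *
 *	static irqreturn_t my_drv_interrupt(int irq, void *dev_instance)
 *	{
 *		struct ata_host *host = dev_instance;
 *		unsigned int i, handled = 0;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&host->lock, flags);
 *		for (i = 0; i < host->n_ports; i++) {
 *			struct ata_port *ap = host->ports[i];
 *			struct ata_queued_cmd *qc;
 *
 *			if (!ap || (ap->flags & ATA_FLAG_DISABLED))
 *				continue;
 *			qc = ata_qc_from_tag(ap, ap->active_tag);
 *			if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
 *				handled |= ata_host_intr(ap, qc);
 *		}
 *		spin_unlock_irqrestore(&host->lock, flags);
 *		return IRQ_RETVAL(handled);
 *	}
 */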
5715 
5716 /**
5717  *	sata_scr_valid - test whether SCRs are accessible
5718  *	@ap: ATA port to test SCR accessibility for
5719  *
5720  *	Test whether SCRs are accessible for @ap.
5721  *
5722  *	LOCKING:
5723  *	None.
5724  *
5725  *	RETURNS:
5726  *	1 if SCRs are accessible, 0 otherwise.
5727  */
5728 int sata_scr_valid(struct ata_port *ap)
5729 {
5730 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5731 }
5732 
5733 /**
5734  *	sata_scr_read - read SCR register of the specified port
5735  *	@ap: ATA port to read SCR for
5736  *	@reg: SCR to read
5737  *	@val: Place to store read value
5738  *
5739  *	Read SCR register @reg of @ap into *@val.  This function is
5740  *	guaranteed to succeed if the cable type of the port is SATA
5741  *	and the port implements ->scr_read.
5742  *
5743  *	LOCKING:
5744  *	None.
5745  *
5746  *	RETURNS:
5747  *	0 on success, negative errno on failure.
5748  */
5749 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5750 {
5751 	if (sata_scr_valid(ap))
5752 		return ap->ops->scr_read(ap, reg, val);
5753 	return -EOPNOTSUPP;
5754 }
5755 
5756 /**
5757  *	sata_scr_write - write SCR register of the specified port
5758  *	@ap: ATA port to write SCR for
5759  *	@reg: SCR to write
5760  *	@val: value to write
5761  *
5762  *	Write @val to SCR register @reg of @ap.  This function is
5763  *	guaranteed to succeed if the cable type of the port is SATA
5764  *	and the port implements ->scr_read.
5765  *
5766  *	LOCKING:
5767  *	None.
5768  *
5769  *	RETURNS:
5770  *	0 on success, negative errno on failure.
5771  */
5772 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5773 {
5774 	if (sata_scr_valid(ap))
5775 		return ap->ops->scr_write(ap, reg, val);
5776 	return -EOPNOTSUPP;
5777 }
5778 
5779 /**
5780  *	sata_scr_write_flush - write SCR register of the specified port and flush
5781  *	@ap: ATA port to write SCR for
5782  *	@reg: SCR to write
5783  *	@val: value to write
5784  *
5785  *	This function is identical to sata_scr_write() except that it
5786  *	performs a flush after writing to the register.
5787  *
5788  *	LOCKING:
5789  *	None.
5790  *
5791  *	RETURNS:
5792  *	0 on success, negative errno on failure.
5793  */
5794 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5795 {
5796 	int rc;
5797 
5798 	if (sata_scr_valid(ap)) {
5799 		rc = ap->ops->scr_write(ap, reg, val);
5800 		if (rc == 0)
5801 			rc = ap->ops->scr_read(ap, reg, &val);
5802 		return rc;
5803 	}
5804 	return -EOPNOTSUPP;
5805 }
5806 
5807 /**
5808  *	ata_port_online - test whether the given port is online
5809  *	@ap: ATA port to test
5810  *
5811  *	Test whether @ap is online.  Note that this function returns 0
5812  *	if online status of @ap cannot be obtained, so
5813  *	ata_port_online(ap) != !ata_port_offline(ap).
5814  *
5815  *	LOCKING:
5816  *	None.
5817  *
5818  *	RETURNS:
5819  *	1 if the port online status is available and online.
5820  */
5821 int ata_port_online(struct ata_port *ap)
5822 {
5823 	u32 sstatus;
5824 
5825 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5826 		return 1;
5827 	return 0;
5828 }
5829 
5830 /**
5831  *	ata_port_offline - test whether the given port is offline
5832  *	@ap: ATA port to test
5833  *
5834  *	Test whether @ap is offline.  Note that this function returns
5835  *	0 if offline status of @ap cannot be obtained, so
5836  *	ata_port_online(ap) != !ata_port_offline(ap).
5837  *
5838  *	LOCKING:
5839  *	None.
5840  *
5841  *	RETURNS:
5842  *	1 if the port offline status is available and offline.
5843  */
5844 int ata_port_offline(struct ata_port *ap)
5845 {
5846 	u32 sstatus;
5847 
5848 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5849 		return 1;
5850 	return 0;
5851 }
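
/*
 * Example (illustrative sketch, not part of the original source): the SCR
 * helpers above fail with -EOPNOTSUPP unless sata_scr_valid() is true, so
 * callers can use them unconditionally and just check the return value.
 * A typical link-speed query might look like this:
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0 &&
 *	    ata_port_online(ap))
 *		ata_port_printk(ap, KERN_INFO, "link up, SPD %u\n",
 *				(sstatus >> 4) & 0xf);
 */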
5852 
5853 int ata_flush_cache(struct ata_device *dev)
5854 {
5855 	unsigned int err_mask;
5856 	u8 cmd;
5857 
5858 	if (!ata_try_flush_cache(dev))
5859 		return 0;
5860 
5861 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5862 		cmd = ATA_CMD_FLUSH_EXT;
5863 	else
5864 		cmd = ATA_CMD_FLUSH;
5865 
5866 	err_mask = ata_do_simple_cmd(dev, cmd);
5867 	if (err_mask) {
5868 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5869 		return -EIO;
5870 	}
5871 
5872 	return 0;
5873 }
5874 
5875 #ifdef CONFIG_PM
5876 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5877 			       unsigned int action, unsigned int ehi_flags,
5878 			       int wait)
5879 {
5880 	unsigned long flags;
5881 	int i, rc;
5882 
5883 	for (i = 0; i < host->n_ports; i++) {
5884 		struct ata_port *ap = host->ports[i];
5885 
5886 		/* Previous resume operation might still be in
5887 		 * progress.  Wait for PM_PENDING to clear.
5888 		 */
5889 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5890 			ata_port_wait_eh(ap);
5891 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5892 		}
5893 
5894 		/* request PM ops to EH */
5895 		spin_lock_irqsave(ap->lock, flags);
5896 
5897 		ap->pm_mesg = mesg;
5898 		if (wait) {
5899 			rc = 0;
5900 			ap->pm_result = &rc;
5901 		}
5902 
5903 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5904 		ap->eh_info.action |= action;
5905 		ap->eh_info.flags |= ehi_flags;
5906 
5907 		ata_port_schedule_eh(ap);
5908 
5909 		spin_unlock_irqrestore(ap->lock, flags);
5910 
5911 		/* wait and check result */
5912 		if (wait) {
5913 			ata_port_wait_eh(ap);
5914 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5915 			if (rc)
5916 				return rc;
5917 		}
5918 	}
5919 
5920 	return 0;
5921 }
5922 
5923 /**
5924  *	ata_host_suspend - suspend host
5925  *	@host: host to suspend
5926  *	@mesg: PM message
5927  *
5928  *	Suspend @host.  Actual operation is performed by EH.  This
5929  *	function requests EH to perform PM operations and waits for EH
5930  *	to finish.
5931  *
5932  *	LOCKING:
5933  *	Kernel thread context (may sleep).
5934  *
5935  *	RETURNS:
5936  *	0 on success, -errno on failure.
5937  */
5938 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5939 {
5940 	int rc;
5941 
5942 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5943 	if (rc == 0)
5944 		host->dev->power.power_state = mesg;
5945 	return rc;
5946 }
5947 
5948 /**
5949  *	ata_host_resume - resume host
5950  *	@host: host to resume
5951  *
5952  *	Resume @host.  Actual operation is performed by EH.  This
5953  *	function requests EH to perform PM operations and returns.
5954  *	Note that all resume operations are performed in parallel.
5955  *
5956  *	LOCKING:
5957  *	Kernel thread context (may sleep).
5958  */
5959 void ata_host_resume(struct ata_host *host)
5960 {
5961 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5962 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5963 	host->dev->power.power_state = PMSG_ON;
5964 }
5965 #endif
5966 
5967 /**
5968  *	ata_port_start - Set port up for dma.
5969  *	@ap: Port to initialize
5970  *
5971  *	Called just after data structures for each port are
5972  *	initialized.  Allocates space for PRD table.
5973  *
5974  *	May be used as the port_start() entry in ata_port_operations.
5975  *
5976  *	LOCKING:
5977  *	Inherited from caller.
5978  */
5979 int ata_port_start(struct ata_port *ap)
5980 {
5981 	struct device *dev = ap->dev;
5982 	int rc;
5983 
5984 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5985 				      GFP_KERNEL);
5986 	if (!ap->prd)
5987 		return -ENOMEM;
5988 
5989 	rc = ata_pad_alloc(ap, dev);
5990 	if (rc)
5991 		return rc;
5992 
5993 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5994 		(unsigned long long)ap->prd_dma);
5995 	return 0;
5996 }
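
/*
 * Example (illustrative sketch, not part of the original source): an LLD
 * that needs per-port private data typically wraps ata_port_start()
 * rather than replacing it, so the PRD table is still allocated by the
 * helper above.  my_drv_port_start and struct my_port_priv are
 * hypothetical.
 *
 *	static int my_drv_port_start(struct ata_port *ap)
 *	{
 *		struct my_port_priv *pp;
 *		int rc;
 *
 *		rc = ata_port_start(ap);
 *		if (rc)
 *			return rc;
 *
 *		pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */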
5997 
5998 /**
5999  *	ata_dev_init - Initialize an ata_device structure
6000  *	@dev: Device structure to initialize
6001  *
6002  *	Initialize @dev in preparation for probing.
6003  *
6004  *	LOCKING:
6005  *	Inherited from caller.
6006  */
6007 void ata_dev_init(struct ata_device *dev)
6008 {
6009 	struct ata_port *ap = dev->ap;
6010 	unsigned long flags;
6011 
6012 	/* SATA spd limit is bound to the first device */
6013 	ap->sata_spd_limit = ap->hw_sata_spd_limit;
6014 	ap->sata_spd = 0;
6015 
6016 	/* High bits of dev->flags are used to record warm plug
6017 	 * requests which occur asynchronously.  Synchronize using
6018 	 * host lock.
6019 	 */
6020 	spin_lock_irqsave(ap->lock, flags);
6021 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
6022 	spin_unlock_irqrestore(ap->lock, flags);
6023 
6024 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6025 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6026 	dev->pio_mask = UINT_MAX;
6027 	dev->mwdma_mask = UINT_MAX;
6028 	dev->udma_mask = UINT_MAX;
6029 }
6030 
6031 /**
6032  *	ata_port_alloc - allocate and initialize basic ATA port resources
6033  *	@host: ATA host this allocated port belongs to
6034  *
6035  *	Allocate and initialize basic ATA port resources.
6036  *
6037  *	RETURNS:
6038  *	Allocated ATA port on success, NULL on failure.
6039  *
6040  *	LOCKING:
6041  *	Inherited from calling layer (may sleep).
6042  */
6043 struct ata_port *ata_port_alloc(struct ata_host *host)
6044 {
6045 	struct ata_port *ap;
6046 	unsigned int i;
6047 
6048 	DPRINTK("ENTER\n");
6049 
6050 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6051 	if (!ap)
6052 		return NULL;
6053 
6054 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6055 	ap->lock = &host->lock;
6056 	ap->flags = ATA_FLAG_DISABLED;
6057 	ap->print_id = -1;
6058 	ap->ctl = ATA_DEVCTL_OBS;
6059 	ap->host = host;
6060 	ap->dev = host->dev;
6061 
6062 	ap->hw_sata_spd_limit = UINT_MAX;
6063 	ap->active_tag = ATA_TAG_POISON;
6064 	ap->last_ctl = 0xFF;
6065 
6066 #if defined(ATA_VERBOSE_DEBUG)
6067 	/* turn on all debugging levels */
6068 	ap->msg_enable = 0x00FF;
6069 #elif defined(ATA_DEBUG)
6070 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6071 #else
6072 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6073 #endif
6074 
6075 	INIT_DELAYED_WORK(&ap->port_task, NULL);
6076 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6077 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6078 	INIT_LIST_HEAD(&ap->eh_done_q);
6079 	init_waitqueue_head(&ap->eh_wait_q);
6080 	init_timer_deferrable(&ap->fastdrain_timer);
6081 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6082 	ap->fastdrain_timer.data = (unsigned long)ap;
6083 
6084 	ap->cbl = ATA_CBL_NONE;
6085 
6086 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
6087 		struct ata_device *dev = &ap->device[i];
6088 		dev->ap = ap;
6089 		dev->devno = i;
6090 		ata_dev_init(dev);
6091 	}
6092 
6093 #ifdef ATA_IRQ_TRAP
6094 	ap->stats.unhandled_irq = 1;
6095 	ap->stats.idle_irq = 1;
6096 #endif
6097 	return ap;
6098 }
6099 
6100 static void ata_host_release(struct device *gendev, void *res)
6101 {
6102 	struct ata_host *host = dev_get_drvdata(gendev);
6103 	int i;
6104 
6105 	for (i = 0; i < host->n_ports; i++) {
6106 		struct ata_port *ap = host->ports[i];
6107 
6108 		if (!ap)
6109 			continue;
6110 
6111 		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6112 			ap->ops->port_stop(ap);
6113 	}
6114 
6115 	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6116 		host->ops->host_stop(host);
6117 
6118 	for (i = 0; i < host->n_ports; i++) {
6119 		struct ata_port *ap = host->ports[i];
6120 
6121 		if (!ap)
6122 			continue;
6123 
6124 		if (ap->scsi_host)
6125 			scsi_host_put(ap->scsi_host);
6126 
6127 		kfree(ap);
6128 		host->ports[i] = NULL;
6129 	}
6130 
6131 	dev_set_drvdata(gendev, NULL);
6132 }
6133 
6134 /**
6135  *	ata_host_alloc - allocate and init basic ATA host resources
6136  *	@dev: generic device this host is associated with
6137  *	@max_ports: maximum number of ATA ports associated with this host
6138  *
6139  *	Allocate and initialize basic ATA host resources.  An LLD calls
6140  *	this function to allocate a host, then fully initializes it and
6141  *	attaches it using ata_host_register().
6142  *
6143  *	@max_ports ports are allocated and host->n_ports is
6144  *	initialized to @max_ports.  The caller is allowed to decrease
6145  *	host->n_ports before calling ata_host_register().  The unused
6146  *	ports will be automatically freed on registration.
6147  *
6148  *	RETURNS:
6149  *	Allocated ATA host on success, NULL on failure.
6150  *
6151  *	LOCKING:
6152  *	Inherited from calling layer (may sleep).
6153  */
6154 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6155 {
6156 	struct ata_host *host;
6157 	size_t sz;
6158 	int i;
6159 
6160 	DPRINTK("ENTER\n");
6161 
6162 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6163 		return NULL;
6164 
6165 	/* alloc a container for our list of ATA ports (buses) */
6166 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6168 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6169 	if (!host)
6170 		goto err_out;
6171 
6172 	devres_add(dev, host);
6173 	dev_set_drvdata(dev, host);
6174 
6175 	spin_lock_init(&host->lock);
6176 	host->dev = dev;
6177 	host->n_ports = max_ports;
6178 
6179 	/* allocate ports bound to this host */
6180 	for (i = 0; i < max_ports; i++) {
6181 		struct ata_port *ap;
6182 
6183 		ap = ata_port_alloc(host);
6184 		if (!ap)
6185 			goto err_out;
6186 
6187 		ap->port_no = i;
6188 		host->ports[i] = ap;
6189 	}
6190 
6191 	devres_remove_group(dev, NULL);
6192 	return host;
6193 
6194  err_out:
6195 	devres_release_group(dev, NULL);
6196 	return NULL;
6197 }
6198 
6199 /**
6200  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6201  *	@dev: generic device this host is associated with
6202  *	@ppi: array of ATA port_info to initialize host with
6203  *	@n_ports: number of ATA ports attached to this host
6204  *
6205  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6206  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6207  *	last entry will be used for the remaining ports.
6208  *
6209  *	RETURNS:
6210  *	Allocated ATA host on success, NULL on failure.
6211  *
6212  *	LOCKING:
6213  *	Inherited from calling layer (may sleep).
6214  */
6215 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6216 				      const struct ata_port_info * const * ppi,
6217 				      int n_ports)
6218 {
6219 	const struct ata_port_info *pi;
6220 	struct ata_host *host;
6221 	int i, j;
6222 
6223 	host = ata_host_alloc(dev, n_ports);
6224 	if (!host)
6225 		return NULL;
6226 
6227 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6228 		struct ata_port *ap = host->ports[i];
6229 
6230 		if (ppi[j])
6231 			pi = ppi[j++];
6232 
6233 		ap->pio_mask = pi->pio_mask;
6234 		ap->mwdma_mask = pi->mwdma_mask;
6235 		ap->udma_mask = pi->udma_mask;
6236 		ap->flags |= pi->flags;
6237 		ap->ops = pi->port_ops;
6238 
6239 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6240 			host->ops = pi->port_ops;
6241 		if (!host->private_data && pi->private_data)
6242 			host->private_data = pi->private_data;
6243 	}
6244 
6245 	return host;
6246 }
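
/*
 * Example (illustrative sketch, not part of the original source): a
 * two-channel controller whose channels share one ata_port_info can pass
 * a single-entry, NULL-terminated @ppi; the last entry is reused for the
 * remaining ports.  my_drv_port_info, my_drv_port_ops and dev (the
 * controller's struct device) are hypothetical.
 *
 *	static const struct ata_port_info my_drv_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= 0x1f,
 *		.mwdma_mask	= 0x07,
 *		.udma_mask	= 0x3f,
 *		.port_ops	= &my_drv_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &my_drv_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 *
 *	if (!host)
 *		return -ENOMEM;
 */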
6247 
6248 /**
6249  *	ata_host_start - start and freeze ports of an ATA host
6250  *	@host: ATA host to start ports for
6251  *
6252  *	Start and then freeze ports of @host.  Started status is
6253  *	recorded in host->flags, so this function can be called
6254  *	multiple times.  Ports are guaranteed to get started only
6255  *	once.  If host->ops isn't initialized yet, it is set to the
6256  *	first non-dummy port ops.
6257  *
6258  *	LOCKING:
6259  *	Inherited from calling layer (may sleep).
6260  *
6261  *	RETURNS:
6262  *	0 if all ports are started successfully, -errno otherwise.
6263  */
6264 int ata_host_start(struct ata_host *host)
6265 {
6266 	int i, rc;
6267 
6268 	if (host->flags & ATA_HOST_STARTED)
6269 		return 0;
6270 
6271 	for (i = 0; i < host->n_ports; i++) {
6272 		struct ata_port *ap = host->ports[i];
6273 
6274 		if (!host->ops && !ata_port_is_dummy(ap))
6275 			host->ops = ap->ops;
6276 
6277 		if (ap->ops->port_start) {
6278 			rc = ap->ops->port_start(ap);
6279 			if (rc) {
6280 				ata_port_printk(ap, KERN_ERR, "failed to "
6281 						"start port (errno=%d)\n", rc);
6282 				goto err_out;
6283 			}
6284 		}
6285 
6286 		ata_eh_freeze_port(ap);
6287 	}
6288 
6289 	host->flags |= ATA_HOST_STARTED;
6290 	return 0;
6291 
6292  err_out:
6293 	while (--i >= 0) {
6294 		struct ata_port *ap = host->ports[i];
6295 
6296 		if (ap->ops->port_stop)
6297 			ap->ops->port_stop(ap);
6298 	}
6299 	return rc;
6300 }
6301 
6302 /**
6303  *	ata_host_init - Initialize a host struct
6304  *	@host:	host to initialize
6305  *	@dev:	device host is attached to
6306  *	@flags:	host flags
6307  *	@ops:	port_ops
6308  *
6309  *	LOCKING:
6310  *	PCI/etc. bus probe sem.
6311  *
6312  */
6313 /* KILLME - the only user left is ipr */
6314 void ata_host_init(struct ata_host *host, struct device *dev,
6315 		   unsigned long flags, const struct ata_port_operations *ops)
6316 {
6317 	spin_lock_init(&host->lock);
6318 	host->dev = dev;
6319 	host->flags = flags;
6320 	host->ops = ops;
6321 }
6322 
6323 /**
6324  *	ata_host_register - register initialized ATA host
6325  *	@host: ATA host to register
6326  *	@sht: template for SCSI host
6327  *
6328  *	Register initialized ATA host.  @host is allocated using
6329  *	ata_host_alloc() and fully initialized by LLD.  This function
6330  *	starts ports, registers @host with ATA and SCSI layers and
6331  *	probes registered devices.
6332  *
6333  *	LOCKING:
6334  *	Inherited from calling layer (may sleep).
6335  *
6336  *	RETURNS:
6337  *	0 on success, -errno otherwise.
6338  */
6339 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6340 {
6341 	int i, rc;
6342 
6343 	/* host must have been started */
6344 	if (!(host->flags & ATA_HOST_STARTED)) {
6345 		dev_printk(KERN_ERR, host->dev,
6346 			   "BUG: trying to register unstarted host\n");
6347 		WARN_ON(1);
6348 		return -EINVAL;
6349 	}
6350 
6351 	/* Blow away unused ports.  This happens when LLD can't
6352 	 * determine the exact number of ports to allocate at
6353 	 * allocation time.
6354 	 */
6355 	for (i = host->n_ports; host->ports[i]; i++)
6356 		kfree(host->ports[i]);
6357 
6358 	/* give ports names and add SCSI hosts */
6359 	for (i = 0; i < host->n_ports; i++)
6360 		host->ports[i]->print_id = ata_print_id++;
6361 
6362 	rc = ata_scsi_add_hosts(host, sht);
6363 	if (rc)
6364 		return rc;
6365 
6366 	/* associate with ACPI nodes */
6367 	ata_acpi_associate(host);
6368 
6369 	/* set cable, sata_spd_limit and report */
6370 	for (i = 0; i < host->n_ports; i++) {
6371 		struct ata_port *ap = host->ports[i];
6372 		int irq_line;
6373 		u32 scontrol;
6374 		unsigned long xfer_mask;
6375 
6376 		/* set SATA cable type if still unset */
6377 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6378 			ap->cbl = ATA_CBL_SATA;
6379 
6380 		/* init sata_spd_limit to the current value */
6381 		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6382 			int spd = (scontrol >> 4) & 0xf;
6383 			if (spd)
6384 				ap->hw_sata_spd_limit &= (1 << spd) - 1;
6385 		}
6386 		ap->sata_spd_limit = ap->hw_sata_spd_limit;
6387 
6388 		/* report the secondary IRQ for second channel legacy */
6389 		irq_line = host->irq;
6390 		if (i == 1 && host->irq2)
6391 			irq_line = host->irq2;
6392 
6393 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6394 					      ap->udma_mask);
6395 
6396 		/* print per-port info to dmesg */
6397 		if (!ata_port_is_dummy(ap))
6398 			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6399 					"ctl 0x%p bmdma 0x%p irq %d\n",
6400 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6401 					ata_mode_string(xfer_mask),
6402 					ap->ioaddr.cmd_addr,
6403 					ap->ioaddr.ctl_addr,
6404 					ap->ioaddr.bmdma_addr,
6405 					irq_line);
6406 		else
6407 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6408 	}
6409 
6410 	/* perform each probe synchronously */
6411 	DPRINTK("probe begin\n");
6412 	for (i = 0; i < host->n_ports; i++) {
6413 		struct ata_port *ap = host->ports[i];
6414 		int rc;
6415 
6416 		/* probe */
6417 		if (ap->ops->error_handler) {
6418 			struct ata_eh_info *ehi = &ap->eh_info;
6419 			unsigned long flags;
6420 
6421 			ata_port_probe(ap);
6422 
6423 			/* kick EH for boot probing */
6424 			spin_lock_irqsave(ap->lock, flags);
6425 
6426 			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6427 			ehi->action |= ATA_EH_SOFTRESET;
6428 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6429 
6430 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6431 			ap->pflags |= ATA_PFLAG_LOADING;
6432 			ata_port_schedule_eh(ap);
6433 
6434 			spin_unlock_irqrestore(ap->lock, flags);
6435 
6436 			/* wait for EH to finish */
6437 			ata_port_wait_eh(ap);
6438 		} else {
6439 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6440 			rc = ata_bus_probe(ap);
6441 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
6442 
6443 			if (rc) {
6444 				/* FIXME: do something useful here?
6445 				 * Current libata behavior will
6446 				 * tear down everything when
6447 				 * the module is removed
6448 				 * or the h/w is unplugged.
6449 				 */
6450 			}
6451 		}
6452 	}
6453 
6454 	/* probes are done, now scan each port's disk(s) */
6455 	DPRINTK("host probe begin\n");
6456 	for (i = 0; i < host->n_ports; i++) {
6457 		struct ata_port *ap = host->ports[i];
6458 
6459 		ata_scsi_scan_host(ap, 1);
6460 	}
6461 
6462 	return 0;
6463 }
6464 
6465 /**
6466  *	ata_host_activate - start host, request IRQ and register it
6467  *	@host: target ATA host
6468  *	@irq: IRQ to request
6469  *	@irq_handler: irq_handler used when requesting IRQ
6470  *	@irq_flags: irq_flags used when requesting IRQ
6471  *	@sht: scsi_host_template to use when registering the host
6472  *
6473  *	After allocating an ATA host and initializing it, most libata
6474  *	LLDs perform three steps to activate the host - start host,
6475  *	request IRQ and register it.  This helper takes the necessary
6476  *	arguments and performs the three steps in one go.
6477  *
6478  *	LOCKING:
6479  *	Inherited from calling layer (may sleep).
6480  *
6481  *	RETURNS:
6482  *	0 on success, -errno otherwise.
6483  */
6484 int ata_host_activate(struct ata_host *host, int irq,
6485 		      irq_handler_t irq_handler, unsigned long irq_flags,
6486 		      struct scsi_host_template *sht)
6487 {
6488 	int rc;
6489 
6490 	rc = ata_host_start(host);
6491 	if (rc)
6492 		return rc;
6493 
6494 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6495 			      dev_driver_string(host->dev), host);
6496 	if (rc)
6497 		return rc;
6498 
6499 	/* Used to print device info at probe */
6500 	host->irq = irq;
6501 
6502 	rc = ata_host_register(host, sht);
6503 	/* if failed, just free the IRQ and leave ports alone */
6504 	if (rc)
6505 		devm_free_irq(host->dev, irq, host);
6506 
6507 	return rc;
6508 }
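
/*
 * Example (illustrative sketch, not part of the original source): the
 * usual LLD probe sequence built on the helpers above.  my_drv_* names,
 * MY_DRV_BAR and the BAR layout are hypothetical, and the taskfile
 * register setup (see ata_std_ports()) is omitted for brevity.
 *
 *	static int my_drv_init_one(struct pci_dev *pdev,
 *				   const struct pci_device_id *ent)
 *	{
 *		const struct ata_port_info *ppi[] =
 *			{ &my_drv_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		rc = pcim_iomap_regions(pdev, 1 << MY_DRV_BAR, "my_drv");
 *		if (rc)
 *			return rc;
 *		host->iomap = pcim_iomap_table(pdev);
 *
 *		return ata_host_activate(host, pdev->irq, ata_interrupt,
 *					 IRQF_SHARED, &my_drv_sht);
 *	}
 */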
6509 
6510 /**
6511  *	ata_port_detach - Detach ATA port in preparation for device removal
6512  *	@ap: ATA port to be detached
6513  *
6514  *	Detach all ATA devices and the associated SCSI devices of @ap;
6515  *	then, remove the associated SCSI host.  @ap is guaranteed to
6516  *	be quiescent on return from this function.
6517  *
6518  *	LOCKING:
6519  *	Kernel thread context (may sleep).
6520  */
6521 void ata_port_detach(struct ata_port *ap)
6522 {
6523 	unsigned long flags;
6524 	int i;
6525 
6526 	if (!ap->ops->error_handler)
6527 		goto skip_eh;
6528 
6529 	/* tell EH we're leaving & flush EH */
6530 	spin_lock_irqsave(ap->lock, flags);
6531 	ap->pflags |= ATA_PFLAG_UNLOADING;
6532 	spin_unlock_irqrestore(ap->lock, flags);
6533 
6534 	ata_port_wait_eh(ap);
6535 
6536 	/* EH is now guaranteed to see UNLOADING, so no new device
6537 	 * will be attached.  Disable all existing devices.
6538 	 */
6539 	spin_lock_irqsave(ap->lock, flags);
6540 
6541 	for (i = 0; i < ATA_MAX_DEVICES; i++)
6542 		ata_dev_disable(&ap->device[i]);
6543 
6544 	spin_unlock_irqrestore(ap->lock, flags);
6545 
6546 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
6547 	 * will be skipped and retries will be terminated with bad
6548 	 * target.
6549 	 */
6550 	spin_lock_irqsave(ap->lock, flags);
6551 	ata_port_freeze(ap);	/* won't be thawed */
6552 	spin_unlock_irqrestore(ap->lock, flags);
6553 
6554 	ata_port_wait_eh(ap);
6555 	cancel_rearming_delayed_work(&ap->hotplug_task);
6556 
6557  skip_eh:
6558 	/* remove the associated SCSI host */
6559 	scsi_remove_host(ap->scsi_host);
6560 }
6561 
6562 /**
6563  *	ata_host_detach - Detach all ports of an ATA host
6564  *	@host: Host to detach
6565  *
6566  *	Detach all ports of @host.
6567  *
6568  *	LOCKING:
6569  *	Kernel thread context (may sleep).
6570  */
6571 void ata_host_detach(struct ata_host *host)
6572 {
6573 	int i;
6574 
6575 	for (i = 0; i < host->n_ports; i++)
6576 		ata_port_detach(host->ports[i]);
6577 }
6578 
6579 /**
6580  *	ata_std_ports - initialize ioaddr with standard port offsets.
6581  *	@ioaddr: IO address structure to be initialized
6582  *
6583  *	Utility function which initializes data_addr, error_addr,
6584  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6585  *	device_addr, status_addr, and command_addr to standard offsets
6586  *	relative to cmd_addr.
6587  *
6588  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6589  */
6590 
6591 void ata_std_ports(struct ata_ioports *ioaddr)
6592 {
6593 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6594 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6595 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6596 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6597 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6598 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6599 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6600 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6601 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6602 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6603 }
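
/*
 * Example (illustrative sketch, not part of the original source): a
 * typical IOMAP setup fills cmd_addr, ctl_addr and bmdma_addr and lets
 * ata_std_ports() derive the remaining taskfile register addresses.  The
 * BAR indices and the +2 control offset below are hypothetical.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *	void __iomem * const *iomap = host->iomap;
 *
 *	ioaddr->cmd_addr = iomap[0];
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = iomap[1] + 2;
 *	ioaddr->bmdma_addr = iomap[4];
 *	ata_std_ports(ioaddr);
 */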
6604 
6605 
6606 #ifdef CONFIG_PCI
6607 
6608 /**
6609  *	ata_pci_remove_one - PCI layer callback for device removal
6610  *	@pdev: PCI device that was removed
6611  *
6612  *	PCI layer indicates to libata via this hook that hot-unplug or
6613  *	module unload event has occurred.  Detach all ports.  Resource
6614  *	release is handled via devres.
6615  *
6616  *	LOCKING:
6617  *	Inherited from PCI layer (may sleep).
6618  */
6619 void ata_pci_remove_one(struct pci_dev *pdev)
6620 {
6621 	struct device *dev = pci_dev_to_dev(pdev);
6622 	struct ata_host *host = dev_get_drvdata(dev);
6623 
6624 	ata_host_detach(host);
6625 }
6626 
6627 /* move to PCI subsystem */
6628 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6629 {
6630 	unsigned long tmp = 0;
6631 
6632 	switch (bits->width) {
6633 	case 1: {
6634 		u8 tmp8 = 0;
6635 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6636 		tmp = tmp8;
6637 		break;
6638 	}
6639 	case 2: {
6640 		u16 tmp16 = 0;
6641 		pci_read_config_word(pdev, bits->reg, &tmp16);
6642 		tmp = tmp16;
6643 		break;
6644 	}
6645 	case 4: {
6646 		u32 tmp32 = 0;
6647 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6648 		tmp = tmp32;
6649 		break;
6650 	}
6651 
6652 	default:
6653 		return -EINVAL;
6654 	}
6655 
6656 	tmp &= bits->mask;
6657 
6658 	return (tmp == bits->val) ? 1 : 0;
6659 }
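
/*
 * Example (illustrative sketch, not part of the original source):
 * checking whether a channel is enabled before touching it, in the style
 * of the legacy IDE "port enable" bits.  The config register offsets and
 * bit layout in my_drv_enable_bits are hypothetical; the initializer
 * order is { reg, width, mask, val }.
 *
 *	static const struct pci_bits my_drv_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_drv_enable_bits[ap->port_no]))
 *		return -ENOENT;
 */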
6660 
6661 #ifdef CONFIG_PM
6662 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6663 {
6664 	pci_save_state(pdev);
6665 	pci_disable_device(pdev);
6666 
6667 	if (mesg.event == PM_EVENT_SUSPEND)
6668 		pci_set_power_state(pdev, PCI_D3hot);
6669 }
6670 
6671 int ata_pci_device_do_resume(struct pci_dev *pdev)
6672 {
6673 	int rc;
6674 
6675 	pci_set_power_state(pdev, PCI_D0);
6676 	pci_restore_state(pdev);
6677 
6678 	rc = pcim_enable_device(pdev);
6679 	if (rc) {
6680 		dev_printk(KERN_ERR, &pdev->dev,
6681 			   "failed to enable device after resume (%d)\n", rc);
6682 		return rc;
6683 	}
6684 
6685 	pci_set_master(pdev);
6686 	return 0;
6687 }
6688 
6689 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6690 {
6691 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6692 	int rc = 0;
6693 
6694 	rc = ata_host_suspend(host, mesg);
6695 	if (rc)
6696 		return rc;
6697 
6698 	ata_pci_device_do_suspend(pdev, mesg);
6699 
6700 	return 0;
6701 }
6702 
6703 int ata_pci_device_resume(struct pci_dev *pdev)
6704 {
6705 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6706 	int rc;
6707 
6708 	rc = ata_pci_device_do_resume(pdev);
6709 	if (rc == 0)
6710 		ata_host_resume(host);
6711 	return rc;
6712 }
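
/*
 * Example (illustrative sketch, not part of the original source): a
 * controller that needs nothing beyond the generic behaviour can wire
 * the PM entry points above straight into its pci_driver.  my_drv_*
 * names are hypothetical.
 *
 *	static struct pci_driver my_drv_pci_driver = {
 *		.name		= "my_drv",
 *		.id_table	= my_drv_pci_ids,
 *		.probe		= my_drv_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */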
6713 #endif /* CONFIG_PM */
6714 
6715 #endif /* CONFIG_PCI */
6716 
6717 
6718 static int __init ata_init(void)
6719 {
6720 	ata_probe_timeout *= HZ;
6721 	ata_wq = create_workqueue("ata");
6722 	if (!ata_wq)
6723 		return -ENOMEM;
6724 
6725 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6726 	if (!ata_aux_wq) {
6727 		destroy_workqueue(ata_wq);
6728 		return -ENOMEM;
6729 	}
6730 
6731 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6732 	return 0;
6733 }
6734 
6735 static void __exit ata_exit(void)
6736 {
6737 	destroy_workqueue(ata_wq);
6738 	destroy_workqueue(ata_aux_wq);
6739 }
6740 
6741 subsys_initcall(ata_init);
6742 module_exit(ata_exit);
6743 
6744 static unsigned long ratelimit_time;
6745 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6746 
6747 int ata_ratelimit(void)
6748 {
6749 	int rc;
6750 	unsigned long flags;
6751 
6752 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6753 
6754 	if (time_after(jiffies, ratelimit_time)) {
6755 		rc = 1;
6756 		ratelimit_time = jiffies + (HZ/5);
6757 	} else
6758 		rc = 0;
6759 
6760 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6761 
6762 	return rc;
6763 }
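
/*
 * Example (illustrative sketch, not part of the original source):
 * ata_ratelimit() is meant to guard messages that can flood the log
 * from interrupt context, e.g.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (status 0x%x)\n", status);
 */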
6764 
6765 /**
6766  *	ata_wait_register - wait until register value changes
6767  *	@reg: IO-mapped register
6768  *	@mask: Mask to apply to read register value
6769  *	@val: Wait condition
6770  *	@interval_msec: polling interval in milliseconds
6771  *	@timeout_msec: timeout in milliseconds
6772  *
6773  *	Waiting for some bits of register to change is a common
6774  *	operation for ATA controllers.  This function reads 32bit LE
6775  *	IO-mapped register @reg and tests for the following condition.
6776  *
6777  *	(*@reg & mask) != val
6778  *
6779  *	If the condition is met, it returns; otherwise, the process is
6780  *	repeated after @interval_msec until timeout.
6781  *
6782  *	LOCKING:
6783  *	Kernel thread context (may sleep)
6784  *
6785  *	RETURNS:
6786  *	The final register value.
6787  */
6788 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6789 		      unsigned long interval_msec,
6790 		      unsigned long timeout_msec)
6791 {
6792 	unsigned long timeout;
6793 	u32 tmp;
6794 
6795 	tmp = ioread32(reg);
6796 
6797 	/* Calculate timeout _after_ the first read to make sure
6798 	 * preceding writes reach the controller before starting to
6799 	 * eat away the timeout.
6800 	 */
6801 	timeout = jiffies + (timeout_msec * HZ) / 1000;
6802 
6803 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6804 		msleep(interval_msec);
6805 		tmp = ioread32(reg);
6806 	}
6807 
6808 	return tmp;
6809 }
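
/*
 * Example (illustrative sketch, not part of the original source):
 * waiting up to a second, polling every 10ms, for a controller-specific
 * busy bit to clear.  mmio (the controller's MMIO base), MY_DRV_STAT and
 * MY_DRV_STAT_BUSY are hypothetical.
 *
 *	u32 stat;
 *
 *	stat = ata_wait_register(mmio + MY_DRV_STAT, MY_DRV_STAT_BUSY,
 *				 MY_DRV_STAT_BUSY, 10, 1000);
 *	if (stat & MY_DRV_STAT_BUSY)
 *		return -EBUSY;
 */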
6810 
6811 /*
6812  * Dummy port_ops
6813  */
6814 static void ata_dummy_noret(struct ata_port *ap)	{ }
6815 static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
6816 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6817 
6818 static u8 ata_dummy_check_status(struct ata_port *ap)
6819 {
6820 	return ATA_DRDY;
6821 }
6822 
6823 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6824 {
6825 	return AC_ERR_SYSTEM;
6826 }
6827 
6828 const struct ata_port_operations ata_dummy_port_ops = {
6829 	.port_disable		= ata_port_disable,
6830 	.check_status		= ata_dummy_check_status,
6831 	.check_altstatus	= ata_dummy_check_status,
6832 	.dev_select		= ata_noop_dev_select,
6833 	.qc_prep		= ata_noop_qc_prep,
6834 	.qc_issue		= ata_dummy_qc_issue,
6835 	.freeze			= ata_dummy_noret,
6836 	.thaw			= ata_dummy_noret,
6837 	.error_handler		= ata_dummy_noret,
6838 	.post_internal_cmd	= ata_dummy_qc_noret,
6839 	.irq_clear		= ata_dummy_noret,
6840 	.port_start		= ata_dummy_ret0,
6841 	.port_stop		= ata_dummy_noret,
6842 };
6843 
6844 const struct ata_port_info ata_dummy_port_info = {
6845 	.port_ops		= &ata_dummy_port_ops,
6846 };
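
/*
 * Example (illustrative sketch, not part of the original source): a
 * controller with a disabled second channel can hand out
 * ata_dummy_port_info for that slot, so the port is registered but never
 * touched.  my_drv_port_info is hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &my_drv_port_info, &ata_dummy_port_info };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */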
6847 
6848 /*
6849  * libata is essentially a library of internal helper functions for
6850  * low-level ATA host controller drivers.  As such, the API/ABI is
6851  * likely to change as new drivers are added and updated.
6852  * Do not depend on ABI/API stability.
6853  */
6854 
6855 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6856 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6857 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6858 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6859 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6860 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6861 EXPORT_SYMBOL_GPL(ata_std_ports);
6862 EXPORT_SYMBOL_GPL(ata_host_init);
6863 EXPORT_SYMBOL_GPL(ata_host_alloc);
6864 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6865 EXPORT_SYMBOL_GPL(ata_host_start);
6866 EXPORT_SYMBOL_GPL(ata_host_register);
6867 EXPORT_SYMBOL_GPL(ata_host_activate);
6868 EXPORT_SYMBOL_GPL(ata_host_detach);
6869 EXPORT_SYMBOL_GPL(ata_sg_init);
6870 EXPORT_SYMBOL_GPL(ata_sg_init_one);
6871 EXPORT_SYMBOL_GPL(ata_hsm_move);
6872 EXPORT_SYMBOL_GPL(ata_qc_complete);
6873 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6874 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6875 EXPORT_SYMBOL_GPL(ata_tf_load);
6876 EXPORT_SYMBOL_GPL(ata_tf_read);
6877 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6878 EXPORT_SYMBOL_GPL(ata_std_dev_select);
6879 EXPORT_SYMBOL_GPL(sata_print_link_status);
6880 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6881 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6882 EXPORT_SYMBOL_GPL(ata_check_status);
6883 EXPORT_SYMBOL_GPL(ata_altstatus);
6884 EXPORT_SYMBOL_GPL(ata_exec_command);
6885 EXPORT_SYMBOL_GPL(ata_port_start);
6886 EXPORT_SYMBOL_GPL(ata_sff_port_start);
6887 EXPORT_SYMBOL_GPL(ata_interrupt);
6888 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6889 EXPORT_SYMBOL_GPL(ata_data_xfer);
6890 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6891 EXPORT_SYMBOL_GPL(ata_qc_prep);
6892 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
6893 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6894 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6895 EXPORT_SYMBOL_GPL(ata_bmdma_start);
6896 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6897 EXPORT_SYMBOL_GPL(ata_bmdma_status);
6898 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6899 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6900 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6901 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6902 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6903 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6904 EXPORT_SYMBOL_GPL(ata_port_probe);
6905 EXPORT_SYMBOL_GPL(ata_dev_disable);
6906 EXPORT_SYMBOL_GPL(sata_set_spd);
6907 EXPORT_SYMBOL_GPL(sata_phy_debounce);
6908 EXPORT_SYMBOL_GPL(sata_phy_resume);
6909 EXPORT_SYMBOL_GPL(sata_phy_reset);
6910 EXPORT_SYMBOL_GPL(__sata_phy_reset);
6911 EXPORT_SYMBOL_GPL(ata_bus_reset);
6912 EXPORT_SYMBOL_GPL(ata_std_prereset);
6913 EXPORT_SYMBOL_GPL(ata_std_softreset);
6914 EXPORT_SYMBOL_GPL(sata_port_hardreset);
6915 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6916 EXPORT_SYMBOL_GPL(ata_std_postreset);
6917 EXPORT_SYMBOL_GPL(ata_dev_classify);
6918 EXPORT_SYMBOL_GPL(ata_dev_pair);
6919 EXPORT_SYMBOL_GPL(ata_port_disable);
6920 EXPORT_SYMBOL_GPL(ata_ratelimit);
6921 EXPORT_SYMBOL_GPL(ata_wait_register);
6922 EXPORT_SYMBOL_GPL(ata_busy_sleep);
6923 EXPORT_SYMBOL_GPL(ata_wait_ready);
6924 EXPORT_SYMBOL_GPL(ata_port_queue_task);
6925 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6926 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6927 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6928 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6929 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6930 EXPORT_SYMBOL_GPL(ata_host_intr);
6931 EXPORT_SYMBOL_GPL(sata_scr_valid);
6932 EXPORT_SYMBOL_GPL(sata_scr_read);
6933 EXPORT_SYMBOL_GPL(sata_scr_write);
6934 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6935 EXPORT_SYMBOL_GPL(ata_port_online);
6936 EXPORT_SYMBOL_GPL(ata_port_offline);
6937 #ifdef CONFIG_PM
6938 EXPORT_SYMBOL_GPL(ata_host_suspend);
6939 EXPORT_SYMBOL_GPL(ata_host_resume);
6940 #endif /* CONFIG_PM */
6941 EXPORT_SYMBOL_GPL(ata_id_string);
6942 EXPORT_SYMBOL_GPL(ata_id_c_string);
6943 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6944 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6945 
6946 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6947 EXPORT_SYMBOL_GPL(ata_timing_compute);
6948 EXPORT_SYMBOL_GPL(ata_timing_merge);
6949 
6950 #ifdef CONFIG_PCI
6951 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6952 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
6953 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
6954 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
6955 EXPORT_SYMBOL_GPL(ata_pci_init_one);
6956 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6957 #ifdef CONFIG_PM
6958 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6959 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6960 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6961 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6962 #endif /* CONFIG_PM */
6963 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6964 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6965 #endif /* CONFIG_PCI */
6966 
6967 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6968 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6969 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6970 EXPORT_SYMBOL_GPL(ata_eng_timeout);
6971 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6972 EXPORT_SYMBOL_GPL(ata_port_abort);
6973 EXPORT_SYMBOL_GPL(ata_port_freeze);
6974 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6975 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6976 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6977 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6978 EXPORT_SYMBOL_GPL(ata_do_eh);
6979 EXPORT_SYMBOL_GPL(ata_irq_on);
6980 EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6981 EXPORT_SYMBOL_GPL(ata_irq_ack);
6982 EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
6983 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
6984 
6985 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6986 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6987 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6988 EXPORT_SYMBOL_GPL(ata_cable_sata);
6989