xref: /linux/drivers/ata/sata_nv.c (revision b3b77c8caef1750ebeea1054e39e358550ea9f55)
1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/DocBook/libata.*
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a similar
28  *  fashion to other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/gfp.h>
42 #include <linux/pci.h>
43 #include <linux/init.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/interrupt.h>
47 #include <linux/device.h>
48 #include <scsi/scsi_host.h>
49 #include <scsi/scsi_device.h>
50 #include <linux/libata.h>
51 
52 #define DRV_NAME			"sata_nv"
53 #define DRV_VERSION			"3.5"
54 
55 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
56 
57 enum {
58 	NV_MMIO_BAR			= 5,
59 
60 	NV_PORTS			= 2,
61 	NV_PIO_MASK			= ATA_PIO4,
62 	NV_MWDMA_MASK			= ATA_MWDMA2,
63 	NV_UDMA_MASK			= ATA_UDMA6,
64 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
65 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
66 
67 	/* INT_STATUS/ENABLE */
68 	NV_INT_STATUS			= 0x10,
69 	NV_INT_ENABLE			= 0x11,
70 	NV_INT_STATUS_CK804		= 0x440,
71 	NV_INT_ENABLE_CK804		= 0x441,
72 
73 	/* INT_STATUS/ENABLE bits */
74 	NV_INT_DEV			= 0x01,
75 	NV_INT_PM			= 0x02,
76 	NV_INT_ADDED			= 0x04,
77 	NV_INT_REMOVED			= 0x08,
78 
79 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
80 
81 	NV_INT_ALL			= 0x0f,
82 	NV_INT_MASK			= NV_INT_DEV |
83 					  NV_INT_ADDED | NV_INT_REMOVED,
84 
85 	/* INT_CONFIG */
86 	NV_INT_CONFIG			= 0x12,
87 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
88 
89 	// For PCI config register 20
90 	NV_MCP_SATA_CFG_20		= 0x50,
91 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
92 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
93 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
94 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
95 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
96 
97 	NV_ADMA_MAX_CPBS		= 32,
98 	NV_ADMA_CPB_SZ			= 128,
99 	NV_ADMA_APRD_SZ			= 16,
100 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
101 					   NV_ADMA_APRD_SZ,
102 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
103 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
104 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
105 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
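	/*
	 * For reference, with the constants above, each port's ADMA DMA
	 * buffer works out to 32 KiB: NV_ADMA_SGTBL_LEN is
	 * (1024 - 128) / 16 = 56 external APRD entries per command, so
	 * NV_ADMA_SGTBL_SZ = 56 * 16 = 896 bytes and
	 * NV_ADMA_PORT_PRIV_DMA_SZ = 32 * (128 + 896) bytes = 32 KiB.
	 */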
106 
107 	/* BAR5 offset to ADMA general registers */
108 	NV_ADMA_GEN			= 0x400,
109 	NV_ADMA_GEN_CTL			= 0x00,
110 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
111 
112 	/* BAR5 offset to ADMA ports */
113 	NV_ADMA_PORT			= 0x480,
114 
115 	/* size of ADMA port register space  */
116 	NV_ADMA_PORT_SIZE		= 0x100,
117 
118 	/* ADMA port registers */
119 	NV_ADMA_CTL			= 0x40,
120 	NV_ADMA_CPB_COUNT		= 0x42,
121 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
122 	NV_ADMA_STAT			= 0x44,
123 	NV_ADMA_CPB_BASE_LOW		= 0x48,
124 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
125 	NV_ADMA_APPEND			= 0x50,
126 	NV_ADMA_NOTIFIER		= 0x68,
127 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
128 
129 	/* NV_ADMA_CTL register bits */
130 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
131 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
132 	NV_ADMA_CTL_GO			= (1 << 7),
133 	NV_ADMA_CTL_AIEN		= (1 << 8),
134 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
135 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
136 
137 	/* CPB response flag bits */
138 	NV_CPB_RESP_DONE		= (1 << 0),
139 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
140 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
141 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
142 
143 	/* CPB control flag bits */
144 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
145 	NV_CPB_CTL_QUEUE		= (1 << 1),
146 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
147 	NV_CPB_CTL_IEN			= (1 << 3),
148 	NV_CPB_CTL_FPDMA		= (1 << 4),
149 
150 	/* APRD flags */
151 	NV_APRD_WRITE			= (1 << 1),
152 	NV_APRD_END			= (1 << 2),
153 	NV_APRD_CONT			= (1 << 3),
154 
155 	/* NV_ADMA_STAT flags */
156 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
157 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
158 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
159 	NV_ADMA_STAT_CPBERR		= (1 << 4),
160 	NV_ADMA_STAT_SERROR		= (1 << 5),
161 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
162 	NV_ADMA_STAT_IDLE		= (1 << 8),
163 	NV_ADMA_STAT_LEGACY		= (1 << 9),
164 	NV_ADMA_STAT_STOPPED		= (1 << 10),
165 	NV_ADMA_STAT_DONE		= (1 << 12),
166 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
167 					  NV_ADMA_STAT_TIMEOUT,
168 
169 	/* port flags */
170 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
171 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
172 
173 	/* MCP55 reg offset */
174 	NV_CTL_MCP55			= 0x400,
175 	NV_INT_STATUS_MCP55		= 0x440,
176 	NV_INT_ENABLE_MCP55		= 0x444,
177 	NV_NCQ_REG_MCP55		= 0x448,
178 
179 	/* MCP55 */
180 	NV_INT_ALL_MCP55		= 0xffff,
181 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
182 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
183 
184 	/* SWNCQ enable bits */
185 	NV_CTL_PRI_SWNCQ		= 0x02,
186 	NV_CTL_SEC_SWNCQ		= 0x04,
187 
188 	/* SWNCQ status bits */
189 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
190 	NV_SWNCQ_IRQ_PM			= (1 << 1),
191 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
192 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
193 
194 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
195 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
196 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
197 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
198 
199 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
200 					  NV_SWNCQ_IRQ_REMOVED,
201 
202 };
203 
204 /* ADMA Physical Region Descriptor - one SG segment */
205 struct nv_adma_prd {
206 	__le64			addr;
207 	__le32			len;
208 	u8			flags;
209 	u8			packet_len;
210 	__le16			reserved;
211 };
212 
213 enum nv_adma_regbits {
214 	CMDEND	= (1 << 15),		/* end of command list */
215 	WNB	= (1 << 14),		/* wait-not-BSY */
216 	IGN	= (1 << 13),		/* ignore this entry */
217 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
218 	DA2	= (1 << (2 + 8)),
219 	DA1	= (1 << (1 + 8)),
220 	DA0	= (1 << (0 + 8)),
221 };
222 
223 /* ADMA Command Parameter Block
224    The first 5 SG segments are stored inside the Command Parameter Block itself.
225    If there are more than 5 segments the remainder are stored in a separate
226    memory area indicated by next_aprd. */
227 struct nv_adma_cpb {
228 	u8			resp_flags;    /* 0 */
229 	u8			reserved1;     /* 1 */
230 	u8			ctl_flags;     /* 2 */
231 	/* len is length of taskfile in 64-bit words */
232 	u8			len;		/* 3  */
233 	u8			tag;           /* 4 */
234 	u8			next_cpb_idx;  /* 5 */
235 	__le16			reserved2;     /* 6-7 */
236 	__le16			tf[12];        /* 8-31 */
237 	struct nv_adma_prd	aprd[5];       /* 32-111 */
238 	__le64			next_aprd;     /* 112-119 */
239 	__le64			reserved3;     /* 120-127 */
240 };
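/*
 * A command can thus carry NV_ADMA_SGTBL_TOTAL_LEN (5 + 56 = 61)
 * scatter/gather segments in total: the first five in aprd[] above and
 * the rest in the external per-tag APRD table reached through next_aprd.
 * This is what nv_adma_sht advertises as sg_tablesize below.
 */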
241 
242 
243 struct nv_adma_port_priv {
244 	struct nv_adma_cpb	*cpb;
245 	dma_addr_t		cpb_dma;
246 	struct nv_adma_prd	*aprd;
247 	dma_addr_t		aprd_dma;
248 	void __iomem		*ctl_block;
249 	void __iomem		*gen_block;
250 	void __iomem		*notifier_clear_block;
251 	u64			adma_dma_mask;
252 	u8			flags;
253 	int			last_issue_ncq;
254 };
255 
256 struct nv_host_priv {
257 	unsigned long		type;
258 };
259 
260 struct defer_queue {
261 	u32		defer_bits;
262 	unsigned int	head;
263 	unsigned int	tail;
264 	unsigned int	tag[ATA_MAX_QUEUE];
265 };
266 
267 enum ncq_saw_flag_list {
268 	ncq_saw_d2h	= (1U << 0),
269 	ncq_saw_dmas	= (1U << 1),
270 	ncq_saw_sdb	= (1U << 2),
271 	ncq_saw_backout	= (1U << 3),
272 };
273 
274 struct nv_swncq_port_priv {
275 	struct ata_bmdma_prd *prd;	 /* our SG list */
276 	dma_addr_t	prd_dma; /* and its DMA mapping */
277 	void __iomem	*sactive_block;
278 	void __iomem	*irq_block;
279 	void __iomem	*tag_block;
280 	u32		qc_active;
281 
282 	unsigned int	last_issue_tag;
283 
284 	/* FIFO circular queue to store deferred commands */
285 	struct defer_queue defer_queue;
286 
287 	/* for NCQ interrupt analysis */
288 	u32		dhfis_bits;
289 	u32		dmafis_bits;
290 	u32		sdbfis_bits;
291 
292 	unsigned int	ncq_flags;
293 };
294 
295 
296 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
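/*
 * NV_ADMA_CHECK_INTR tests the per-port interrupt bit in the ADMA general
 * control register: bit 19 + 12 * port, i.e. bit 19 for port 0 and bit 31
 * for port 1.
 */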
297 
298 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
299 #ifdef CONFIG_PM
300 static int nv_pci_device_resume(struct pci_dev *pdev);
301 #endif
302 static void nv_ck804_host_stop(struct ata_host *host);
303 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
305 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
306 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
307 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
308 
309 static int nv_hardreset(struct ata_link *link, unsigned int *class,
310 			unsigned long deadline);
311 static void nv_nf2_freeze(struct ata_port *ap);
312 static void nv_nf2_thaw(struct ata_port *ap);
313 static void nv_ck804_freeze(struct ata_port *ap);
314 static void nv_ck804_thaw(struct ata_port *ap);
315 static int nv_adma_slave_config(struct scsi_device *sdev);
316 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
317 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
318 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
319 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
320 static void nv_adma_irq_clear(struct ata_port *ap);
321 static int nv_adma_port_start(struct ata_port *ap);
322 static void nv_adma_port_stop(struct ata_port *ap);
323 #ifdef CONFIG_PM
324 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
325 static int nv_adma_port_resume(struct ata_port *ap);
326 #endif
327 static void nv_adma_freeze(struct ata_port *ap);
328 static void nv_adma_thaw(struct ata_port *ap);
329 static void nv_adma_error_handler(struct ata_port *ap);
330 static void nv_adma_host_stop(struct ata_host *host);
331 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
332 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
333 
334 static void nv_mcp55_thaw(struct ata_port *ap);
335 static void nv_mcp55_freeze(struct ata_port *ap);
336 static void nv_swncq_error_handler(struct ata_port *ap);
337 static int nv_swncq_slave_config(struct scsi_device *sdev);
338 static int nv_swncq_port_start(struct ata_port *ap);
339 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
340 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
341 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
342 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
343 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
344 #ifdef CONFIG_PM
345 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
346 static int nv_swncq_port_resume(struct ata_port *ap);
347 #endif
348 
349 enum nv_host_type
350 {
351 	GENERIC,
352 	NFORCE2,
353 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
354 	CK804,
355 	ADMA,
356 	MCP5x,
357 	SWNCQ,
358 };
359 
360 static const struct pci_device_id nv_pci_tbl[] = {
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
373 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
374 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
375 
376 	{ } /* terminate list */
377 };
378 
379 static struct pci_driver nv_pci_driver = {
380 	.name			= DRV_NAME,
381 	.id_table		= nv_pci_tbl,
382 	.probe			= nv_init_one,
383 #ifdef CONFIG_PM
384 	.suspend		= ata_pci_device_suspend,
385 	.resume			= nv_pci_device_resume,
386 #endif
387 	.remove			= ata_pci_remove_one,
388 };
389 
390 static struct scsi_host_template nv_sht = {
391 	ATA_BMDMA_SHT(DRV_NAME),
392 };
393 
394 static struct scsi_host_template nv_adma_sht = {
395 	ATA_NCQ_SHT(DRV_NAME),
396 	.can_queue		= NV_ADMA_MAX_CPBS,
397 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
398 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
399 	.slave_configure	= nv_adma_slave_config,
400 };
401 
402 static struct scsi_host_template nv_swncq_sht = {
403 	ATA_NCQ_SHT(DRV_NAME),
404 	.can_queue		= ATA_MAX_QUEUE,
405 	.sg_tablesize		= LIBATA_MAX_PRD,
406 	.dma_boundary		= ATA_DMA_BOUNDARY,
407 	.slave_configure	= nv_swncq_slave_config,
408 };
409 
410 /*
411  * NV SATA controllers have various problems with the hardreset
412  * protocol depending on the specific controller and device.
413  *
414  * GENERIC:
415  *
416  *  bko11195 reports that link doesn't come online after hardreset on
417  *  generic nv's and there have been several other similar reports on
418  *  linux-ide.
419  *
420  *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
421  *  softreset.
422  *
423  * NF2/3:
424  *
425  *  bko3352 reports nf2/3 controllers can't determine device signature
426  *  reliably after hardreset.  The following thread reports detection
427  *  failure on cold boot with the standard debouncing timing.
428  *
429  *  http://thread.gmane.org/gmane.linux.ide/34098
430  *
431  *  bko12176 reports that hardreset fails to bring up the link during
432  *  boot on nf2.
433  *
434  * CK804:
435  *
436  *  For initial probing after boot and hot plugging, hardreset mostly
437  *  works fine on CK804 but curiously, reprobing on the initial port
438  *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
439  *  FIS in a somewhat nondeterministic way.
440  *
441  * SWNCQ:
442  *
443  *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
444  *  hardreset should be used, but hardreset can't report a proper
445  *  signature, which suggests that mcp5x is closer to nf2 as far as
446  *  reset quirkiness is concerned.
447  *
448  *  bko12703 reports that boot probing fails for an Intel SSD with
449  *  hardreset.  Link fails to come online.  Softreset works fine.
450  *
451  * The failures are varied but the following patterns seem true for
452  * all flavors.
453  *
454  * - Softreset during boot always works.
455  *
456  * - Hardreset during boot sometimes fails to bring up the link on
457  *   certain combinations and device signature acquisition is
458  *   unreliable.
459  *
460  * - Hardreset is often necessary after hotplug.
461  *
462  * So, preferring softreset for boot probing and error handling (as
463  * hardreset might bring down the link) but using hardreset for
464  * post-boot probing should work around the above issues in most
465  * cases.  Define nv_hardreset() which only kicks in for post-boot
466  * probing and use it for all variants.
467  */
468 static struct ata_port_operations nv_generic_ops = {
469 	.inherits		= &ata_bmdma_port_ops,
470 	.lost_interrupt		= ATA_OP_NULL,
471 	.scr_read		= nv_scr_read,
472 	.scr_write		= nv_scr_write,
473 	.hardreset		= nv_hardreset,
474 };
475 
476 static struct ata_port_operations nv_nf2_ops = {
477 	.inherits		= &nv_generic_ops,
478 	.freeze			= nv_nf2_freeze,
479 	.thaw			= nv_nf2_thaw,
480 };
481 
482 static struct ata_port_operations nv_ck804_ops = {
483 	.inherits		= &nv_generic_ops,
484 	.freeze			= nv_ck804_freeze,
485 	.thaw			= nv_ck804_thaw,
486 	.host_stop		= nv_ck804_host_stop,
487 };
488 
489 static struct ata_port_operations nv_adma_ops = {
490 	.inherits		= &nv_ck804_ops,
491 
492 	.check_atapi_dma	= nv_adma_check_atapi_dma,
493 	.sff_tf_read		= nv_adma_tf_read,
494 	.qc_defer		= ata_std_qc_defer,
495 	.qc_prep		= nv_adma_qc_prep,
496 	.qc_issue		= nv_adma_qc_issue,
497 	.sff_irq_clear		= nv_adma_irq_clear,
498 
499 	.freeze			= nv_adma_freeze,
500 	.thaw			= nv_adma_thaw,
501 	.error_handler		= nv_adma_error_handler,
502 	.post_internal_cmd	= nv_adma_post_internal_cmd,
503 
504 	.port_start		= nv_adma_port_start,
505 	.port_stop		= nv_adma_port_stop,
506 #ifdef CONFIG_PM
507 	.port_suspend		= nv_adma_port_suspend,
508 	.port_resume		= nv_adma_port_resume,
509 #endif
510 	.host_stop		= nv_adma_host_stop,
511 };
512 
513 static struct ata_port_operations nv_swncq_ops = {
514 	.inherits		= &nv_generic_ops,
515 
516 	.qc_defer		= ata_std_qc_defer,
517 	.qc_prep		= nv_swncq_qc_prep,
518 	.qc_issue		= nv_swncq_qc_issue,
519 
520 	.freeze			= nv_mcp55_freeze,
521 	.thaw			= nv_mcp55_thaw,
522 	.error_handler		= nv_swncq_error_handler,
523 
524 #ifdef CONFIG_PM
525 	.port_suspend		= nv_swncq_port_suspend,
526 	.port_resume		= nv_swncq_port_resume,
527 #endif
528 	.port_start		= nv_swncq_port_start,
529 };
530 
531 struct nv_pi_priv {
532 	irq_handler_t			irq_handler;
533 	struct scsi_host_template	*sht;
534 };
535 
536 #define NV_PI_PRIV(_irq_handler, _sht) \
537 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
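/*
 * NV_PI_PRIV builds a pointer to an anonymous struct nv_pi_priv via a C99
 * compound literal (which has static storage duration at file scope), so
 * each nv_port_info[] entry below can carry its flavor-specific interrupt
 * handler and scsi_host_template in ->private_data without a separately
 * named variable.
 */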
538 
539 static const struct ata_port_info nv_port_info[] = {
540 	/* generic */
541 	{
542 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
543 		.pio_mask	= NV_PIO_MASK,
544 		.mwdma_mask	= NV_MWDMA_MASK,
545 		.udma_mask	= NV_UDMA_MASK,
546 		.port_ops	= &nv_generic_ops,
547 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
548 	},
549 	/* nforce2/3 */
550 	{
551 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
552 		.pio_mask	= NV_PIO_MASK,
553 		.mwdma_mask	= NV_MWDMA_MASK,
554 		.udma_mask	= NV_UDMA_MASK,
555 		.port_ops	= &nv_nf2_ops,
556 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
557 	},
558 	/* ck804 */
559 	{
560 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
561 		.pio_mask	= NV_PIO_MASK,
562 		.mwdma_mask	= NV_MWDMA_MASK,
563 		.udma_mask	= NV_UDMA_MASK,
564 		.port_ops	= &nv_ck804_ops,
565 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
566 	},
567 	/* ADMA */
568 	{
569 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
570 				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
571 		.pio_mask	= NV_PIO_MASK,
572 		.mwdma_mask	= NV_MWDMA_MASK,
573 		.udma_mask	= NV_UDMA_MASK,
574 		.port_ops	= &nv_adma_ops,
575 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
576 	},
577 	/* MCP5x */
578 	{
579 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
580 		.pio_mask	= NV_PIO_MASK,
581 		.mwdma_mask	= NV_MWDMA_MASK,
582 		.udma_mask	= NV_UDMA_MASK,
583 		.port_ops	= &nv_generic_ops,
584 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
585 	},
586 	/* SWNCQ */
587 	{
588 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
589 				  ATA_FLAG_NCQ,
590 		.pio_mask	= NV_PIO_MASK,
591 		.mwdma_mask	= NV_MWDMA_MASK,
592 		.udma_mask	= NV_UDMA_MASK,
593 		.port_ops	= &nv_swncq_ops,
594 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
595 	},
596 };
597 
598 MODULE_AUTHOR("NVIDIA");
599 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
600 MODULE_LICENSE("GPL");
601 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
602 MODULE_VERSION(DRV_VERSION);
603 
604 static int adma_enabled;
605 static int swncq_enabled = 1;
606 static int msi_enabled;
607 
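/*
 * nv_adma_register_mode() switches a port from ADMA mode back to legacy
 * register mode: it polls (up to 20 times, 50ns apart) for the ADMA engine
 * to go idle, clears the GO bit, waits for the LEGACY status bit and then
 * records NV_ADMA_PORT_REGISTER_MODE in the port flags.  nv_adma_mode()
 * below performs the opposite transition.
 */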
608 static void nv_adma_register_mode(struct ata_port *ap)
609 {
610 	struct nv_adma_port_priv *pp = ap->private_data;
611 	void __iomem *mmio = pp->ctl_block;
612 	u16 tmp, status;
613 	int count = 0;
614 
615 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
616 		return;
617 
618 	status = readw(mmio + NV_ADMA_STAT);
619 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
620 		ndelay(50);
621 		status = readw(mmio + NV_ADMA_STAT);
622 		count++;
623 	}
624 	if (count == 20)
625 		ata_port_printk(ap, KERN_WARNING,
626 			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
627 			status);
628 
629 	tmp = readw(mmio + NV_ADMA_CTL);
630 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
631 
632 	count = 0;
633 	status = readw(mmio + NV_ADMA_STAT);
634 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
635 		ndelay(50);
636 		status = readw(mmio + NV_ADMA_STAT);
637 		count++;
638 	}
639 	if (count == 20)
640 		ata_port_printk(ap, KERN_WARNING,
641 			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
642 			 status);
643 
644 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
645 }
646 
647 static void nv_adma_mode(struct ata_port *ap)
648 {
649 	struct nv_adma_port_priv *pp = ap->private_data;
650 	void __iomem *mmio = pp->ctl_block;
651 	u16 tmp, status;
652 	int count = 0;
653 
654 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
655 		return;
656 
657 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
658 
659 	tmp = readw(mmio + NV_ADMA_CTL);
660 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
661 
662 	status = readw(mmio + NV_ADMA_STAT);
663 	while (((status & NV_ADMA_STAT_LEGACY) ||
664 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
665 		ndelay(50);
666 		status = readw(mmio + NV_ADMA_STAT);
667 		count++;
668 	}
669 	if (count == 20)
670 		ata_port_printk(ap, KERN_WARNING,
671 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
672 			status);
673 
674 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
675 }
676 
677 static int nv_adma_slave_config(struct scsi_device *sdev)
678 {
679 	struct ata_port *ap = ata_shost_to_port(sdev->host);
680 	struct nv_adma_port_priv *pp = ap->private_data;
681 	struct nv_adma_port_priv *port0, *port1;
682 	struct scsi_device *sdev0, *sdev1;
683 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
684 	unsigned long segment_boundary, flags;
685 	unsigned short sg_tablesize;
686 	int rc;
687 	int adma_enable;
688 	u32 current_reg, new_reg, config_mask;
689 
690 	rc = ata_scsi_slave_config(sdev);
691 
692 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
693 		/* Not a proper libata device, ignore */
694 		return rc;
695 
696 	spin_lock_irqsave(ap->lock, flags);
697 
698 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
699 		/*
700 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
701 		 * Therefore ATAPI commands are sent through the legacy interface.
702 		 * However, the legacy interface only supports 32-bit DMA.
703 		 * Restrict DMA parameters as required by the legacy interface
704 		 * when an ATAPI device is connected.
705 		 */
706 		segment_boundary = ATA_DMA_BOUNDARY;
707 		/* Subtract 1 since an extra entry may be needed for padding, see
708 		   libata-scsi.c */
709 		sg_tablesize = LIBATA_MAX_PRD - 1;
710 
711 		/* Since the legacy DMA engine is in use, we need to disable ADMA
712 		   on the port. */
713 		adma_enable = 0;
714 		nv_adma_register_mode(ap);
715 	} else {
716 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
717 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
718 		adma_enable = 1;
719 	}
720 
721 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
722 
723 	if (ap->port_no == 1)
724 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
725 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
726 	else
727 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
728 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
729 
730 	if (adma_enable) {
731 		new_reg = current_reg | config_mask;
732 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
733 	} else {
734 		new_reg = current_reg & ~config_mask;
735 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
736 	}
737 
738 	if (current_reg != new_reg)
739 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
740 
741 	port0 = ap->host->ports[0]->private_data;
742 	port1 = ap->host->ports[1]->private_data;
743 	sdev0 = ap->host->ports[0]->link.device[0].sdev;
744 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
745 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
746 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
747 		/** We have to set the DMA mask to 32-bit if either port is in
748 		    ATAPI mode, since they are on the same PCI device which is
749 		    used for DMA mapping. If we set the mask we also need to set
750 		    the bounce limit on both ports to ensure that the block
751 		    layer doesn't feed addresses that cause DMA mapping to
752 		    choke. If either SCSI device is not allocated yet, it's OK
753 		    since that port will discover its correct setting when it
754 		    does get allocated.
755 		    Note: Setting 32-bit mask should not fail. */
756 		if (sdev0)
757 			blk_queue_bounce_limit(sdev0->request_queue,
758 					       ATA_DMA_MASK);
759 		if (sdev1)
760 			blk_queue_bounce_limit(sdev1->request_queue,
761 					       ATA_DMA_MASK);
762 
763 		pci_set_dma_mask(pdev, ATA_DMA_MASK);
764 	} else {
765 		/** This shouldn't fail as it was set to this value before */
766 		pci_set_dma_mask(pdev, pp->adma_dma_mask);
767 		if (sdev0)
768 			blk_queue_bounce_limit(sdev0->request_queue,
769 					       pp->adma_dma_mask);
770 		if (sdev1)
771 			blk_queue_bounce_limit(sdev1->request_queue,
772 					       pp->adma_dma_mask);
773 	}
774 
775 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
776 	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
777 	ata_port_printk(ap, KERN_INFO,
778 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
779 		(unsigned long long)*ap->host->dev->dma_mask,
780 		segment_boundary, sg_tablesize);
781 
782 	spin_unlock_irqrestore(ap->lock, flags);
783 
784 	return rc;
785 }
786 
787 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
788 {
789 	struct nv_adma_port_priv *pp = qc->ap->private_data;
790 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
791 }
792 
793 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
794 {
795 	/* Other than when internal or pass-through commands are executed,
796 	   the only time this function will be called in ADMA mode will be
797 	   if a command fails. In the failure case we don't care about going
798 	   into register mode with ADMA commands pending, as the commands will
799 	   all shortly be aborted anyway. We assume that NCQ commands are not
800 	   issued via passthrough, which is the only way that switching into
801 	   ADMA mode could abort outstanding commands. */
802 	nv_adma_register_mode(ap);
803 
804 	ata_sff_tf_read(ap, tf);
805 }
806 
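/*
 * nv_adma_tf_to_cpb() encodes an ATA taskfile into the CPB's tf[] array.
 * Each __le16 entry is (shadow register address << 8) | value, combined
 * with control bits from enum nv_adma_regbits where needed: WNB
 * (wait-not-BSY) on the leading register entry, CMDEND on the command
 * entry, and IGN to pad unused slots out to 12 entries.
 */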
807 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
808 {
809 	unsigned int idx = 0;
810 
811 	if (tf->flags & ATA_TFLAG_ISADDR) {
812 		if (tf->flags & ATA_TFLAG_LBA48) {
813 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
814 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
815 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
816 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
817 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
818 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
819 		} else
820 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
821 
822 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
823 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
824 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
825 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
826 	}
827 
828 	if (tf->flags & ATA_TFLAG_DEVICE)
829 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
830 
831 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
832 
833 	while (idx < 12)
834 		cpb[idx++] = cpu_to_le16(IGN);
835 
836 	return idx;
837 }
838 
839 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
840 {
841 	struct nv_adma_port_priv *pp = ap->private_data;
842 	u8 flags = pp->cpb[cpb_num].resp_flags;
843 
844 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
845 
846 	if (unlikely((force_err ||
847 		     flags & (NV_CPB_RESP_ATA_ERR |
848 			      NV_CPB_RESP_CMD_ERR |
849 			      NV_CPB_RESP_CPB_ERR)))) {
850 		struct ata_eh_info *ehi = &ap->link.eh_info;
851 		int freeze = 0;
852 
853 		ata_ehi_clear_desc(ehi);
854 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
855 		if (flags & NV_CPB_RESP_ATA_ERR) {
856 			ata_ehi_push_desc(ehi, "ATA error");
857 			ehi->err_mask |= AC_ERR_DEV;
858 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
859 			ata_ehi_push_desc(ehi, "CMD error");
860 			ehi->err_mask |= AC_ERR_DEV;
861 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
862 			ata_ehi_push_desc(ehi, "CPB error");
863 			ehi->err_mask |= AC_ERR_SYSTEM;
864 			freeze = 1;
865 		} else {
866 			/* notifier error, but no error in CPB flags? */
867 			ata_ehi_push_desc(ehi, "unknown");
868 			ehi->err_mask |= AC_ERR_OTHER;
869 			freeze = 1;
870 		}
871 		/* Kill all commands. EH will determine what actually failed. */
872 		if (freeze)
873 			ata_port_freeze(ap);
874 		else
875 			ata_port_abort(ap);
876 		return 1;
877 	}
878 
879 	if (likely(flags & NV_CPB_RESP_DONE)) {
880 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
881 		VPRINTK("CPB flags done, flags=0x%x\n", flags);
882 		if (likely(qc)) {
883 			DPRINTK("Completing qc from tag %d\n", cpb_num);
884 			ata_qc_complete(qc);
885 		} else {
886 			struct ata_eh_info *ehi = &ap->link.eh_info;
887 			/* Notifier bits set without a command may indicate the drive
888 			   is misbehaving. Raise host state machine violation on this
889 			   condition. */
890 			ata_port_printk(ap, KERN_ERR,
891 					"notifier for tag %d with no cmd?\n",
892 					cpb_num);
893 			ehi->err_mask |= AC_ERR_HSM;
894 			ehi->action |= ATA_EH_RESET;
895 			ata_port_freeze(ap);
896 			return 1;
897 		}
898 	}
899 	return 0;
900 }
901 
902 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
903 {
904 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
905 
906 	/* freeze if hotplugged */
907 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
908 		ata_port_freeze(ap);
909 		return 1;
910 	}
911 
912 	/* bail out if not our interrupt */
913 	if (!(irq_stat & NV_INT_DEV))
914 		return 0;
915 
916 	/* DEV interrupt w/ no active qc? */
917 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
918 		ata_sff_check_status(ap);
919 		return 1;
920 	}
921 
922 	/* handle interrupt */
923 	return ata_sff_host_intr(ap, qc);
924 }
925 
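/*
 * nv_adma_interrupt() services both ports.  Ports running in legacy mode
 * (ATAPI setup complete) are handed to nv_host_intr(); otherwise the
 * NOTIFIER/NOTIFIER_ERROR registers indicate which CPBs to inspect, the
 * ADMA status is cleared (and the write flushed) before the CPBs are
 * checked, and hotplug/timeout/SError conditions freeze the port for EH.
 */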
926 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
927 {
928 	struct ata_host *host = dev_instance;
929 	int i, handled = 0;
930 	u32 notifier_clears[2];
931 
932 	spin_lock(&host->lock);
933 
934 	for (i = 0; i < host->n_ports; i++) {
935 		struct ata_port *ap = host->ports[i];
936 		struct nv_adma_port_priv *pp = ap->private_data;
937 		void __iomem *mmio = pp->ctl_block;
938 		u16 status;
939 		u32 gen_ctl;
940 		u32 notifier, notifier_error;
941 
942 		notifier_clears[i] = 0;
943 
944 		/* if ADMA is disabled, use standard ata interrupt handler */
945 		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
946 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
947 				>> (NV_INT_PORT_SHIFT * i);
948 			handled += nv_host_intr(ap, irq_stat);
949 			continue;
950 		}
951 
952 		/* if in ATA register mode, check for standard interrupts */
953 		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
954 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
955 				>> (NV_INT_PORT_SHIFT * i);
956 			if (ata_tag_valid(ap->link.active_tag))
957 				/** NV_INT_DEV indication seems unreliable
958 				    at times at least in ADMA mode. Force it
959 				    on always when a command is active, to
960 				    prevent losing interrupts. */
961 				irq_stat |= NV_INT_DEV;
962 			handled += nv_host_intr(ap, irq_stat);
963 		}
964 
965 		notifier = readl(mmio + NV_ADMA_NOTIFIER);
966 		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
967 		notifier_clears[i] = notifier | notifier_error;
968 
969 		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
970 
971 		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
972 		    !notifier_error)
973 			/* Nothing to do */
974 			continue;
975 
976 		status = readw(mmio + NV_ADMA_STAT);
977 
978 		/*
979 		 * Clear status. Ensure the controller sees the
980 		 * clearing before we start looking at any of the CPB
981 		 * statuses, so that any CPB completions after this
982 		 * point in the handler will raise another interrupt.
983 		 */
984 		writew(status, mmio + NV_ADMA_STAT);
985 		readw(mmio + NV_ADMA_STAT); /* flush posted write */
986 		rmb();
987 
988 		handled++; /* irq handled if we got here */
989 
990 		/* freeze if hotplugged or controller error */
991 		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
992 				       NV_ADMA_STAT_HOTUNPLUG |
993 				       NV_ADMA_STAT_TIMEOUT |
994 				       NV_ADMA_STAT_SERROR))) {
995 			struct ata_eh_info *ehi = &ap->link.eh_info;
996 
997 			ata_ehi_clear_desc(ehi);
998 			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
999 			if (status & NV_ADMA_STAT_TIMEOUT) {
1000 				ehi->err_mask |= AC_ERR_SYSTEM;
1001 				ata_ehi_push_desc(ehi, "timeout");
1002 			} else if (status & NV_ADMA_STAT_HOTPLUG) {
1003 				ata_ehi_hotplugged(ehi);
1004 				ata_ehi_push_desc(ehi, "hotplug");
1005 			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
1006 				ata_ehi_hotplugged(ehi);
1007 				ata_ehi_push_desc(ehi, "hot unplug");
1008 			} else if (status & NV_ADMA_STAT_SERROR) {
1009 				/* let EH analyze SError and figure out cause */
1010 				ata_ehi_push_desc(ehi, "SError");
1011 			} else
1012 				ata_ehi_push_desc(ehi, "unknown");
1013 			ata_port_freeze(ap);
1014 			continue;
1015 		}
1016 
1017 		if (status & (NV_ADMA_STAT_DONE |
1018 			      NV_ADMA_STAT_CPBERR |
1019 			      NV_ADMA_STAT_CMD_COMPLETE)) {
1020 			u32 check_commands = notifier_clears[i];
1021 			int pos, error = 0;
1022 
1023 			if (status & NV_ADMA_STAT_CPBERR) {
1024 				/* check all active commands */
1025 				if (ata_tag_valid(ap->link.active_tag))
1026 					check_commands = 1 <<
1027 						ap->link.active_tag;
1028 				else
1029 					check_commands = ap->link.sactive;
1030 			}
1031 
1032 			/* check CPBs for completed commands */
1033 			while ((pos = ffs(check_commands)) && !error) {
1034 				pos--;
1035 				error = nv_adma_check_cpb(ap, pos,
1036 						notifier_error & (1 << pos));
1037 				check_commands &= ~(1 << pos);
1038 			}
1039 		}
1040 	}
1041 
1042 	if (notifier_clears[0] || notifier_clears[1]) {
1043 		/* Note: Both notifier clear registers must be written
1044 		   if either is set, even if one is zero, according to NVIDIA. */
1045 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1046 		writel(notifier_clears[0], pp->notifier_clear_block);
1047 		pp = host->ports[1]->private_data;
1048 		writel(notifier_clears[1], pp->notifier_clear_block);
1049 	}
1050 
1051 	spin_unlock(&host->lock);
1052 
1053 	return IRQ_RETVAL(handled);
1054 }
1055 
1056 static void nv_adma_freeze(struct ata_port *ap)
1057 {
1058 	struct nv_adma_port_priv *pp = ap->private_data;
1059 	void __iomem *mmio = pp->ctl_block;
1060 	u16 tmp;
1061 
1062 	nv_ck804_freeze(ap);
1063 
1064 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1065 		return;
1066 
1067 	/* clear any outstanding CK804 notifications */
1068 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1069 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1070 
1071 	/* Disable interrupt */
1072 	tmp = readw(mmio + NV_ADMA_CTL);
1073 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1074 		mmio + NV_ADMA_CTL);
1075 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1076 }
1077 
1078 static void nv_adma_thaw(struct ata_port *ap)
1079 {
1080 	struct nv_adma_port_priv *pp = ap->private_data;
1081 	void __iomem *mmio = pp->ctl_block;
1082 	u16 tmp;
1083 
1084 	nv_ck804_thaw(ap);
1085 
1086 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1087 		return;
1088 
1089 	/* Enable interrupt */
1090 	tmp = readw(mmio + NV_ADMA_CTL);
1091 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1092 		mmio + NV_ADMA_CTL);
1093 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1094 }
1095 
1096 static void nv_adma_irq_clear(struct ata_port *ap)
1097 {
1098 	struct nv_adma_port_priv *pp = ap->private_data;
1099 	void __iomem *mmio = pp->ctl_block;
1100 	u32 notifier_clears[2];
1101 
1102 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1103 		ata_sff_irq_clear(ap);
1104 		return;
1105 	}
1106 
1107 	/* clear any outstanding CK804 notifications */
1108 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1109 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1110 
1111 	/* clear ADMA status */
1112 	writew(0xffff, mmio + NV_ADMA_STAT);
1113 
1114 	/* clear notifiers - note both ports need to be written with
1115 	   something even though we are only clearing on one */
1116 	if (ap->port_no == 0) {
1117 		notifier_clears[0] = 0xFFFFFFFF;
1118 		notifier_clears[1] = 0;
1119 	} else {
1120 		notifier_clears[0] = 0;
1121 		notifier_clears[1] = 0xFFFFFFFF;
1122 	}
1123 	pp = ap->host->ports[0]->private_data;
1124 	writel(notifier_clears[0], pp->notifier_clear_block);
1125 	pp = ap->host->ports[1]->private_data;
1126 	writel(notifier_clears[1], pp->notifier_clear_block);
1127 }
1128 
1129 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1130 {
1131 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1132 
1133 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1134 		ata_bmdma_post_internal_cmd(qc);
1135 }
1136 
1137 static int nv_adma_port_start(struct ata_port *ap)
1138 {
1139 	struct device *dev = ap->host->dev;
1140 	struct nv_adma_port_priv *pp;
1141 	int rc;
1142 	void *mem;
1143 	dma_addr_t mem_dma;
1144 	void __iomem *mmio;
1145 	struct pci_dev *pdev = to_pci_dev(dev);
1146 	u16 tmp;
1147 
1148 	VPRINTK("ENTER\n");
1149 
1150 	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1151 	   pad buffers */
1152 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1153 	if (rc)
1154 		return rc;
1155 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1156 	if (rc)
1157 		return rc;
1158 
1159 	/* we might fallback to bmdma, allocate bmdma resources */
1160 	rc = ata_bmdma_port_start(ap);
1161 	if (rc)
1162 		return rc;
1163 
1164 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1165 	if (!pp)
1166 		return -ENOMEM;
1167 
1168 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1169 	       ap->port_no * NV_ADMA_PORT_SIZE;
1170 	pp->ctl_block = mmio;
1171 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1172 	pp->notifier_clear_block = pp->gen_block +
1173 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1174 
1175 	/* Now that the legacy PRD and padding buffer are allocated we can
1176 	   safely raise the DMA mask to allocate the CPB/APRD table.
1177 	   These calls are allowed to fail, since we store the mask that
1178 	   ends up in effect and later use it as the bounce limit in
1179 	   slave_config if needed. */
1180 	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1181 	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1182 	pp->adma_dma_mask = *dev->dma_mask;
1183 
1184 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1185 				  &mem_dma, GFP_KERNEL);
1186 	if (!mem)
1187 		return -ENOMEM;
1188 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1189 
1190 	/*
1191 	 * First item in chunk of DMA memory:
1192 	 * 128-byte command parameter block (CPB)
1193 	 * one for each command tag
1194 	 */
1195 	pp->cpb     = mem;
1196 	pp->cpb_dma = mem_dma;
1197 
1198 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1199 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1200 
1201 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1202 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1203 
1204 	/*
1205 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1206 	 */
1207 	pp->aprd = mem;
1208 	pp->aprd_dma = mem_dma;
1209 
1210 	ap->private_data = pp;
1211 
1212 	/* clear any outstanding interrupt conditions */
1213 	writew(0xffff, mmio + NV_ADMA_STAT);
1214 
1215 	/* initialize port variables */
1216 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1217 
1218 	/* clear CPB fetch count */
1219 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1220 
1221 	/* clear GO for register mode, enable interrupt */
1222 	tmp = readw(mmio + NV_ADMA_CTL);
1223 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1224 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1225 
1226 	tmp = readw(mmio + NV_ADMA_CTL);
1227 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1228 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1229 	udelay(1);
1230 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1231 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1232 
1233 	return 0;
1234 }
1235 
1236 static void nv_adma_port_stop(struct ata_port *ap)
1237 {
1238 	struct nv_adma_port_priv *pp = ap->private_data;
1239 	void __iomem *mmio = pp->ctl_block;
1240 
1241 	VPRINTK("ENTER\n");
1242 	writew(0, mmio + NV_ADMA_CTL);
1243 }
1244 
1245 #ifdef CONFIG_PM
1246 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1247 {
1248 	struct nv_adma_port_priv *pp = ap->private_data;
1249 	void __iomem *mmio = pp->ctl_block;
1250 
1251 	/* Go to register mode - clears GO */
1252 	nv_adma_register_mode(ap);
1253 
1254 	/* clear CPB fetch count */
1255 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1256 
1257 	/* disable interrupt, shut down port */
1258 	writew(0, mmio + NV_ADMA_CTL);
1259 
1260 	return 0;
1261 }
1262 
1263 static int nv_adma_port_resume(struct ata_port *ap)
1264 {
1265 	struct nv_adma_port_priv *pp = ap->private_data;
1266 	void __iomem *mmio = pp->ctl_block;
1267 	u16 tmp;
1268 
1269 	/* set CPB block location */
1270 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1271 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1272 
1273 	/* clear any outstanding interrupt conditions */
1274 	writew(0xffff, mmio + NV_ADMA_STAT);
1275 
1276 	/* initialize port variables */
1277 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1278 
1279 	/* clear CPB fetch count */
1280 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1281 
1282 	/* clear GO for register mode, enable interrupt */
1283 	tmp = readw(mmio + NV_ADMA_CTL);
1284 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1285 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1286 
1287 	tmp = readw(mmio + NV_ADMA_CTL);
1288 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1289 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1290 	udelay(1);
1291 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1292 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1293 
1294 	return 0;
1295 }
1296 #endif
1297 
1298 static void nv_adma_setup_port(struct ata_port *ap)
1299 {
1300 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1301 	struct ata_ioports *ioport = &ap->ioaddr;
1302 
1303 	VPRINTK("ENTER\n");
1304 
1305 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1306 
1307 	ioport->cmd_addr	= mmio;
1308 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1309 	ioport->error_addr	=
1310 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1311 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1312 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1313 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1314 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1315 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1316 	ioport->status_addr	=
1317 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1318 	ioport->altstatus_addr	=
1319 	ioport->ctl_addr	= mmio + 0x20;
1320 }
1321 
1322 static int nv_adma_host_init(struct ata_host *host)
1323 {
1324 	struct pci_dev *pdev = to_pci_dev(host->dev);
1325 	unsigned int i;
1326 	u32 tmp32;
1327 
1328 	VPRINTK("ENTER\n");
1329 
1330 	/* enable ADMA on the ports */
1331 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1332 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1333 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1334 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1335 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1336 
1337 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1338 
1339 	for (i = 0; i < host->n_ports; i++)
1340 		nv_adma_setup_port(host->ports[i]);
1341 
1342 	return 0;
1343 }
1344 
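/*
 * APRD flag rules used below: NV_APRD_WRITE for writes, NV_APRD_END on the
 * last scatter/gather element, and NV_APRD_CONT on every other element
 * except index 4 (the fifth and final APRD embedded in the CPB), after
 * which the chain continues through the CPB's next_aprd pointer instead.
 */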
1345 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1346 			      struct scatterlist *sg,
1347 			      int idx,
1348 			      struct nv_adma_prd *aprd)
1349 {
1350 	u8 flags = 0;
1351 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1352 		flags |= NV_APRD_WRITE;
1353 	if (idx == qc->n_elem - 1)
1354 		flags |= NV_APRD_END;
1355 	else if (idx != 4)
1356 		flags |= NV_APRD_CONT;
1357 
1358 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1359 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1360 	aprd->flags = flags;
1361 	aprd->packet_len = 0;
1362 }
1363 
1364 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1365 {
1366 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1367 	struct nv_adma_prd *aprd;
1368 	struct scatterlist *sg;
1369 	unsigned int si;
1370 
1371 	VPRINTK("ENTER\n");
1372 
1373 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1374 		aprd = (si < 5) ? &cpb->aprd[si] :
1375 			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1376 		nv_adma_fill_aprd(qc, sg, si, aprd);
1377 	}
1378 	if (si > 5)
1379 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1380 	else
1381 		cpb->next_aprd = cpu_to_le64(0);
1382 }
1383 
1384 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1385 {
1386 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1387 
1388 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1389 	   or interrupt-driven no-data commands. */
1390 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1391 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1392 		return 1;
1393 
1394 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1395 	   (qc->tf.protocol == ATA_PROT_NODATA))
1396 		return 0;
1397 
1398 	return 1;
1399 }
1400 
1401 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1402 {
1403 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1404 	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1405 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1406 		       NV_CPB_CTL_IEN;
1407 
1408 	if (nv_adma_use_reg_mode(qc)) {
1409 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1410 			(qc->flags & ATA_QCFLAG_DMAMAP));
1411 		nv_adma_register_mode(qc->ap);
1412 		ata_bmdma_qc_prep(qc);
1413 		return;
1414 	}
1415 
1416 	cpb->resp_flags = NV_CPB_RESP_DONE;
1417 	wmb();
1418 	cpb->ctl_flags = 0;
1419 	wmb();
1420 
1421 	cpb->len		= 3;
1422 	cpb->tag		= qc->tag;
1423 	cpb->next_cpb_idx	= 0;
1424 
1425 	/* turn on NCQ flags for NCQ commands */
1426 	if (qc->tf.protocol == ATA_PROT_NCQ)
1427 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1428 
1429 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1430 
1431 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1432 
1433 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1434 		nv_adma_fill_sg(qc, cpb);
1435 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1436 	} else
1437 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1438 
1439 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1440 	   until we are finished filling in all of the contents */
1441 	wmb();
1442 	cpb->ctl_flags = ctl_flags;
1443 	wmb();
1444 	cpb->resp_flags = 0;
1445 }
1446 
1447 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1448 {
1449 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1450 	void __iomem *mmio = pp->ctl_block;
1451 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1452 
1453 	VPRINTK("ENTER\n");
1454 
1455 	/* We can't handle result taskfile with NCQ commands, since
1456 	   retrieving the taskfile switches us out of ADMA mode and would abort
1457 	   existing commands. */
1458 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1459 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1460 		ata_dev_printk(qc->dev, KERN_ERR,
1461 			"NCQ w/ RESULT_TF not allowed\n");
1462 		return AC_ERR_SYSTEM;
1463 	}
1464 
1465 	if (nv_adma_use_reg_mode(qc)) {
1466 		/* use ATA register mode */
1467 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1468 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1469 			(qc->flags & ATA_QCFLAG_DMAMAP));
1470 		nv_adma_register_mode(qc->ap);
1471 		return ata_bmdma_qc_issue(qc);
1472 	} else
1473 		nv_adma_mode(qc->ap);
1474 
1475 	/* write append register, command tag in lower 8 bits
1476 	   and (number of cpbs to append -1) in top 8 bits */
1477 	wmb();
1478 
1479 	if (curr_ncq != pp->last_issue_ncq) {
1480 		/* Seems to need some delay before switching between NCQ and
1481 		   non-NCQ commands, else we get command timeouts and such. */
1482 		udelay(20);
1483 		pp->last_issue_ncq = curr_ncq;
1484 	}
1485 
1486 	writew(qc->tag, mmio + NV_ADMA_APPEND);
1487 
1488 	DPRINTK("Issued tag %u\n", qc->tag);
1489 
1490 	return 0;
1491 }
1492 
1493 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1494 {
1495 	struct ata_host *host = dev_instance;
1496 	unsigned int i;
1497 	unsigned int handled = 0;
1498 	unsigned long flags;
1499 
1500 	spin_lock_irqsave(&host->lock, flags);
1501 
1502 	for (i = 0; i < host->n_ports; i++) {
1503 		struct ata_port *ap = host->ports[i];
1504 		struct ata_queued_cmd *qc;
1505 
1506 		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1507 		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1508 			handled += ata_sff_host_intr(ap, qc);
1509 		} else {
1510 			/*
1511 			 * No command pending?  Clear the interrupt status
1512 			 * anyway, in case an interrupt is pending.
1513 			 */
1514 			ap->ops->sff_check_status(ap);
1515 		}
1516 	}
1517 
1518 	spin_unlock_irqrestore(&host->lock, flags);
1519 
1520 	return IRQ_RETVAL(handled);
1521 }
1522 
1523 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1524 {
1525 	int i, handled = 0;
1526 
1527 	for (i = 0; i < host->n_ports; i++) {
1528 		handled += nv_host_intr(host->ports[i], irq_stat);
1529 		irq_stat >>= NV_INT_PORT_SHIFT;
1530 	}
1531 
1532 	return IRQ_RETVAL(handled);
1533 }
1534 
1535 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1536 {
1537 	struct ata_host *host = dev_instance;
1538 	u8 irq_stat;
1539 	irqreturn_t ret;
1540 
1541 	spin_lock(&host->lock);
1542 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1543 	ret = nv_do_interrupt(host, irq_stat);
1544 	spin_unlock(&host->lock);
1545 
1546 	return ret;
1547 }
1548 
1549 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1550 {
1551 	struct ata_host *host = dev_instance;
1552 	u8 irq_stat;
1553 	irqreturn_t ret;
1554 
1555 	spin_lock(&host->lock);
1556 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1557 	ret = nv_do_interrupt(host, irq_stat);
1558 	spin_unlock(&host->lock);
1559 
1560 	return ret;
1561 }
1562 
1563 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1564 {
1565 	if (sc_reg > SCR_CONTROL)
1566 		return -EINVAL;
1567 
1568 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1569 	return 0;
1570 }
1571 
1572 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1573 {
1574 	if (sc_reg > SCR_CONTROL)
1575 		return -EINVAL;
1576 
1577 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1578 	return 0;
1579 }
1580 
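/*
 * nv_hardreset() implements the reset policy laid out in the comment above
 * the port ops: a real hardreset is issued only for post-boot probing of
 * an unoccupied port (host not loading and no device already enabled);
 * otherwise the link is merely resumed.  It always returns -EAGAIN because
 * signature acquisition is unreliable here, so that EH follows up (e.g.
 * with softreset) to classify the device.
 */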
1581 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1582 			unsigned long deadline)
1583 {
1584 	struct ata_eh_context *ehc = &link->eh_context;
1585 
1586 	/* Do hardreset iff it's post-boot probing, please read the
1587 	 * comment above port ops for details.
1588 	 */
1589 	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1590 	    !ata_dev_enabled(link->device))
1591 		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1592 				    NULL, NULL);
1593 	else {
1594 		const unsigned long *timing = sata_ehc_deb_timing(ehc);
1595 		int rc;
1596 
1597 		if (!(ehc->i.flags & ATA_EHI_QUIET))
1598 			ata_link_printk(link, KERN_INFO, "nv: skipping "
1599 					"hardreset on occupied port\n");
1600 
1601 		/* make sure the link is online */
1602 		rc = sata_link_resume(link, timing, deadline);
1603 		/* whine about phy resume failure but proceed */
1604 		if (rc && rc != -EOPNOTSUPP)
1605 			ata_link_printk(link, KERN_WARNING, "failed to resume "
1606 					"link (errno=%d)\n", rc);
1607 	}
1608 
1609 	/* device signature acquisition is unreliable */
1610 	return -EAGAIN;
1611 }
1612 
1613 static void nv_nf2_freeze(struct ata_port *ap)
1614 {
1615 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1616 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1617 	u8 mask;
1618 
1619 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1620 	mask &= ~(NV_INT_ALL << shift);
1621 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1622 }
1623 
1624 static void nv_nf2_thaw(struct ata_port *ap)
1625 {
1626 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1627 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1628 	u8 mask;
1629 
1630 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1631 
1632 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1633 	mask |= (NV_INT_MASK << shift);
1634 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1635 }
1636 
1637 static void nv_ck804_freeze(struct ata_port *ap)
1638 {
1639 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1640 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1641 	u8 mask;
1642 
1643 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1644 	mask &= ~(NV_INT_ALL << shift);
1645 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1646 }
1647 
1648 static void nv_ck804_thaw(struct ata_port *ap)
1649 {
1650 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1651 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1652 	u8 mask;
1653 
1654 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1655 
1656 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1657 	mask |= (NV_INT_MASK << shift);
1658 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1659 }
1660 
1661 static void nv_mcp55_freeze(struct ata_port *ap)
1662 {
1663 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1664 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1665 	u32 mask;
1666 
1667 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1668 
1669 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1670 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1671 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1672 	ata_sff_freeze(ap);
1673 }
1674 
1675 static void nv_mcp55_thaw(struct ata_port *ap)
1676 {
1677 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1678 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1679 	u32 mask;
1680 
1681 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1682 
1683 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1684 	mask |= (NV_INT_MASK_MCP55 << shift);
1685 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1686 	ata_sff_thaw(ap);
1687 }
1688 
1689 static void nv_adma_error_handler(struct ata_port *ap)
1690 {
1691 	struct nv_adma_port_priv *pp = ap->private_data;
1692 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1693 		void __iomem *mmio = pp->ctl_block;
1694 		int i;
1695 		u16 tmp;
1696 
1697 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1698 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1699 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1700 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1701 			u32 status = readw(mmio + NV_ADMA_STAT);
1702 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1703 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1704 
1705 			ata_port_printk(ap, KERN_ERR,
1706 				"EH in ADMA mode, notifier 0x%X "
1707 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1708 				"next cpb count 0x%X next cpb idx 0x%x\n",
1709 				notifier, notifier_error, gen_ctl, status,
1710 				cpb_count, next_cpb_idx);
1711 
1712 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1713 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1714 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1715 				    ap->link.sactive & (1 << i))
1716 					ata_port_printk(ap, KERN_ERR,
1717 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1718 						i, cpb->ctl_flags, cpb->resp_flags);
1719 			}
1720 		}
1721 
1722 		/* Push us back into port register mode for error handling. */
1723 		nv_adma_register_mode(ap);
1724 
1725 		/* Mark all of the CPBs as invalid to prevent them from
1726 		 * being executed */
1727 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1728 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1729 
1730 		/* clear CPB fetch count */
1731 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1732 
1733 		/* Reset channel */
1734 		tmp = readw(mmio + NV_ADMA_CTL);
1735 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1736 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1737 		udelay(1);
1738 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1739 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1740 	}
1741 
1742 	ata_bmdma_error_handler(ap);
1743 }
1744 
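/* Park a queued command on the software defer queue.  The queue is a small
 * ring indexed by head/tail, with defer_bits mirroring which tags are
 * currently deferred.
 */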
1745 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1746 {
1747 	struct nv_swncq_port_priv *pp = ap->private_data;
1748 	struct defer_queue *dq = &pp->defer_queue;
1749 
1750 	/* queue is full */
1751 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1752 	dq->defer_bits |= (1 << qc->tag);
1753 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1754 }
1755 
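/* Take the oldest deferred command off the ring, or return NULL if the ring
 * is empty.  The freed slot is poisoned and the tag's defer_bits entry is
 * cleared.
 */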
1756 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1757 {
1758 	struct nv_swncq_port_priv *pp = ap->private_data;
1759 	struct defer_queue *dq = &pp->defer_queue;
1760 	unsigned int tag;
1761 
1762 	if (dq->head == dq->tail)	/* null queue */
1763 		return NULL;
1764 
1765 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1766 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1767 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1768 	dq->defer_bits &= ~(1 << tag);
1769 
1770 	return ata_qc_from_tag(ap, tag);
1771 }
1772 
1773 static void nv_swncq_fis_reinit(struct ata_port *ap)
1774 {
1775 	struct nv_swncq_port_priv *pp = ap->private_data;
1776 
1777 	pp->dhfis_bits = 0;
1778 	pp->dmafis_bits = 0;
1779 	pp->sdbfis_bits = 0;
1780 	pp->ncq_flags = 0;
1781 }
1782 
1783 static void nv_swncq_pp_reinit(struct ata_port *ap)
1784 {
1785 	struct nv_swncq_port_priv *pp = ap->private_data;
1786 	struct defer_queue *dq = &pp->defer_queue;
1787 
1788 	dq->head = 0;
1789 	dq->tail = 0;
1790 	dq->defer_bits = 0;
1791 	pp->qc_active = 0;
1792 	pp->last_issue_tag = ATA_TAG_POISON;
1793 	nv_swncq_fis_reinit(ap);
1794 }
1795 
1796 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1797 {
1798 	struct nv_swncq_port_priv *pp = ap->private_data;
1799 
1800 	writew(fis, pp->irq_block);
1801 }
1802 
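/* ata_bmdma_stop() only uses the qc to find the port, so build a throwaway
 * qc on the stack to stop BMDMA without an active command.
 */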
1803 static void __ata_bmdma_stop(struct ata_port *ap)
1804 {
1805 	struct ata_queued_cmd qc;
1806 
1807 	qc.ap = ap;
1808 	ata_bmdma_stop(&qc);
1809 }
1810 
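/* Called from EH while NCQ commands are in flight: dump the SWNCQ
 * bookkeeping and per-tag FIS state to the log, then reset the per-port
 * state, clear interrupts and stop the BMDMA engine.
 */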
1811 static void nv_swncq_ncq_stop(struct ata_port *ap)
1812 {
1813 	struct nv_swncq_port_priv *pp = ap->private_data;
1814 	unsigned int i;
1815 	u32 sactive;
1816 	u32 done_mask;
1817 
1818 	ata_port_printk(ap, KERN_ERR,
1819 			"EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1820 			ap->qc_active, ap->link.sactive);
1821 	ata_port_printk(ap, KERN_ERR,
1822 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1823 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1824 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1825 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1826 
1827 	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1828 			ap->ops->sff_check_status(ap),
1829 			ioread8(ap->ioaddr.error_addr));
1830 
1831 	sactive = readl(pp->sactive_block);
1832 	done_mask = pp->qc_active ^ sactive;
1833 
1834 	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1835 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1836 		u8 err = 0;
1837 		if (pp->qc_active & (1 << i))
1838 			err = 0;
1839 		else if (done_mask & (1 << i))
1840 			err = 1;
1841 		else
1842 			continue;
1843 
1844 		ata_port_printk(ap, KERN_ERR,
1845 				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
1846 				(pp->dhfis_bits >> i) & 0x1,
1847 				(pp->dmafis_bits >> i) & 0x1,
1848 				(pp->sdbfis_bits >> i) & 0x1,
1849 				(sactive >> i) & 0x1,
1850 				(err ? "error! tag doesn't exist" : " "));
1851 	}
1852 
1853 	nv_swncq_pp_reinit(ap);
1854 	ap->ops->sff_irq_clear(ap);
1855 	__ata_bmdma_stop(ap);
1856 	nv_swncq_irq_clear(ap, 0xffff);
1857 }
1858 
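/* SWNCQ error handler: if NCQ commands were active, tear the SWNCQ state
 * down and force a reset before running the BMDMA error handler.
 */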
1859 static void nv_swncq_error_handler(struct ata_port *ap)
1860 {
1861 	struct ata_eh_context *ehc = &ap->link.eh_context;
1862 
1863 	if (ap->link.sactive) {
1864 		nv_swncq_ncq_stop(ap);
1865 		ehc->i.action |= ATA_EH_RESET;
1866 	}
1867 
1868 	ata_bmdma_error_handler(ap);
1869 }
1870 
1871 #ifdef CONFIG_PM
1872 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1873 {
1874 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1875 	u32 tmp;
1876 
1877 	/* clear irq */
1878 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1879 
1880 	/* disable irq */
1881 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1882 
1883 	/* disable swncq */
1884 	tmp = readl(mmio + NV_CTL_MCP55);
1885 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1886 	writel(tmp, mmio + NV_CTL_MCP55);
1887 
1888 	return 0;
1889 }
1890 
1891 static int nv_swncq_port_resume(struct ata_port *ap)
1892 {
1893 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1894 	u32 tmp;
1895 
1896 	/* clear irq */
1897 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1898 
1899 	/* enable irq */
1900 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1901 
1902 	/* enable swncq */
1903 	tmp = readl(mmio + NV_CTL_MCP55);
1904 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1905 
1906 	return 0;
1907 }
1908 #endif
1909 
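/* One-time SWNCQ host setup: clear the ECO 398 bit in PCI config space
 * (offset 0x7f), turn on SWNCQ for both ports, enable the interrupt sources
 * the SWNCQ path relies on and ack anything already pending.
 */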
1910 static void nv_swncq_host_init(struct ata_host *host)
1911 {
1912 	u32 tmp;
1913 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1914 	struct pci_dev *pdev = to_pci_dev(host->dev);
1915 	u8 regval;
1916 
1917 	/* disable ECO 398 */
1918 	pci_read_config_byte(pdev, 0x7f, &regval);
1919 	regval &= ~(1 << 7);
1920 	pci_write_config_byte(pdev, 0x7f, regval);
1921 
1922 	/* enable swncq */
1923 	tmp = readl(mmio + NV_CTL_MCP55);
1924 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1925 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1926 
1927 	/* enable irq intr */
1928 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1929 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1930 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1931 
1932 	/* clear port irq */
1933 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1934 }
1935 
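/* SCSI slave_config hook: after the generic libata configuration, drop the
 * queue depth to 1 for Maxtor drives behind MCP51, or behind MCP55
 * revisions <= 0xa2, which effectively disables SWNCQ for those
 * combinations.
 */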
1936 static int nv_swncq_slave_config(struct scsi_device *sdev)
1937 {
1938 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1939 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1940 	struct ata_device *dev;
1941 	int rc;
1942 	u8 rev;
1943 	u8 check_maxtor = 0;
1944 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1945 
1946 	rc = ata_scsi_slave_config(sdev);
1947 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1948 		/* Not a proper libata device, ignore */
1949 		return rc;
1950 
1951 	dev = &ap->link.device[sdev->id];
1952 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1953 		return rc;
1954 
1955 	/* if MCP51 and Maxtor, then disable ncq */
1956 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1957 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1958 		check_maxtor = 1;
1959 
1960 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1961 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1962 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1963 		pci_read_config_byte(pdev, 0x8, &rev);
1964 		if (rev <= 0xa2)
1965 			check_maxtor = 1;
1966 	}
1967 
1968 	if (!check_maxtor)
1969 		return rc;
1970 
1971 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1972 
1973 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1974 		ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
1975 		ata_dev_printk(dev, KERN_NOTICE,
1976 			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1977 	}
1978 
1979 	return rc;
1980 }
1981 
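/* Per-port SWNCQ setup: keep the BMDMA resources (non-NCQ commands still use
 * BMDMA), allocate one PRD table per possible tag, and cache the MMIO
 * addresses for SActive, the interrupt status word and the DMA-setup tag
 * register.
 */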
1982 static int nv_swncq_port_start(struct ata_port *ap)
1983 {
1984 	struct device *dev = ap->host->dev;
1985 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1986 	struct nv_swncq_port_priv *pp;
1987 	int rc;
1988 
1989 	/* we might fall back to bmdma, allocate bmdma resources */
1990 	rc = ata_bmdma_port_start(ap);
1991 	if (rc)
1992 		return rc;
1993 
1994 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1995 	if (!pp)
1996 		return -ENOMEM;
1997 
1998 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1999 				      &pp->prd_dma, GFP_KERNEL);
2000 	if (!pp->prd)
2001 		return -ENOMEM;
2002 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
2003 
2004 	ap->private_data = pp;
2005 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
2006 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
2007 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
2008 
2009 	return 0;
2010 }
2011 
2012 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
2013 {
2014 	if (qc->tf.protocol != ATA_PROT_NCQ) {
2015 		ata_bmdma_qc_prep(qc);
2016 		return;
2017 	}
2018 
2019 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2020 		return;
2021 
2022 	nv_swncq_fill_sg(qc);
2023 }
2024 
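/* Build the PRD table for an NCQ command in its per-tag slot of the PRD
 * pool, splitting any segment that would cross a 64K boundary and marking
 * the final entry with ATA_PRD_EOT.
 */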
2025 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2026 {
2027 	struct ata_port *ap = qc->ap;
2028 	struct scatterlist *sg;
2029 	struct nv_swncq_port_priv *pp = ap->private_data;
2030 	struct ata_bmdma_prd *prd;
2031 	unsigned int si, idx;
2032 
2033 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
2034 
2035 	idx = 0;
2036 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2037 		u32 addr, offset;
2038 		u32 sg_len, len;
2039 
2040 		addr = (u32)sg_dma_address(sg);
2041 		sg_len = sg_dma_len(sg);
2042 
2043 		while (sg_len) {
2044 			offset = addr & 0xffff;
2045 			len = sg_len;
2046 			if ((offset + sg_len) > 0x10000)
2047 				len = 0x10000 - offset;
2048 
2049 			prd[idx].addr = cpu_to_le32(addr);
2050 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2051 
2052 			idx++;
2053 			sg_len -= len;
2054 			addr += len;
2055 		}
2056 	}
2057 
2058 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2059 }
2060 
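/* Issue one NCQ command: set its bit in SActive, update the SWNCQ
 * bookkeeping for the tag, then load the taskfile and fire the command
 * through the SFF interface.
 */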
2061 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2062 					  struct ata_queued_cmd *qc)
2063 {
2064 	struct nv_swncq_port_priv *pp = ap->private_data;
2065 
2066 	if (qc == NULL)
2067 		return 0;
2068 
2069 	DPRINTK("Enter\n");
2070 
2071 	writel((1 << qc->tag), pp->sactive_block);
2072 	pp->last_issue_tag = qc->tag;
2073 	pp->dhfis_bits &= ~(1 << qc->tag);
2074 	pp->dmafis_bits &= ~(1 << qc->tag);
2075 	pp->qc_active |= (0x1 << qc->tag);
2076 
2077 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2078 	ap->ops->sff_exec_command(ap, &qc->tf);
2079 
2080 	DPRINTK("Issued tag %u\n", qc->tag);
2081 
2082 	return 0;
2083 }
2084 
2085 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2086 {
2087 	struct ata_port *ap = qc->ap;
2088 	struct nv_swncq_port_priv *pp = ap->private_data;
2089 
2090 	if (qc->tf.protocol != ATA_PROT_NCQ)
2091 		return ata_bmdma_qc_issue(qc);
2092 
2093 	DPRINTK("Enter\n");
2094 
2095 	if (!pp->qc_active)
2096 		nv_swncq_issue_atacmd(ap, qc);
2097 	else
2098 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2099 
2100 	return 0;
2101 }
2102 
2103 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2104 {
2105 	u32 serror;
2106 	struct ata_eh_info *ehi = &ap->link.eh_info;
2107 
2108 	ata_ehi_clear_desc(ehi);
2109 
2110 	/* SError needs to be cleared; otherwise, the port might lock up */
2111 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2112 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2113 
2114 	/* analyze @fis */
2115 	if (fis & NV_SWNCQ_IRQ_ADDED)
2116 		ata_ehi_push_desc(ehi, "hot plug");
2117 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2118 		ata_ehi_push_desc(ehi, "hot unplug");
2119 
2120 	ata_ehi_hotplugged(ehi);
2121 
2122 	/* okay, let's hand over to EH */
2123 	ehi->serror |= serror;
2124 
2125 	ata_port_freeze(ap);
2126 }
2127 
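/* Handle a Set Device Bits FIS: bail out on BMDMA errors, derive the
 * completed tags by comparing the cached qc_active with SActive and complete
 * them, then either reissue the last command if its D2H register FIS never
 * arrived or kick the next deferred command.
 */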
2128 static int nv_swncq_sdbfis(struct ata_port *ap)
2129 {
2130 	struct ata_queued_cmd *qc;
2131 	struct nv_swncq_port_priv *pp = ap->private_data;
2132 	struct ata_eh_info *ehi = &ap->link.eh_info;
2133 	u32 sactive;
2134 	int nr_done = 0;
2135 	u32 done_mask;
2136 	int i;
2137 	u8 host_stat;
2138 	u8 lack_dhfis = 0;
2139 
2140 	host_stat = ap->ops->bmdma_status(ap);
2141 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2142 		/* error when transferring data to/from memory */
2143 		ata_ehi_clear_desc(ehi);
2144 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2145 		ehi->err_mask |= AC_ERR_HOST_BUS;
2146 		ehi->action |= ATA_EH_RESET;
2147 		return -EINVAL;
2148 	}
2149 
2150 	ap->ops->sff_irq_clear(ap);
2151 	__ata_bmdma_stop(ap);
2152 
2153 	sactive = readl(pp->sactive_block);
2154 	done_mask = pp->qc_active ^ sactive;
2155 
2156 	if (unlikely(done_mask & sactive)) {
2157 		ata_ehi_clear_desc(ehi);
2158 		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2159 				  "(%08x->%08x)", pp->qc_active, sactive);
2160 		ehi->err_mask |= AC_ERR_HSM;
2161 		ehi->action |= ATA_EH_RESET;
2162 		return -EINVAL;
2163 	}
2164 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
2165 		if (!(done_mask & (1 << i)))
2166 			continue;
2167 
2168 		qc = ata_qc_from_tag(ap, i);
2169 		if (qc) {
2170 			ata_qc_complete(qc);
2171 			pp->qc_active &= ~(1 << i);
2172 			pp->dhfis_bits &= ~(1 << i);
2173 			pp->dmafis_bits &= ~(1 << i);
2174 			pp->sdbfis_bits |= (1 << i);
2175 			nr_done++;
2176 		}
2177 	}
2178 
2179 	if (!ap->qc_active) {
2180 		DPRINTK("over\n");
2181 		nv_swncq_pp_reinit(ap);
2182 		return nr_done;
2183 	}
2184 
2185 	if (pp->qc_active & pp->dhfis_bits)
2186 		return nr_done;
2187 
2188 	if ((pp->ncq_flags & ncq_saw_backout) ||
2189 	    (pp->qc_active ^ pp->dhfis_bits))
2190 		/* if the controller can't get a device to host register FIS,
2191 		 * the driver needs to reissue the new command.
2192 		 */
2193 		lack_dhfis = 1;
2194 
2195 	DPRINTK("id 0x%x QC: qc_active 0x%x,"
2196 		"SWNCQ:qc_active 0x%X defer_bits %X "
2197 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2198 		ap->print_id, ap->qc_active, pp->qc_active,
2199 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2200 		pp->dmafis_bits, pp->last_issue_tag);
2201 
2202 	nv_swncq_fis_reinit(ap);
2203 
2204 	if (lack_dhfis) {
2205 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2206 		nv_swncq_issue_atacmd(ap, qc);
2207 		return nr_done;
2208 	}
2209 
2210 	if (pp->defer_queue.defer_bits) {
2211 		/* send deferral queue command */
2212 		qc = nv_swncq_qc_from_dq(ap);
2213 		WARN_ON(qc == NULL);
2214 		nv_swncq_issue_atacmd(ap, qc);
2215 	}
2216 
2217 	return nr_done;
2218 }
2219 
2220 static inline u32 nv_swncq_tag(struct ata_port *ap)
2221 {
2222 	struct nv_swncq_port_priv *pp = ap->private_data;
2223 	u32 tag;
2224 
2225 	tag = readb(pp->tag_block) >> 2;
2226 	return (tag & 0x1f);
2227 }
2228 
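/* A DMA Setup FIS arrived: stop any previous transfer, look up the queued
 * command for the tag the controller reported, point the BMDMA engine at
 * that tag's PRD table and start it in the right direction.
 */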
2229 static int nv_swncq_dmafis(struct ata_port *ap)
2230 {
2231 	struct ata_queued_cmd *qc;
2232 	unsigned int rw;
2233 	u8 dmactl;
2234 	u32 tag;
2235 	struct nv_swncq_port_priv *pp = ap->private_data;
2236 
2237 	__ata_bmdma_stop(ap);
2238 	tag = nv_swncq_tag(ap);
2239 
2240 	DPRINTK("dma setup tag 0x%x\n", tag);
2241 	qc = ata_qc_from_tag(ap, tag);
2242 
2243 	if (unlikely(!qc))
2244 		return 0;
2245 
2246 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2247 
2248 	/* load PRD table addr. */
2249 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2250 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2251 
2252 	/* specify data direction, triple-check start bit is clear */
2253 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2254 	dmactl &= ~ATA_DMA_WR;
2255 	if (!rw)
2256 		dmactl |= ATA_DMA_WR;
2257 
2258 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2259 
2260 	return 1;
2261 }
2262 
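/* Per-port SWNCQ interrupt handling, driven by the FIS bits in the MCP55
 * status word: hotplug events and ATA errors freeze the port for EH, an SDB
 * FIS harvests completed tags, a D2H register FIS confirms the last issued
 * command (and may trigger the next deferred one), and a DMA Setup FIS
 * starts the BMDMA transfer for the reported tag.
 */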
2263 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2264 {
2265 	struct nv_swncq_port_priv *pp = ap->private_data;
2266 	struct ata_queued_cmd *qc;
2267 	struct ata_eh_info *ehi = &ap->link.eh_info;
2268 	u32 serror;
2269 	u8 ata_stat;
2270 	int rc = 0;
2271 
2272 	ata_stat = ap->ops->sff_check_status(ap);
2273 	nv_swncq_irq_clear(ap, fis);
2274 	if (!fis)
2275 		return;
2276 
2277 	if (ap->pflags & ATA_PFLAG_FROZEN)
2278 		return;
2279 
2280 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2281 		nv_swncq_hotplug(ap, fis);
2282 		return;
2283 	}
2284 
2285 	if (!pp->qc_active)
2286 		return;
2287 
2288 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2289 		return;
2290 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2291 
2292 	if (ata_stat & ATA_ERR) {
2293 		ata_ehi_clear_desc(ehi);
2294 		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2295 		ehi->err_mask |= AC_ERR_DEV;
2296 		ehi->serror |= serror;
2297 		ehi->action |= ATA_EH_RESET;
2298 		ata_port_freeze(ap);
2299 		return;
2300 	}
2301 
2302 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2303 		/* If the IRQ signals a backout, the driver must issue
2304 		 * the new command again some time later.
2305 		 */
2306 		pp->ncq_flags |= ncq_saw_backout;
2307 	}
2308 
2309 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2310 		pp->ncq_flags |= ncq_saw_sdb;
2311 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2312 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2313 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2314 			pp->dmafis_bits, readl(pp->sactive_block));
2315 		rc = nv_swncq_sdbfis(ap);
2316 		if (rc < 0)
2317 			goto irq_error;
2318 	}
2319 
2320 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2321 		/* The interrupt indicates the new command
2322 		 * was transmitted correctly to the drive.
2323 		 */
2324 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2325 		pp->ncq_flags |= ncq_saw_d2h;
2326 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2327 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2328 			ehi->err_mask |= AC_ERR_HSM;
2329 			ehi->action |= ATA_EH_RESET;
2330 			goto irq_error;
2331 		}
2332 
2333 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2334 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2335 			ata_stat = ap->ops->sff_check_status(ap);
2336 			if (ata_stat & ATA_BUSY)
2337 				goto irq_exit;
2338 
2339 			if (pp->defer_queue.defer_bits) {
2340 				DPRINTK("send next command\n");
2341 				qc = nv_swncq_qc_from_dq(ap);
2342 				nv_swncq_issue_atacmd(ap, qc);
2343 			}
2344 		}
2345 	}
2346 
2347 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2348 		/* program the dma controller with appropriate PRD buffers
2349 		 * and start the DMA transfer for the requested command.
2350 		 */
2351 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2352 		pp->ncq_flags |= ncq_saw_dmas;
2353 		rc = nv_swncq_dmafis(ap);
2354 	}
2355 
2356 irq_exit:
2357 	return;
2358 irq_error:
2359 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2360 	ata_port_freeze(ap);
2361 	return;
2362 }
2363 
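/* Top-level MCP55 interrupt handler: read the shared status register once,
 * then give each port its slice of the word, routing it to the SWNCQ handler
 * while NCQ commands are active and to the generic nv_host_intr() path
 * otherwise.
 */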
2364 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2365 {
2366 	struct ata_host *host = dev_instance;
2367 	unsigned int i;
2368 	unsigned int handled = 0;
2369 	unsigned long flags;
2370 	u32 irq_stat;
2371 
2372 	spin_lock_irqsave(&host->lock, flags);
2373 
2374 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2375 
2376 	for (i = 0; i < host->n_ports; i++) {
2377 		struct ata_port *ap = host->ports[i];
2378 
2379 		if (ap->link.sactive) {
2380 			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2381 			handled = 1;
2382 		} else {
2383 			if (irq_stat)	/* preserve hotplug bits */
2384 				nv_swncq_irq_clear(ap, 0xfff0);
2385 
2386 			handled += nv_host_intr(ap, (u8)irq_stat);
2387 		}
2388 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2389 	}
2390 
2391 	spin_unlock_irqrestore(&host->lock, flags);
2392 
2393 	return IRQ_RETVAL(handled);
2394 }
2395 
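/* PCI probe: skip the sibling IDE function, choose ADMA or SWNCQ when the
 * module parameters allow it, map the MMIO BAR, wire up the SCR blocks and
 * (on CK804 and later) the extended SATA register space, then activate the
 * host with the chosen interrupt handler.
 */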
2396 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2397 {
2398 	static int printed_version;
2399 	const struct ata_port_info *ppi[] = { NULL, NULL };
2400 	struct nv_pi_priv *ipriv;
2401 	struct ata_host *host;
2402 	struct nv_host_priv *hpriv;
2403 	int rc;
2404 	u32 bar;
2405 	void __iomem *base;
2406 	unsigned long type = ent->driver_data;
2407 
2408 	/* Make sure this is a SATA controller by counting the number of BARs
2409 	 * (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2410 	 * it's an IDE controller and we ignore it. */
2411 	for (bar = 0; bar < 6; bar++)
2412 		if (pci_resource_start(pdev, bar) == 0)
2413 			return -ENODEV;
2414 
2415 	if (!printed_version++)
2416 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2417 
2418 	rc = pcim_enable_device(pdev);
2419 	if (rc)
2420 		return rc;
2421 
2422 	/* determine type and allocate host */
2423 	if (type == CK804 && adma_enabled) {
2424 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2425 		type = ADMA;
2426 	} else if (type == MCP5x && swncq_enabled) {
2427 		dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2428 		type = SWNCQ;
2429 	}
2430 
2431 	ppi[0] = &nv_port_info[type];
2432 	ipriv = ppi[0]->private_data;
2433 	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2434 	if (rc)
2435 		return rc;
2436 
2437 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2438 	if (!hpriv)
2439 		return -ENOMEM;
2440 	hpriv->type = type;
2441 	host->private_data = hpriv;
2442 
2443 	/* request and iomap NV_MMIO_BAR */
2444 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2445 	if (rc)
2446 		return rc;
2447 
2448 	/* configure SCR access */
2449 	base = host->iomap[NV_MMIO_BAR];
2450 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2451 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2452 
2453 	/* enable SATA space for CK804 */
2454 	if (type >= CK804) {
2455 		u8 regval;
2456 
2457 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2458 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2459 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2460 	}
2461 
2462 	/* init ADMA */
2463 	if (type == ADMA) {
2464 		rc = nv_adma_host_init(host);
2465 		if (rc)
2466 			return rc;
2467 	} else if (type == SWNCQ)
2468 		nv_swncq_host_init(host);
2469 
2470 	if (msi_enabled) {
2471 		dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
2472 		pci_enable_msi(pdev);
2473 	}
2474 
2475 	pci_set_master(pdev);
2476 	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2477 }
2478 
2479 #ifdef CONFIG_PM
2480 static int nv_pci_device_resume(struct pci_dev *pdev)
2481 {
2482 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2483 	struct nv_host_priv *hpriv = host->private_data;
2484 	int rc;
2485 
2486 	rc = ata_pci_device_do_resume(pdev);
2487 	if (rc)
2488 		return rc;
2489 
2490 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2491 		if (hpriv->type >= CK804) {
2492 			u8 regval;
2493 
2494 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2495 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2496 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2497 		}
2498 		if (hpriv->type == ADMA) {
2499 			u32 tmp32;
2500 			struct nv_adma_port_priv *pp;
2501 			/* enable/disable ADMA on the ports appropriately */
2502 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2503 
2504 			pp = host->ports[0]->private_data;
2505 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2506 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2507 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2508 			else
2509 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2510 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2511 			pp = host->ports[1]->private_data;
2512 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2513 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2514 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2515 			else
2516 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2517 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2518 
2519 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2520 		}
2521 	}
2522 
2523 	ata_host_resume(host);
2524 
2525 	return 0;
2526 }
2527 #endif
2528 
2529 static void nv_ck804_host_stop(struct ata_host *host)
2530 {
2531 	struct pci_dev *pdev = to_pci_dev(host->dev);
2532 	u8 regval;
2533 
2534 	/* disable SATA space for CK804 */
2535 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2536 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2537 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2538 }
2539 
2540 static void nv_adma_host_stop(struct ata_host *host)
2541 {
2542 	struct pci_dev *pdev = to_pci_dev(host->dev);
2543 	u32 tmp32;
2544 
2545 	/* disable ADMA on the ports */
2546 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2547 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2548 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2549 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2550 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2551 
2552 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2553 
2554 	nv_ck804_host_stop(host);
2555 }
2556 
2557 static int __init nv_init(void)
2558 {
2559 	return pci_register_driver(&nv_pci_driver);
2560 }
2561 
2562 static void __exit nv_exit(void)
2563 {
2564 	pci_unregister_driver(&nv_pci_driver);
2565 }
2566 
2567 module_init(nv_init);
2568 module_exit(nv_exit);
2569 module_param_named(adma, adma_enabled, bool, 0444);
2570 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2571 module_param_named(swncq, swncq_enabled, bool, 0444);
2572 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2573 module_param_named(msi, msi_enabled, bool, 0444);
2574 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2575 
2576