xref: /linux/drivers/ata/sata_nv.c (revision 2277ab4a1df50e05bc732fe9488d4e902bb8399a)
1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/DocBook/libata.*
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a similar
28  *  fashion as with other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50 
51 #define DRV_NAME			"sata_nv"
52 #define DRV_VERSION			"3.5"
53 
54 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
55 
56 enum {
57 	NV_MMIO_BAR			= 5,
58 
59 	NV_PORTS			= 2,
60 	NV_PIO_MASK			= ATA_PIO4,
61 	NV_MWDMA_MASK			= ATA_MWDMA2,
62 	NV_UDMA_MASK			= ATA_UDMA6,
63 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
64 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
65 
66 	/* INT_STATUS/ENABLE */
67 	NV_INT_STATUS			= 0x10,
68 	NV_INT_ENABLE			= 0x11,
69 	NV_INT_STATUS_CK804		= 0x440,
70 	NV_INT_ENABLE_CK804		= 0x441,
71 
72 	/* INT_STATUS/ENABLE bits */
73 	NV_INT_DEV			= 0x01,
74 	NV_INT_PM			= 0x02,
75 	NV_INT_ADDED			= 0x04,
76 	NV_INT_REMOVED			= 0x08,
77 
78 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
79 
80 	NV_INT_ALL			= 0x0f,
81 	NV_INT_MASK			= NV_INT_DEV |
82 					  NV_INT_ADDED | NV_INT_REMOVED,
83 
84 	/* INT_CONFIG */
85 	NV_INT_CONFIG			= 0x12,
86 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
87 
88 	// For PCI config register 20
89 	NV_MCP_SATA_CFG_20		= 0x50,
90 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
92 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
93 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
94 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
95 
96 	NV_ADMA_MAX_CPBS		= 32,
97 	NV_ADMA_CPB_SZ			= 128,
98 	NV_ADMA_APRD_SZ			= 16,
99 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
100 					   NV_ADMA_APRD_SZ,
101 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
102 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
104 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105 
106 	/* BAR5 offset to ADMA general registers */
107 	NV_ADMA_GEN			= 0x400,
108 	NV_ADMA_GEN_CTL			= 0x00,
109 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
110 
111 	/* BAR5 offset to ADMA ports */
112 	NV_ADMA_PORT			= 0x480,
113 
114 	/* size of ADMA port register space  */
115 	NV_ADMA_PORT_SIZE		= 0x100,
116 
117 	/* ADMA port registers */
118 	NV_ADMA_CTL			= 0x40,
119 	NV_ADMA_CPB_COUNT		= 0x42,
120 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
121 	NV_ADMA_STAT			= 0x44,
122 	NV_ADMA_CPB_BASE_LOW		= 0x48,
123 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
124 	NV_ADMA_APPEND			= 0x50,
125 	NV_ADMA_NOTIFIER		= 0x68,
126 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
127 
128 	/* NV_ADMA_CTL register bits */
129 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
130 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
131 	NV_ADMA_CTL_GO			= (1 << 7),
132 	NV_ADMA_CTL_AIEN		= (1 << 8),
133 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
134 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
135 
136 	/* CPB response flag bits */
137 	NV_CPB_RESP_DONE		= (1 << 0),
138 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
139 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
140 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
141 
142 	/* CPB control flag bits */
143 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
144 	NV_CPB_CTL_QUEUE		= (1 << 1),
145 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
146 	NV_CPB_CTL_IEN			= (1 << 3),
147 	NV_CPB_CTL_FPDMA		= (1 << 4),
148 
149 	/* APRD flags */
150 	NV_APRD_WRITE			= (1 << 1),
151 	NV_APRD_END			= (1 << 2),
152 	NV_APRD_CONT			= (1 << 3),
153 
154 	/* NV_ADMA_STAT flags */
155 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
156 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
157 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
158 	NV_ADMA_STAT_CPBERR		= (1 << 4),
159 	NV_ADMA_STAT_SERROR		= (1 << 5),
160 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
161 	NV_ADMA_STAT_IDLE		= (1 << 8),
162 	NV_ADMA_STAT_LEGACY		= (1 << 9),
163 	NV_ADMA_STAT_STOPPED		= (1 << 10),
164 	NV_ADMA_STAT_DONE		= (1 << 12),
165 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
166 					  NV_ADMA_STAT_TIMEOUT,
167 
168 	/* port flags */
169 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
170 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
171 
172 	/* MCP55 reg offset */
173 	NV_CTL_MCP55			= 0x400,
174 	NV_INT_STATUS_MCP55		= 0x440,
175 	NV_INT_ENABLE_MCP55		= 0x444,
176 	NV_NCQ_REG_MCP55		= 0x448,
177 
178 	/* MCP55 */
179 	NV_INT_ALL_MCP55		= 0xffff,
180 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
181 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
182 
183 	/* SWNCQ ENABLE BITS */
184 	NV_CTL_PRI_SWNCQ		= 0x02,
185 	NV_CTL_SEC_SWNCQ		= 0x04,
186 
187 	/* SW NCQ status bits */
188 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
189 	NV_SWNCQ_IRQ_PM			= (1 << 1),
190 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
191 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
192 
193 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
194 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
195 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
196 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
197 
198 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
199 					  NV_SWNCQ_IRQ_REMOVED,
200 
201 };
202 
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 	__le64			addr;
206 	__le32			len;
207 	u8			flags;
208 	u8			packet_len;
209 	__le16			reserved;
210 };
211 
212 enum nv_adma_regbits {
213 	CMDEND	= (1 << 15),		/* end of command list */
214 	WNB	= (1 << 14),		/* wait-not-BSY */
215 	IGN	= (1 << 13),		/* ignore this entry */
216 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
217 	DA2	= (1 << (2 + 8)),
218 	DA1	= (1 << (1 + 8)),
219 	DA0	= (1 << (0 + 8)),
220 };
221 
222 /* ADMA Command Parameter Block
223    The first 5 SG segments are stored inside the Command Parameter Block itself.
224    If there are more than 5 segments, the remainder are stored in a separate
225    memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 	u8			resp_flags;    /* 0 */
228 	u8			reserved1;     /* 1 */
229 	u8			ctl_flags;     /* 2 */
230 	/* len is the length of the taskfile in 64-bit words */
231 	u8			len;		/* 3  */
232 	u8			tag;           /* 4 */
233 	u8			next_cpb_idx;  /* 5 */
234 	__le16			reserved2;     /* 6-7 */
235 	__le16			tf[12];        /* 8-31 */
236 	struct nv_adma_prd	aprd[5];       /* 32-111 */
237 	__le64			next_aprd;     /* 112-119 */
238 	__le64			reserved3;     /* 120-127 */
239 };
240 
241 
242 struct nv_adma_port_priv {
243 	struct nv_adma_cpb	*cpb;
244 	dma_addr_t		cpb_dma;
245 	struct nv_adma_prd	*aprd;
246 	dma_addr_t		aprd_dma;
247 	void __iomem		*ctl_block;
248 	void __iomem		*gen_block;
249 	void __iomem		*notifier_clear_block;
250 	u64			adma_dma_mask;
251 	u8			flags;
252 	int			last_issue_ncq;
253 };
254 
255 struct nv_host_priv {
256 	unsigned long		type;
257 };
258 
259 struct defer_queue {
260 	u32		defer_bits;
261 	unsigned int	head;
262 	unsigned int	tail;
263 	unsigned int	tag[ATA_MAX_QUEUE];
264 };
265 
266 enum ncq_saw_flag_list {
267 	ncq_saw_d2h	= (1U << 0),
268 	ncq_saw_dmas	= (1U << 1),
269 	ncq_saw_sdb	= (1U << 2),
270 	ncq_saw_backout	= (1U << 3),
271 };
272 
273 struct nv_swncq_port_priv {
274 	struct ata_prd	*prd;	 /* our SG list */
275 	dma_addr_t	prd_dma; /* and its DMA mapping */
276 	void __iomem	*sactive_block;
277 	void __iomem	*irq_block;
278 	void __iomem	*tag_block;
279 	u32		qc_active;
280 
281 	unsigned int	last_issue_tag;
282 
283 	/* FIFO circular queue to store deferred commands */
284 	struct defer_queue defer_queue;
285 
286 	/* for NCQ interrupt analysis */
287 	u32		dhfis_bits;
288 	u32		dmafis_bits;
289 	u32		sdbfis_bits;
290 
291 	unsigned int	ncq_flags;
292 };
293 
294 
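/* Test the per-port ADMA interrupt bit in the general control/status value:
   bit 19 for port 0 and bit 31 for port 1, since the ports' fields are spaced
   12 bits apart. */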
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
296 
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 
308 static int nv_hardreset(struct ata_link *link, unsigned int *class,
309 			unsigned long deadline);
310 static void nv_nf2_freeze(struct ata_port *ap);
311 static void nv_nf2_thaw(struct ata_port *ap);
312 static void nv_ck804_freeze(struct ata_port *ap);
313 static void nv_ck804_thaw(struct ata_port *ap);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332 
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347 
348 enum nv_host_type
349 {
350 	GENERIC,
351 	NFORCE2,
352 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
353 	CK804,
354 	ADMA,
355 	MCP5x,
356 	SWNCQ,
357 };
358 
359 static const struct pci_device_id nv_pci_tbl[] = {
360 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
373 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
374 
375 	{ } /* terminate list */
376 };
377 
378 static struct pci_driver nv_pci_driver = {
379 	.name			= DRV_NAME,
380 	.id_table		= nv_pci_tbl,
381 	.probe			= nv_init_one,
382 #ifdef CONFIG_PM
383 	.suspend		= ata_pci_device_suspend,
384 	.resume			= nv_pci_device_resume,
385 #endif
386 	.remove			= ata_pci_remove_one,
387 };
388 
389 static struct scsi_host_template nv_sht = {
390 	ATA_BMDMA_SHT(DRV_NAME),
391 };
392 
393 static struct scsi_host_template nv_adma_sht = {
394 	ATA_NCQ_SHT(DRV_NAME),
395 	.can_queue		= NV_ADMA_MAX_CPBS,
396 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
397 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
398 	.slave_configure	= nv_adma_slave_config,
399 };
400 
401 static struct scsi_host_template nv_swncq_sht = {
402 	ATA_NCQ_SHT(DRV_NAME),
403 	.can_queue		= ATA_MAX_QUEUE,
404 	.sg_tablesize		= LIBATA_MAX_PRD,
405 	.dma_boundary		= ATA_DMA_BOUNDARY,
406 	.slave_configure	= nv_swncq_slave_config,
407 };
408 
409 /*
410  * NV SATA controllers have various different problems with hardreset
411  * protocol depending on the specific controller and device.
412  *
413  * GENERIC:
414  *
415  *  bko11195 reports that link doesn't come online after hardreset on
416  *  generic nv's and there have been several other similar reports on
417  *  linux-ide.
418  *
419  *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
420  *  softreset.
421  *
422  * NF2/3:
423  *
424  *  bko3352 reports nf2/3 controllers can't determine device signature
425  *  reliably after hardreset.  The following thread reports detection
426  *  failure on cold boot with the standard debouncing timing.
427  *
428  *  http://thread.gmane.org/gmane.linux.ide/34098
429  *
430  *  bko12176 reports that hardreset fails to bring up the link during
431  *  boot on nf2.
432  *
433  * CK804:
434  *
435  *  For initial probing after boot and hot plugging, hardreset mostly
436  *  works fine on CK804 but curiously, reprobing on the initial port
437  *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
438  *  FIS in somewhat undeterministic way.
439  *  FIS in a somewhat nondeterministic way.
440  * SWNCQ:
441  *
442  *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
443  *  hardreset should be used and hardreset can't report proper
444  *  signature, which suggests that mcp5x is closer to nf2 as far as
445  *  reset quirkiness is concerned.
446  *
447  *  bko12703 reports that boot probing fails for Intel SSDs with
448  *  hardreset.  Link fails to come online.  Softreset works fine.
449  *
450  * The failures are varied but the following patterns seem true for
451  * all flavors.
452  *
453  * - Softreset during boot always works.
454  *
455  * - Hardreset during boot sometimes fails to bring up the link on
456  *   certain combinations and device signature acquisition is
457  *   unreliable.
458  *
459  * - Hardreset is often necessary after hotplug.
460  *
461  * So, preferring softreset for boot probing and error handling (as
462  * hardreset might bring down the link) but using hardreset for
463  * post-boot probing should work around the above issues in most
464  * cases.  Define nv_hardreset() which only kicks in for post-boot
465  * probing and use it for all variants.
466  */
467 static struct ata_port_operations nv_generic_ops = {
468 	.inherits		= &ata_bmdma_port_ops,
469 	.lost_interrupt		= ATA_OP_NULL,
470 	.scr_read		= nv_scr_read,
471 	.scr_write		= nv_scr_write,
472 	.hardreset		= nv_hardreset,
473 };
474 
475 static struct ata_port_operations nv_nf2_ops = {
476 	.inherits		= &nv_generic_ops,
477 	.freeze			= nv_nf2_freeze,
478 	.thaw			= nv_nf2_thaw,
479 };
480 
481 static struct ata_port_operations nv_ck804_ops = {
482 	.inherits		= &nv_generic_ops,
483 	.freeze			= nv_ck804_freeze,
484 	.thaw			= nv_ck804_thaw,
485 	.host_stop		= nv_ck804_host_stop,
486 };
487 
488 static struct ata_port_operations nv_adma_ops = {
489 	.inherits		= &nv_ck804_ops,
490 
491 	.check_atapi_dma	= nv_adma_check_atapi_dma,
492 	.sff_tf_read		= nv_adma_tf_read,
493 	.qc_defer		= ata_std_qc_defer,
494 	.qc_prep		= nv_adma_qc_prep,
495 	.qc_issue		= nv_adma_qc_issue,
496 	.sff_irq_clear		= nv_adma_irq_clear,
497 
498 	.freeze			= nv_adma_freeze,
499 	.thaw			= nv_adma_thaw,
500 	.error_handler		= nv_adma_error_handler,
501 	.post_internal_cmd	= nv_adma_post_internal_cmd,
502 
503 	.port_start		= nv_adma_port_start,
504 	.port_stop		= nv_adma_port_stop,
505 #ifdef CONFIG_PM
506 	.port_suspend		= nv_adma_port_suspend,
507 	.port_resume		= nv_adma_port_resume,
508 #endif
509 	.host_stop		= nv_adma_host_stop,
510 };
511 
512 static struct ata_port_operations nv_swncq_ops = {
513 	.inherits		= &nv_generic_ops,
514 
515 	.qc_defer		= ata_std_qc_defer,
516 	.qc_prep		= nv_swncq_qc_prep,
517 	.qc_issue		= nv_swncq_qc_issue,
518 
519 	.freeze			= nv_mcp55_freeze,
520 	.thaw			= nv_mcp55_thaw,
521 	.error_handler		= nv_swncq_error_handler,
522 
523 #ifdef CONFIG_PM
524 	.port_suspend		= nv_swncq_port_suspend,
525 	.port_resume		= nv_swncq_port_resume,
526 #endif
527 	.port_start		= nv_swncq_port_start,
528 };
529 
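/* Per-flavor glue: the interrupt handler and SCSI host template that go with
   each nv_port_info[] entry, passed via private_data as a compound-literal
   pointer built by NV_PI_PRIV(). */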
530 struct nv_pi_priv {
531 	irq_handler_t			irq_handler;
532 	struct scsi_host_template	*sht;
533 };
534 
535 #define NV_PI_PRIV(_irq_handler, _sht) \
536 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
537 
538 static const struct ata_port_info nv_port_info[] = {
539 	/* generic */
540 	{
541 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
542 		.pio_mask	= NV_PIO_MASK,
543 		.mwdma_mask	= NV_MWDMA_MASK,
544 		.udma_mask	= NV_UDMA_MASK,
545 		.port_ops	= &nv_generic_ops,
546 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
547 	},
548 	/* nforce2/3 */
549 	{
550 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
551 		.pio_mask	= NV_PIO_MASK,
552 		.mwdma_mask	= NV_MWDMA_MASK,
553 		.udma_mask	= NV_UDMA_MASK,
554 		.port_ops	= &nv_nf2_ops,
555 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
556 	},
557 	/* ck804 */
558 	{
559 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
560 		.pio_mask	= NV_PIO_MASK,
561 		.mwdma_mask	= NV_MWDMA_MASK,
562 		.udma_mask	= NV_UDMA_MASK,
563 		.port_ops	= &nv_ck804_ops,
564 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
565 	},
566 	/* ADMA */
567 	{
568 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
569 				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
570 		.pio_mask	= NV_PIO_MASK,
571 		.mwdma_mask	= NV_MWDMA_MASK,
572 		.udma_mask	= NV_UDMA_MASK,
573 		.port_ops	= &nv_adma_ops,
574 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
575 	},
576 	/* MCP5x */
577 	{
578 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
579 		.pio_mask	= NV_PIO_MASK,
580 		.mwdma_mask	= NV_MWDMA_MASK,
581 		.udma_mask	= NV_UDMA_MASK,
582 		.port_ops	= &nv_generic_ops,
583 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
584 	},
585 	/* SWNCQ */
586 	{
587 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
588 				  ATA_FLAG_NCQ,
589 		.pio_mask	= NV_PIO_MASK,
590 		.mwdma_mask	= NV_MWDMA_MASK,
591 		.udma_mask	= NV_UDMA_MASK,
592 		.port_ops	= &nv_swncq_ops,
593 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
594 	},
595 };
596 
597 MODULE_AUTHOR("NVIDIA");
598 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
599 MODULE_LICENSE("GPL");
600 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
601 MODULE_VERSION(DRV_VERSION);
602 
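/* ADMA support is disabled by default; software NCQ is enabled by default. */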
603 static int adma_enabled;
604 static int swncq_enabled = 1;
605 
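/* Switch the port from ADMA mode back to legacy register mode: wait briefly
   for the ADMA engine to go idle, clear the GO bit, then wait for the LEGACY
   status bit before marking the port as being in register mode. */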
606 static void nv_adma_register_mode(struct ata_port *ap)
607 {
608 	struct nv_adma_port_priv *pp = ap->private_data;
609 	void __iomem *mmio = pp->ctl_block;
610 	u16 tmp, status;
611 	int count = 0;
612 
613 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
614 		return;
615 
616 	status = readw(mmio + NV_ADMA_STAT);
617 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
618 		ndelay(50);
619 		status = readw(mmio + NV_ADMA_STAT);
620 		count++;
621 	}
622 	if (count == 20)
623 		ata_port_printk(ap, KERN_WARNING,
624 			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
625 			status);
626 
627 	tmp = readw(mmio + NV_ADMA_CTL);
628 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
629 
630 	count = 0;
631 	status = readw(mmio + NV_ADMA_STAT);
632 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
633 		ndelay(50);
634 		status = readw(mmio + NV_ADMA_STAT);
635 		count++;
636 	}
637 	if (count == 20)
638 		ata_port_printk(ap, KERN_WARNING,
639 			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
640 			 status);
641 
642 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
643 }
644 
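/* Switch the port from register mode into ADMA mode: set the GO bit and wait
   for LEGACY to clear and IDLE to assert.  Must not be called while the port
   is restricted to the legacy interface for an ATAPI device. */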
645 static void nv_adma_mode(struct ata_port *ap)
646 {
647 	struct nv_adma_port_priv *pp = ap->private_data;
648 	void __iomem *mmio = pp->ctl_block;
649 	u16 tmp, status;
650 	int count = 0;
651 
652 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
653 		return;
654 
655 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
656 
657 	tmp = readw(mmio + NV_ADMA_CTL);
658 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
659 
660 	status = readw(mmio + NV_ADMA_STAT);
661 	while (((status & NV_ADMA_STAT_LEGACY) ||
662 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
663 		ndelay(50);
664 		status = readw(mmio + NV_ADMA_STAT);
665 		count++;
666 	}
667 	if (count == 20)
668 		ata_port_printk(ap, KERN_WARNING,
669 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
670 			status);
671 
672 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
673 }
674 
675 static int nv_adma_slave_config(struct scsi_device *sdev)
676 {
677 	struct ata_port *ap = ata_shost_to_port(sdev->host);
678 	struct nv_adma_port_priv *pp = ap->private_data;
679 	struct nv_adma_port_priv *port0, *port1;
680 	struct scsi_device *sdev0, *sdev1;
681 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
682 	unsigned long segment_boundary, flags;
683 	unsigned short sg_tablesize;
684 	int rc;
685 	int adma_enable;
686 	u32 current_reg, new_reg, config_mask;
687 
688 	rc = ata_scsi_slave_config(sdev);
689 
690 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
691 		/* Not a proper libata device, ignore */
692 		return rc;
693 
694 	spin_lock_irqsave(ap->lock, flags);
695 
696 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
697 		/*
698 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
699 		 * Therefore ATAPI commands are sent through the legacy interface.
700 		 * However, the legacy interface only supports 32-bit DMA.
701 		 * Restrict DMA parameters as required by the legacy interface
702 		 * when an ATAPI device is connected.
703 		 */
704 		segment_boundary = ATA_DMA_BOUNDARY;
705 		/* Subtract 1 since an extra entry may be needed for padding, see
706 		   libata-scsi.c */
707 		sg_tablesize = LIBATA_MAX_PRD - 1;
708 
709 		/* Since the legacy DMA engine is in use, we need to disable ADMA
710 		   on the port. */
711 		adma_enable = 0;
712 		nv_adma_register_mode(ap);
713 	} else {
714 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
715 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
716 		adma_enable = 1;
717 	}
718 
719 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
720 
721 	if (ap->port_no == 1)
722 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
723 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
724 	else
725 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
726 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
727 
728 	if (adma_enable) {
729 		new_reg = current_reg | config_mask;
730 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
731 	} else {
732 		new_reg = current_reg & ~config_mask;
733 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
734 	}
735 
736 	if (current_reg != new_reg)
737 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
738 
739 	port0 = ap->host->ports[0]->private_data;
740 	port1 = ap->host->ports[1]->private_data;
741 	sdev0 = ap->host->ports[0]->link.device[0].sdev;
742 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
743 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
744 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
745 		/** We have to set the DMA mask to 32-bit if either port is in
746 		    ATAPI mode, since they are on the same PCI device which is
747 		    used for DMA mapping. If we set the mask we also need to set
748 		    the bounce limit on both ports to ensure that the block
749 		    layer doesn't feed addresses that cause DMA mapping to
750 		    choke. If either SCSI device is not allocated yet, it's OK
751 		    since that port will discover its correct setting when it
752 		    does get allocated.
753 		    Note: Setting 32-bit mask should not fail. */
754 		if (sdev0)
755 			blk_queue_bounce_limit(sdev0->request_queue,
756 					       ATA_DMA_MASK);
757 		if (sdev1)
758 			blk_queue_bounce_limit(sdev1->request_queue,
759 					       ATA_DMA_MASK);
760 
761 		pci_set_dma_mask(pdev, ATA_DMA_MASK);
762 	} else {
763 		/** This shouldn't fail as it was set to this value before */
764 		pci_set_dma_mask(pdev, pp->adma_dma_mask);
765 		if (sdev0)
766 			blk_queue_bounce_limit(sdev0->request_queue,
767 					       pp->adma_dma_mask);
768 		if (sdev1)
769 			blk_queue_bounce_limit(sdev1->request_queue,
770 					       pp->adma_dma_mask);
771 	}
772 
773 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
774 	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
775 	ata_port_printk(ap, KERN_INFO,
776 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
777 		(unsigned long long)*ap->host->dev->dma_mask,
778 		segment_boundary, sg_tablesize);
779 
780 	spin_unlock_irqrestore(ap->lock, flags);
781 
782 	return rc;
783 }
784 
785 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
786 {
787 	struct nv_adma_port_priv *pp = qc->ap->private_data;
788 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
789 }
790 
791 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
792 {
793 	/* Other than when internal or pass-through commands are executed,
794 	   the only time this function will be called in ADMA mode will be
795 	   if a command fails. In the failure case we don't care about going
796 	   into register mode with ADMA commands pending, as the commands will
797 	   all shortly be aborted anyway. We assume that NCQ commands are not
798 	   issued via passthrough, which is the only way that switching into
799 	   ADMA mode could abort outstanding commands. */
800 	nv_adma_register_mode(ap);
801 
802 	ata_sff_tf_read(ap, tf);
803 }
804 
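/* Encode an ATA taskfile into the CPB's sequence of 16-bit register/value
   words; the final command word carries CMDEND and any unused slots are
   filled with IGN.  Returns the number of words written. */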
805 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
806 {
807 	unsigned int idx = 0;
808 
809 	if (tf->flags & ATA_TFLAG_ISADDR) {
810 		if (tf->flags & ATA_TFLAG_LBA48) {
811 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
812 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
813 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
814 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
815 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
816 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
817 		} else
818 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
819 
820 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
821 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
822 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
823 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
824 	}
825 
826 	if (tf->flags & ATA_TFLAG_DEVICE)
827 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
828 
829 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
830 
831 	while (idx < 12)
832 		cpb[idx++] = cpu_to_le16(IGN);
833 
834 	return idx;
835 }
836 
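/* Examine the response flags of one CPB.  On error, record the cause in the
   EH info and freeze or abort the port; on normal completion, complete the
   corresponding queued command.  Returns nonzero if error handling was
   triggered. */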
837 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
838 {
839 	struct nv_adma_port_priv *pp = ap->private_data;
840 	u8 flags = pp->cpb[cpb_num].resp_flags;
841 
842 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
843 
844 	if (unlikely((force_err ||
845 		     flags & (NV_CPB_RESP_ATA_ERR |
846 			      NV_CPB_RESP_CMD_ERR |
847 			      NV_CPB_RESP_CPB_ERR)))) {
848 		struct ata_eh_info *ehi = &ap->link.eh_info;
849 		int freeze = 0;
850 
851 		ata_ehi_clear_desc(ehi);
852 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
853 		if (flags & NV_CPB_RESP_ATA_ERR) {
854 			ata_ehi_push_desc(ehi, "ATA error");
855 			ehi->err_mask |= AC_ERR_DEV;
856 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
857 			ata_ehi_push_desc(ehi, "CMD error");
858 			ehi->err_mask |= AC_ERR_DEV;
859 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
860 			ata_ehi_push_desc(ehi, "CPB error");
861 			ehi->err_mask |= AC_ERR_SYSTEM;
862 			freeze = 1;
863 		} else {
864 			/* notifier error, but no error in CPB flags? */
865 			ata_ehi_push_desc(ehi, "unknown");
866 			ehi->err_mask |= AC_ERR_OTHER;
867 			freeze = 1;
868 		}
869 		/* Kill all commands. EH will determine what actually failed. */
870 		if (freeze)
871 			ata_port_freeze(ap);
872 		else
873 			ata_port_abort(ap);
874 		return 1;
875 	}
876 
877 	if (likely(flags & NV_CPB_RESP_DONE)) {
878 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
879 		VPRINTK("CPB flags done, flags=0x%x\n", flags);
880 		if (likely(qc)) {
881 			DPRINTK("Completing qc from tag %d\n", cpb_num);
882 			ata_qc_complete(qc);
883 		} else {
884 			struct ata_eh_info *ehi = &ap->link.eh_info;
885 			/* Notifier bits set without a command may indicate the drive
886 			   is misbehaving. Raise host state machine violation on this
887 			   condition. */
888 			ata_port_printk(ap, KERN_ERR,
889 					"notifier for tag %d with no cmd?\n",
890 					cpb_num);
891 			ehi->err_mask |= AC_ERR_HSM;
892 			ehi->action |= ATA_EH_RESET;
893 			ata_port_freeze(ap);
894 			return 1;
895 		}
896 	}
897 	return 0;
898 }
899 
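/* Legacy (non-ADMA) per-port interrupt handling: freeze on hotplug events,
   ignore interrupts that are not device interrupts, and hand a device
   interrupt with an active non-polled command to the SFF helper. */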
900 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
901 {
902 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
903 
904 	/* freeze if hotplugged */
905 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
906 		ata_port_freeze(ap);
907 		return 1;
908 	}
909 
910 	/* bail out if not our interrupt */
911 	if (!(irq_stat & NV_INT_DEV))
912 		return 0;
913 
914 	/* DEV interrupt w/ no active qc? */
915 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
916 		ata_sff_check_status(ap);
917 		return 1;
918 	}
919 
920 	/* handle interrupt */
921 	return ata_sff_host_intr(ap, qc);
922 }
923 
924 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
925 {
926 	struct ata_host *host = dev_instance;
927 	int i, handled = 0;
928 	u32 notifier_clears[2];
929 
930 	spin_lock(&host->lock);
931 
932 	for (i = 0; i < host->n_ports; i++) {
933 		struct ata_port *ap = host->ports[i];
934 		notifier_clears[i] = 0;
935 
936 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
937 			struct nv_adma_port_priv *pp = ap->private_data;
938 			void __iomem *mmio = pp->ctl_block;
939 			u16 status;
940 			u32 gen_ctl;
941 			u32 notifier, notifier_error;
942 
943 			/* if ADMA is disabled, use standard ata interrupt handler */
944 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
945 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
946 					>> (NV_INT_PORT_SHIFT * i);
947 				handled += nv_host_intr(ap, irq_stat);
948 				continue;
949 			}
950 
951 			/* if in ATA register mode, check for standard interrupts */
952 			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
953 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
954 					>> (NV_INT_PORT_SHIFT * i);
955 				if (ata_tag_valid(ap->link.active_tag))
956 					/** NV_INT_DEV indication seems unreliable at times
957 					    at least in ADMA mode. Force it on always when a
958 					    command is active, to prevent losing interrupts. */
959 					irq_stat |= NV_INT_DEV;
960 				handled += nv_host_intr(ap, irq_stat);
961 			}
962 
963 			notifier = readl(mmio + NV_ADMA_NOTIFIER);
964 			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
965 			notifier_clears[i] = notifier | notifier_error;
966 
967 			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
968 
969 			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
970 			    !notifier_error)
971 				/* Nothing to do */
972 				continue;
973 
974 			status = readw(mmio + NV_ADMA_STAT);
975 
976 			/* Clear status. Ensure the controller sees the clearing before we start
977 			   looking at any of the CPB statuses, so that any CPB completions after
978 			   this point in the handler will raise another interrupt. */
979 			writew(status, mmio + NV_ADMA_STAT);
980 			readw(mmio + NV_ADMA_STAT); /* flush posted write */
981 			rmb();
982 
983 			handled++; /* irq handled if we got here */
984 
985 			/* freeze if hotplugged or controller error */
986 			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
987 					       NV_ADMA_STAT_HOTUNPLUG |
988 					       NV_ADMA_STAT_TIMEOUT |
989 					       NV_ADMA_STAT_SERROR))) {
990 				struct ata_eh_info *ehi = &ap->link.eh_info;
991 
992 				ata_ehi_clear_desc(ehi);
993 				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
994 				if (status & NV_ADMA_STAT_TIMEOUT) {
995 					ehi->err_mask |= AC_ERR_SYSTEM;
996 					ata_ehi_push_desc(ehi, "timeout");
997 				} else if (status & NV_ADMA_STAT_HOTPLUG) {
998 					ata_ehi_hotplugged(ehi);
999 					ata_ehi_push_desc(ehi, "hotplug");
1000 				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
1001 					ata_ehi_hotplugged(ehi);
1002 					ata_ehi_push_desc(ehi, "hot unplug");
1003 				} else if (status & NV_ADMA_STAT_SERROR) {
1004 					/* let libata analyze SError and figure out the cause */
1005 					ata_ehi_push_desc(ehi, "SError");
1006 				} else
1007 					ata_ehi_push_desc(ehi, "unknown");
1008 				ata_port_freeze(ap);
1009 				continue;
1010 			}
1011 
1012 			if (status & (NV_ADMA_STAT_DONE |
1013 				      NV_ADMA_STAT_CPBERR |
1014 				      NV_ADMA_STAT_CMD_COMPLETE)) {
1015 				u32 check_commands = notifier_clears[i];
1016 				int pos, error = 0;
1017 
1018 				if (status & NV_ADMA_STAT_CPBERR) {
1019 					/* Check all active commands */
1020 					if (ata_tag_valid(ap->link.active_tag))
1021 						check_commands = 1 <<
1022 							ap->link.active_tag;
1023 					else
1024 						check_commands = ap->
1025 							link.sactive;
1026 				}
1027 
1028 				/** Check CPBs for completed commands */
1029 				while ((pos = ffs(check_commands)) && !error) {
1030 					pos--;
1031 					error = nv_adma_check_cpb(ap, pos,
1032 						notifier_error & (1 << pos));
1033 					check_commands &= ~(1 << pos);
1034 				}
1035 			}
1036 		}
1037 	}
1038 
1039 	if (notifier_clears[0] || notifier_clears[1]) {
1040 		/* Note: Both notifier clear registers must be written
1041 		   if either is set, even if one is zero, according to NVIDIA. */
1042 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1043 		writel(notifier_clears[0], pp->notifier_clear_block);
1044 		pp = host->ports[1]->private_data;
1045 		writel(notifier_clears[1], pp->notifier_clear_block);
1046 	}
1047 
1048 	spin_unlock(&host->lock);
1049 
1050 	return IRQ_RETVAL(handled);
1051 }
1052 
1053 static void nv_adma_freeze(struct ata_port *ap)
1054 {
1055 	struct nv_adma_port_priv *pp = ap->private_data;
1056 	void __iomem *mmio = pp->ctl_block;
1057 	u16 tmp;
1058 
1059 	nv_ck804_freeze(ap);
1060 
1061 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1062 		return;
1063 
1064 	/* clear any outstanding CK804 notifications */
1065 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1066 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1067 
1068 	/* Disable interrupt */
1069 	tmp = readw(mmio + NV_ADMA_CTL);
1070 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1071 		mmio + NV_ADMA_CTL);
1072 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1073 }
1074 
1075 static void nv_adma_thaw(struct ata_port *ap)
1076 {
1077 	struct nv_adma_port_priv *pp = ap->private_data;
1078 	void __iomem *mmio = pp->ctl_block;
1079 	u16 tmp;
1080 
1081 	nv_ck804_thaw(ap);
1082 
1083 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1084 		return;
1085 
1086 	/* Enable interrupt */
1087 	tmp = readw(mmio + NV_ADMA_CTL);
1088 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1089 		mmio + NV_ADMA_CTL);
1090 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1091 }
1092 
1093 static void nv_adma_irq_clear(struct ata_port *ap)
1094 {
1095 	struct nv_adma_port_priv *pp = ap->private_data;
1096 	void __iomem *mmio = pp->ctl_block;
1097 	u32 notifier_clears[2];
1098 
1099 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1100 		ata_sff_irq_clear(ap);
1101 		return;
1102 	}
1103 
1104 	/* clear any outstanding CK804 notifications */
1105 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1106 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1107 
1108 	/* clear ADMA status */
1109 	writew(0xffff, mmio + NV_ADMA_STAT);
1110 
1111 	/* clear notifiers - note both ports need to be written with
1112 	   something even though we are only clearing on one */
1113 	if (ap->port_no == 0) {
1114 		notifier_clears[0] = 0xFFFFFFFF;
1115 		notifier_clears[1] = 0;
1116 	} else {
1117 		notifier_clears[0] = 0;
1118 		notifier_clears[1] = 0xFFFFFFFF;
1119 	}
1120 	pp = ap->host->ports[0]->private_data;
1121 	writel(notifier_clears[0], pp->notifier_clear_block);
1122 	pp = ap->host->ports[1]->private_data;
1123 	writel(notifier_clears[1], pp->notifier_clear_block);
1124 }
1125 
1126 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1127 {
1128 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1129 
1130 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1131 		ata_sff_post_internal_cmd(qc);
1132 }
1133 
1134 static int nv_adma_port_start(struct ata_port *ap)
1135 {
1136 	struct device *dev = ap->host->dev;
1137 	struct nv_adma_port_priv *pp;
1138 	int rc;
1139 	void *mem;
1140 	dma_addr_t mem_dma;
1141 	void __iomem *mmio;
1142 	struct pci_dev *pdev = to_pci_dev(dev);
1143 	u16 tmp;
1144 
1145 	VPRINTK("ENTER\n");
1146 
1147 	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1148 	   pad buffers */
1149 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1150 	if (rc)
1151 		return rc;
1152 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1153 	if (rc)
1154 		return rc;
1155 
1156 	rc = ata_port_start(ap);
1157 	if (rc)
1158 		return rc;
1159 
1160 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1161 	if (!pp)
1162 		return -ENOMEM;
1163 
1164 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1165 	       ap->port_no * NV_ADMA_PORT_SIZE;
1166 	pp->ctl_block = mmio;
1167 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1168 	pp->notifier_clear_block = pp->gen_block +
1169 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1170 
1171 	/* Now that the legacy PRD and padding buffer are allocated we can
1172 	   safely raise the DMA mask to allocate the CPB/APRD table.
1173 	   These are allowed to fail since we store the value that ends up
1174 	   being used as the bounce limit in slave_config later if
1175 	   needed. */
1176 	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1177 	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1178 	pp->adma_dma_mask = *dev->dma_mask;
1179 
1180 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1181 				  &mem_dma, GFP_KERNEL);
1182 	if (!mem)
1183 		return -ENOMEM;
1184 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1185 
1186 	/*
1187 	 * First item in chunk of DMA memory:
1188 	 * 128-byte command parameter block (CPB)
1189 	 * one for each command tag
1190 	 */
1191 	pp->cpb     = mem;
1192 	pp->cpb_dma = mem_dma;
1193 
1194 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1195 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1196 
1197 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1198 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1199 
1200 	/*
1201 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1202 	 */
1203 	pp->aprd = mem;
1204 	pp->aprd_dma = mem_dma;
1205 
1206 	ap->private_data = pp;
1207 
1208 	/* clear any outstanding interrupt conditions */
1209 	writew(0xffff, mmio + NV_ADMA_STAT);
1210 
1211 	/* initialize port variables */
1212 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1213 
1214 	/* clear CPB fetch count */
1215 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1216 
1217 	/* clear GO for register mode, enable interrupt */
1218 	tmp = readw(mmio + NV_ADMA_CTL);
1219 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1220 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1221 
1222 	tmp = readw(mmio + NV_ADMA_CTL);
1223 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1224 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1225 	udelay(1);
1226 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1227 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1228 
1229 	return 0;
1230 }
1231 
1232 static void nv_adma_port_stop(struct ata_port *ap)
1233 {
1234 	struct nv_adma_port_priv *pp = ap->private_data;
1235 	void __iomem *mmio = pp->ctl_block;
1236 
1237 	VPRINTK("ENTER\n");
1238 	writew(0, mmio + NV_ADMA_CTL);
1239 }
1240 
1241 #ifdef CONFIG_PM
1242 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1243 {
1244 	struct nv_adma_port_priv *pp = ap->private_data;
1245 	void __iomem *mmio = pp->ctl_block;
1246 
1247 	/* Go to register mode - clears GO */
1248 	nv_adma_register_mode(ap);
1249 
1250 	/* clear CPB fetch count */
1251 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1252 
1253 	/* disable interrupt, shut down port */
1254 	writew(0, mmio + NV_ADMA_CTL);
1255 
1256 	return 0;
1257 }
1258 
1259 static int nv_adma_port_resume(struct ata_port *ap)
1260 {
1261 	struct nv_adma_port_priv *pp = ap->private_data;
1262 	void __iomem *mmio = pp->ctl_block;
1263 	u16 tmp;
1264 
1265 	/* set CPB block location */
1266 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1267 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1268 
1269 	/* clear any outstanding interrupt conditions */
1270 	writew(0xffff, mmio + NV_ADMA_STAT);
1271 
1272 	/* initialize port variables */
1273 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1274 
1275 	/* clear CPB fetch count */
1276 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1277 
1278 	/* clear GO for register mode, enable interrupt */
1279 	tmp = readw(mmio + NV_ADMA_CTL);
1280 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1281 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1282 
1283 	tmp = readw(mmio + NV_ADMA_CTL);
1284 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1285 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1286 	udelay(1);
1287 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1288 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1289 
1290 	return 0;
1291 }
1292 #endif
1293 
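/* Point the standard SFF taskfile accessors at the ADMA port's MMIO register
   block, with each shadow register spaced 4 bytes apart. */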
1294 static void nv_adma_setup_port(struct ata_port *ap)
1295 {
1296 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1297 	struct ata_ioports *ioport = &ap->ioaddr;
1298 
1299 	VPRINTK("ENTER\n");
1300 
1301 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1302 
1303 	ioport->cmd_addr	= mmio;
1304 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1305 	ioport->error_addr	=
1306 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1307 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1308 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1309 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1310 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1311 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1312 	ioport->status_addr	=
1313 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1314 	ioport->altstatus_addr	=
1315 	ioport->ctl_addr	= mmio + 0x20;
1316 }
1317 
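/* Host-wide ADMA initialization: set the port-enable and PWB-enable bits for
   both ports in PCI config register NV_MCP_SATA_CFG_20, then set up the
   per-port MMIO addresses. */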
1318 static int nv_adma_host_init(struct ata_host *host)
1319 {
1320 	struct pci_dev *pdev = to_pci_dev(host->dev);
1321 	unsigned int i;
1322 	u32 tmp32;
1323 
1324 	VPRINTK("ENTER\n");
1325 
1326 	/* enable ADMA on the ports */
1327 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1328 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1329 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1330 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1331 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1332 
1333 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1334 
1335 	for (i = 0; i < host->n_ports; i++)
1336 		nv_adma_setup_port(host->ports[i]);
1337 
1338 	return 0;
1339 }
1340 
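/* Fill one ADMA physical region descriptor from a scatterlist entry, setting
   the WRITE/END/CONT flags according to transfer direction and the entry's
   position in the list. */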
1341 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1342 			      struct scatterlist *sg,
1343 			      int idx,
1344 			      struct nv_adma_prd *aprd)
1345 {
1346 	u8 flags = 0;
1347 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1348 		flags |= NV_APRD_WRITE;
1349 	if (idx == qc->n_elem - 1)
1350 		flags |= NV_APRD_END;
1351 	else if (idx != 4)
1352 		flags |= NV_APRD_CONT;
1353 
1354 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1355 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1356 	aprd->flags = flags;
1357 	aprd->packet_len = 0;
1358 }
1359 
1360 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1361 {
1362 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1363 	struct nv_adma_prd *aprd;
1364 	struct scatterlist *sg;
1365 	unsigned int si;
1366 
1367 	VPRINTK("ENTER\n");
1368 
1369 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1370 		aprd = (si < 5) ? &cpb->aprd[si] :
1371 			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1372 		nv_adma_fill_aprd(qc, sg, si, aprd);
1373 	}
1374 	if (si > 5)
1375 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1376 	else
1377 		cpb->next_aprd = cpu_to_le64(0);
1378 }
1379 
1380 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1381 {
1382 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1383 
1384 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1385 	   or interrupt-driven no-data commands. */
1386 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1387 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1388 		return 1;
1389 
1390 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1391 	   (qc->tf.protocol == ATA_PROT_NODATA))
1392 		return 0;
1393 
1394 	return 1;
1395 }
1396 
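/* Build the CPB for a command: encode the taskfile, fill the APRD
   scatter/gather entries, and only set CPB_VALID at the very end (with
   barriers) so the controller never sees a partially written CPB.  Commands
   that must use register mode fall back to the standard SFF prep. */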
1397 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1398 {
1399 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1400 	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1401 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1402 		       NV_CPB_CTL_IEN;
1403 
1404 	if (nv_adma_use_reg_mode(qc)) {
1405 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1406 			(qc->flags & ATA_QCFLAG_DMAMAP));
1407 		nv_adma_register_mode(qc->ap);
1408 		ata_sff_qc_prep(qc);
1409 		return;
1410 	}
1411 
1412 	cpb->resp_flags = NV_CPB_RESP_DONE;
1413 	wmb();
1414 	cpb->ctl_flags = 0;
1415 	wmb();
1416 
1417 	cpb->len		= 3;
1418 	cpb->tag		= qc->tag;
1419 	cpb->next_cpb_idx	= 0;
1420 
1421 	/* turn on NCQ flags for NCQ commands */
1422 	if (qc->tf.protocol == ATA_PROT_NCQ)
1423 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1424 
1425 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1426 
1427 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1428 
1429 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1430 		nv_adma_fill_sg(qc, cpb);
1431 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1432 	} else
1433 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1434 
1435 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1436 	   until we are finished filling in all of the contents */
1437 	wmb();
1438 	cpb->ctl_flags = ctl_flags;
1439 	wmb();
1440 	cpb->resp_flags = 0;
1441 }
1442 
1443 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1444 {
1445 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1446 	void __iomem *mmio = pp->ctl_block;
1447 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1448 
1449 	VPRINTK("ENTER\n");
1450 
1451 	/* We can't handle result taskfile with NCQ commands, since
1452 	   retrieving the taskfile switches us out of ADMA mode and would abort
1453 	   existing commands. */
1454 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1455 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1456 		ata_dev_printk(qc->dev, KERN_ERR,
1457 			"NCQ w/ RESULT_TF not allowed\n");
1458 		return AC_ERR_SYSTEM;
1459 	}
1460 
1461 	if (nv_adma_use_reg_mode(qc)) {
1462 		/* use ATA register mode */
1463 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1464 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1465 			(qc->flags & ATA_QCFLAG_DMAMAP));
1466 		nv_adma_register_mode(qc->ap);
1467 		return ata_sff_qc_issue(qc);
1468 	} else
1469 		nv_adma_mode(qc->ap);
1470 
1471 	/* write append register, command tag in lower 8 bits
1472 	   and (number of cpbs to append -1) in top 8 bits */
1473 	wmb();
1474 
1475 	if (curr_ncq != pp->last_issue_ncq) {
1476 		/* Seems to need some delay before switching between NCQ and
1477 		   non-NCQ commands, else we get command timeouts and such. */
1478 		udelay(20);
1479 		pp->last_issue_ncq = curr_ncq;
1480 	}
1481 
1482 	writew(qc->tag, mmio + NV_ADMA_APPEND);
1483 
1484 	DPRINTK("Issued tag %u\n", qc->tag);
1485 
1486 	return 0;
1487 }
1488 
1489 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1490 {
1491 	struct ata_host *host = dev_instance;
1492 	unsigned int i;
1493 	unsigned int handled = 0;
1494 	unsigned long flags;
1495 
1496 	spin_lock_irqsave(&host->lock, flags);
1497 
1498 	for (i = 0; i < host->n_ports; i++) {
1499 		struct ata_port *ap;
1500 
1501 		ap = host->ports[i];
1502 		if (ap &&
1503 		    !(ap->flags & ATA_FLAG_DISABLED)) {
1504 			struct ata_queued_cmd *qc;
1505 
1506 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1507 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1508 				handled += ata_sff_host_intr(ap, qc);
1509 			else
1510 				// No request pending?  Clear interrupt status
1511 				// anyway, in case there's one pending.
1512 				ap->ops->sff_check_status(ap);
1513 		}
1514 
1515 	}
1516 
1517 	spin_unlock_irqrestore(&host->lock, flags);
1518 
1519 	return IRQ_RETVAL(handled);
1520 }
1521 
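/* Dispatch a combined interrupt status byte to both ports; each port owns a
   4-bit field within it. */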
1522 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1523 {
1524 	int i, handled = 0;
1525 
1526 	for (i = 0; i < host->n_ports; i++) {
1527 		struct ata_port *ap = host->ports[i];
1528 
1529 		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1530 			handled += nv_host_intr(ap, irq_stat);
1531 
1532 		irq_stat >>= NV_INT_PORT_SHIFT;
1533 	}
1534 
1535 	return IRQ_RETVAL(handled);
1536 }
1537 
1538 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1539 {
1540 	struct ata_host *host = dev_instance;
1541 	u8 irq_stat;
1542 	irqreturn_t ret;
1543 
1544 	spin_lock(&host->lock);
1545 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1546 	ret = nv_do_interrupt(host, irq_stat);
1547 	spin_unlock(&host->lock);
1548 
1549 	return ret;
1550 }
1551 
1552 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1553 {
1554 	struct ata_host *host = dev_instance;
1555 	u8 irq_stat;
1556 	irqreturn_t ret;
1557 
1558 	spin_lock(&host->lock);
1559 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1560 	ret = nv_do_interrupt(host, irq_stat);
1561 	spin_unlock(&host->lock);
1562 
1563 	return ret;
1564 }
1565 
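/* SCR (SStatus/SError/SControl) access: the registers are laid out at 4-byte
   strides from the port's scr_addr. */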
1566 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1567 {
1568 	if (sc_reg > SCR_CONTROL)
1569 		return -EINVAL;
1570 
1571 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1572 	return 0;
1573 }
1574 
1575 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1576 {
1577 	if (sc_reg > SCR_CONTROL)
1578 		return -EINVAL;
1579 
1580 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1581 	return 0;
1582 }
1583 
1584 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1585 			unsigned long deadline)
1586 {
1587 	struct ata_eh_context *ehc = &link->eh_context;
1588 
1589 	/* Do hardreset iff it's post-boot probing; please read the
1590 	 * comment above port ops for details.
1591 	 */
1592 	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1593 	    !ata_dev_enabled(link->device))
1594 		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1595 				    NULL, NULL);
1596 	else if (!(ehc->i.flags & ATA_EHI_QUIET))
1597 		ata_link_printk(link, KERN_INFO,
1598 				"nv: skipping hardreset on occupied port\n");
1599 
1600 	/* device signature acquisition is unreliable */
1601 	return -EAGAIN;
1602 }
1603 
1604 static void nv_nf2_freeze(struct ata_port *ap)
1605 {
1606 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1607 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1608 	u8 mask;
1609 
1610 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1611 	mask &= ~(NV_INT_ALL << shift);
1612 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1613 }
1614 
1615 static void nv_nf2_thaw(struct ata_port *ap)
1616 {
1617 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1618 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1619 	u8 mask;
1620 
1621 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1622 
1623 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1624 	mask |= (NV_INT_MASK << shift);
1625 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1626 }
1627 
1628 static void nv_ck804_freeze(struct ata_port *ap)
1629 {
1630 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1631 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1632 	u8 mask;
1633 
1634 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1635 	mask &= ~(NV_INT_ALL << shift);
1636 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1637 }
1638 
1639 static void nv_ck804_thaw(struct ata_port *ap)
1640 {
1641 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1642 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1643 	u8 mask;
1644 
1645 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1646 
1647 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1648 	mask |= (NV_INT_MASK << shift);
1649 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1650 }
1651 
1652 static void nv_mcp55_freeze(struct ata_port *ap)
1653 {
1654 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1655 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1656 	u32 mask;
1657 
1658 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1659 
1660 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1661 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1662 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1663 	ata_sff_freeze(ap);
1664 }
1665 
1666 static void nv_mcp55_thaw(struct ata_port *ap)
1667 {
1668 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1669 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1670 	u32 mask;
1671 
1672 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1673 
1674 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1675 	mask |= (NV_INT_MASK_MCP55 << shift);
1676 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1677 	ata_sff_thaw(ap);
1678 }
1679 
1680 static void nv_adma_error_handler(struct ata_port *ap)
1681 {
1682 	struct nv_adma_port_priv *pp = ap->private_data;
1683 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1684 		void __iomem *mmio = pp->ctl_block;
1685 		int i;
1686 		u16 tmp;
1687 
1688 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1689 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1690 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1691 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1692 			u32 status = readw(mmio + NV_ADMA_STAT);
1693 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1694 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1695 
1696 			ata_port_printk(ap, KERN_ERR,
1697 				"EH in ADMA mode, notifier 0x%X "
1698 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1699 				"next cpb count 0x%X next cpb idx 0x%x\n",
1700 				notifier, notifier_error, gen_ctl, status,
1701 				cpb_count, next_cpb_idx);
1702 
1703 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1704 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1705 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1706 				    ap->link.sactive & (1 << i))
1707 					ata_port_printk(ap, KERN_ERR,
1708 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1709 						i, cpb->ctl_flags, cpb->resp_flags);
1710 			}
1711 		}
1712 
1713 		/* Push us back into port register mode for error handling. */
1714 		nv_adma_register_mode(ap);
1715 
1716 		/* Mark all of the CPBs as invalid to prevent them from
1717 		   being executed */
1718 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1719 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1720 
1721 		/* clear CPB fetch count */
1722 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1723 
1724 		/* Reset channel */
1725 		tmp = readw(mmio + NV_ADMA_CTL);
1726 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1727 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1728 		udelay(1);
1729 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1730 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1731 	}
1732 
1733 	ata_sff_error_handler(ap);
1734 }
1735 
1736 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1737 {
1738 	struct nv_swncq_port_priv *pp = ap->private_data;
1739 	struct defer_queue *dq = &pp->defer_queue;
1740 
1741 	/* queue is full */
1742 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1743 	dq->defer_bits |= (1 << qc->tag);
1744 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1745 }
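
/*
 * The defer queue is a small ring of tags: head and tail are free-running
 * counters masked with (ATA_MAX_QUEUE - 1) on each access, while
 * defer_bits mirrors the queued tags as a bitmap so callers can cheaply
 * test whether anything is pending and the WARN_ONs can catch
 * double-queue/double-dequeue bugs.
 */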
1746 
1747 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1748 {
1749 	struct nv_swncq_port_priv *pp = ap->private_data;
1750 	struct defer_queue *dq = &pp->defer_queue;
1751 	unsigned int tag;
1752 
1753 	if (dq->head == dq->tail)	/* null queue */
1754 		return NULL;
1755 
1756 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1757 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1758 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1759 	dq->defer_bits &= ~(1 << tag);
1760 
1761 	return ata_qc_from_tag(ap, tag);
1762 }
1763 
1764 static void nv_swncq_fis_reinit(struct ata_port *ap)
1765 {
1766 	struct nv_swncq_port_priv *pp = ap->private_data;
1767 
1768 	pp->dhfis_bits = 0;
1769 	pp->dmafis_bits = 0;
1770 	pp->sdbfis_bits = 0;
1771 	pp->ncq_flags = 0;
1772 }
1773 
1774 static void nv_swncq_pp_reinit(struct ata_port *ap)
1775 {
1776 	struct nv_swncq_port_priv *pp = ap->private_data;
1777 	struct defer_queue *dq = &pp->defer_queue;
1778 
1779 	dq->head = 0;
1780 	dq->tail = 0;
1781 	dq->defer_bits = 0;
1782 	pp->qc_active = 0;
1783 	pp->last_issue_tag = ATA_TAG_POISON;
1784 	nv_swncq_fis_reinit(ap);
1785 }
1786 
1787 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1788 {
1789 	struct nv_swncq_port_priv *pp = ap->private_data;
1790 
1791 	writew(fis, pp->irq_block);
1792 }
1793 
1794 static void __ata_bmdma_stop(struct ata_port *ap)
1795 {
1796 	struct ata_queued_cmd qc;
1797 
1798 	qc.ap = ap;
1799 	ata_bmdma_stop(&qc);
1800 }
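
/*
 * ata_bmdma_stop() currently only dereferences qc->ap, so a dummy
 * ata_queued_cmd on the stack with just ->ap filled in is enough to stop
 * the BMDMA engine when no real qc is at hand (e.g. during error
 * handling).
 */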
1801 
1802 static void nv_swncq_ncq_stop(struct ata_port *ap)
1803 {
1804 	struct nv_swncq_port_priv *pp = ap->private_data;
1805 	unsigned int i;
1806 	u32 sactive;
1807 	u32 done_mask;
1808 
1809 	ata_port_printk(ap, KERN_ERR,
1810 			"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1811 			ap->qc_active, ap->link.sactive);
1812 	ata_port_printk(ap, KERN_ERR,
1813 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1814 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1815 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1816 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1817 
1818 	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1819 			ap->ops->sff_check_status(ap),
1820 			ioread8(ap->ioaddr.error_addr));
1821 
1822 	sactive = readl(pp->sactive_block);
1823 	done_mask = pp->qc_active ^ sactive;
1824 
1825 	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1826 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1827 		u8 err = 0;
1828 		if (pp->qc_active & (1 << i))
1829 			err = 0;
1830 		else if (done_mask & (1 << i))
1831 			err = 1;
1832 		else
1833 			continue;
1834 
1835 		ata_port_printk(ap, KERN_ERR,
1836 				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
1837 				(pp->dhfis_bits >> i) & 0x1,
1838 				(pp->dmafis_bits >> i) & 0x1,
1839 				(pp->sdbfis_bits >> i) & 0x1,
1840 				(sactive >> i) & 0x1,
1841 				(err ? "error! tag doesn't exist" : " "));
1842 	}
1843 
1844 	nv_swncq_pp_reinit(ap);
1845 	ap->ops->sff_irq_clear(ap);
1846 	__ata_bmdma_stop(ap);
1847 	nv_swncq_irq_clear(ap, 0xffff);
1848 }
1849 
1850 static void nv_swncq_error_handler(struct ata_port *ap)
1851 {
1852 	struct ata_eh_context *ehc = &ap->link.eh_context;
1853 
1854 	if (ap->link.sactive) {
1855 		nv_swncq_ncq_stop(ap);
1856 		ehc->i.action |= ATA_EH_RESET;
1857 	}
1858 
1859 	ata_sff_error_handler(ap);
1860 }
1861 
1862 #ifdef CONFIG_PM
1863 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1864 {
1865 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1866 	u32 tmp;
1867 
1868 	/* clear irq */
1869 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1870 
1871 	/* disable irq */
1872 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1873 
1874 	/* disable swncq */
1875 	tmp = readl(mmio + NV_CTL_MCP55);
1876 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1877 	writel(tmp, mmio + NV_CTL_MCP55);
1878 
1879 	return 0;
1880 }
1881 
1882 static int nv_swncq_port_resume(struct ata_port *ap)
1883 {
1884 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1885 	u32 tmp;
1886 
1887 	/* clear irq */
1888 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1889 
1890 	/* enable irq */
1891 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1892 
1893 	/* enable swncq */
1894 	tmp = readl(mmio + NV_CTL_MCP55);
1895 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1896 
1897 	return 0;
1898 }
1899 #endif
1900 
1901 static void nv_swncq_host_init(struct ata_host *host)
1902 {
1903 	u32 tmp;
1904 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1905 	struct pci_dev *pdev = to_pci_dev(host->dev);
1906 	u8 regval;
1907 
1908 	/* disable ECO 398 */
1909 	pci_read_config_byte(pdev, 0x7f, &regval);
1910 	regval &= ~(1 << 7);
1911 	pci_write_config_byte(pdev, 0x7f, regval);
1912 
1913 	/* enable swncq */
1914 	tmp = readl(mmio + NV_CTL_MCP55);
1915 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1916 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1917 
1918 	/* enable irq intr */
1919 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1920 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1921 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1922 
1923 	/* clear port irq */
1924 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1925 }
1926 
1927 static int nv_swncq_slave_config(struct scsi_device *sdev)
1928 {
1929 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1930 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1931 	struct ata_device *dev;
1932 	int rc;
1933 	u8 rev;
1934 	u8 check_maxtor = 0;
1935 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1936 
1937 	rc = ata_scsi_slave_config(sdev);
1938 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1939 		/* Not a proper libata device, ignore */
1940 		return rc;
1941 
1942 	dev = &ap->link.device[sdev->id];
1943 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1944 		return rc;
1945 
1946 	/* if MCP51 and Maxtor, then disable ncq */
1947 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1948 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1949 		check_maxtor = 1;
1950 
1951 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1952 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1953 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1954 		pci_read_config_byte(pdev, 0x8, &rev);
1955 		if (rev <= 0xa2)
1956 			check_maxtor = 1;
1957 	}
1958 
1959 	if (!check_maxtor)
1960 		return rc;
1961 
1962 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1963 
1964 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1965 		ata_scsi_change_queue_depth(sdev, 1);
1966 		ata_dev_printk(dev, KERN_NOTICE,
1967 			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1968 	}
1969 
1970 	return rc;
1971 }
1972 
1973 static int nv_swncq_port_start(struct ata_port *ap)
1974 {
1975 	struct device *dev = ap->host->dev;
1976 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1977 	struct nv_swncq_port_priv *pp;
1978 	int rc;
1979 
1980 	rc = ata_port_start(ap);
1981 	if (rc)
1982 		return rc;
1983 
1984 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1985 	if (!pp)
1986 		return -ENOMEM;
1987 
1988 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1989 				      &pp->prd_dma, GFP_KERNEL);
1990 	if (!pp->prd)
1991 		return -ENOMEM;
1992 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1993 
1994 	ap->private_data = pp;
1995 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1996 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1997 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1998 
1999 	return 0;
2000 }
2001 
2002 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
2003 {
2004 	if (qc->tf.protocol != ATA_PROT_NCQ) {
2005 		ata_sff_qc_prep(qc);
2006 		return;
2007 	}
2008 
2009 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2010 		return;
2011 
2012 	nv_swncq_fill_sg(qc);
2013 }
2014 
2015 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2016 {
2017 	struct ata_port *ap = qc->ap;
2018 	struct scatterlist *sg;
2019 	struct nv_swncq_port_priv *pp = ap->private_data;
2020 	struct ata_prd *prd;
2021 	unsigned int si, idx;
2022 
2023 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
2024 
2025 	idx = 0;
2026 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2027 		u32 addr, offset;
2028 		u32 sg_len, len;
2029 
2030 		addr = (u32)sg_dma_address(sg);
2031 		sg_len = sg_dma_len(sg);
2032 
2033 		while (sg_len) {
2034 			offset = addr & 0xffff;
2035 			len = sg_len;
2036 			if ((offset + sg_len) > 0x10000)
2037 				len = 0x10000 - offset;
2038 
2039 			prd[idx].addr = cpu_to_le32(addr);
2040 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2041 
2042 			idx++;
2043 			sg_len -= len;
2044 			addr += len;
2045 		}
2046 	}
2047 
2048 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2049 }
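
/*
 * Each NCQ tag gets its own PRD table at pp->prd + ATA_MAX_PRD * tag.
 * The fill loop above splits any segment that would cross a 64 KiB
 * boundary, per the usual BMDMA PRD rules: for example, a 40 KiB segment
 * starting at 0x0001f000 becomes a 4 KiB entry up to 0x00020000 followed
 * by a 36 KiB entry.  A byte count of 0 in flags_len (len == 0x10000
 * masked to 16 bits) encodes a full 64 KiB.
 */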
2050 
2051 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2052 					  struct ata_queued_cmd *qc)
2053 {
2054 	struct nv_swncq_port_priv *pp = ap->private_data;
2055 
2056 	if (qc == NULL)
2057 		return 0;
2058 
2059 	DPRINTK("Enter\n");
2060 
2061 	writel((1 << qc->tag), pp->sactive_block);
2062 	pp->last_issue_tag = qc->tag;
2063 	pp->dhfis_bits &= ~(1 << qc->tag);
2064 	pp->dmafis_bits &= ~(1 << qc->tag);
2065 	pp->qc_active |= (0x1 << qc->tag);
2066 
2067 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2068 	ap->ops->sff_exec_command(ap, &qc->tf);
2069 
2070 	DPRINTK("Issued tag %u\n", qc->tag);
2071 
2072 	return 0;
2073 }
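
/*
 * Issue-time bookkeeping: the tag's bit is written to the port's SActive
 * register before the taskfile is sent, its dhfis/dmafis tracking bits
 * are cleared so the interrupt handler can follow the FIS sequence for
 * this command, and pp->qc_active records the tags actually issued to the
 * drive (as opposed to those still sitting in the defer queue).
 */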
2074 
2075 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2076 {
2077 	struct ata_port *ap = qc->ap;
2078 	struct nv_swncq_port_priv *pp = ap->private_data;
2079 
2080 	if (qc->tf.protocol != ATA_PROT_NCQ)
2081 		return ata_sff_qc_issue(qc);
2082 
2083 	DPRINTK("Enter\n");
2084 
2085 	if (!pp->qc_active)
2086 		nv_swncq_issue_atacmd(ap, qc);
2087 	else
2088 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2089 
2090 	return 0;
2091 }
2092 
2093 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2094 {
2095 	u32 serror;
2096 	struct ata_eh_info *ehi = &ap->link.eh_info;
2097 
2098 	ata_ehi_clear_desc(ehi);
2099 
2100 	/* AHCI needs SError cleared; otherwise, it might lock up */
2101 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2102 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2103 
2104 	/* analyze @fis */
2105 	if (fis & NV_SWNCQ_IRQ_ADDED)
2106 		ata_ehi_push_desc(ehi, "hot plug");
2107 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2108 		ata_ehi_push_desc(ehi, "hot unplug");
2109 
2110 	ata_ehi_hotplugged(ehi);
2111 
2112 	/* okay, let's hand over to EH */
2113 	ehi->serror |= serror;
2114 
2115 	ata_port_freeze(ap);
2116 }
2117 
2118 static int nv_swncq_sdbfis(struct ata_port *ap)
2119 {
2120 	struct ata_queued_cmd *qc;
2121 	struct nv_swncq_port_priv *pp = ap->private_data;
2122 	struct ata_eh_info *ehi = &ap->link.eh_info;
2123 	u32 sactive;
2124 	int nr_done = 0;
2125 	u32 done_mask;
2126 	int i;
2127 	u8 host_stat;
2128 	u8 lack_dhfis = 0;
2129 
2130 	host_stat = ap->ops->bmdma_status(ap);
2131 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2132 		/* error when transferring data to/from memory */
2133 		ata_ehi_clear_desc(ehi);
2134 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2135 		ehi->err_mask |= AC_ERR_HOST_BUS;
2136 		ehi->action |= ATA_EH_RESET;
2137 		return -EINVAL;
2138 	}
2139 
2140 	ap->ops->sff_irq_clear(ap);
2141 	__ata_bmdma_stop(ap);
2142 
2143 	sactive = readl(pp->sactive_block);
2144 	done_mask = pp->qc_active ^ sactive;
2145 
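	/*
	 * pp->qc_active holds the tags this driver has issued; the drive
	 * clears a tag's SActive bit when it completes, so the XOR yields
	 * the freshly completed tags.  A bit set in both done_mask and
	 * sactive would mean a tag became active that we never issued,
	 * which is flagged as an HSM violation below.
	 */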
2146 	if (unlikely(done_mask & sactive)) {
2147 		ata_ehi_clear_desc(ehi);
2148 		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2149 				  "(%08x->%08x)", pp->qc_active, sactive);
2150 		ehi->err_mask |= AC_ERR_HSM;
2151 		ehi->action |= ATA_EH_RESET;
2152 		return -EINVAL;
2153 	}
2154 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
2155 		if (!(done_mask & (1 << i)))
2156 			continue;
2157 
2158 		qc = ata_qc_from_tag(ap, i);
2159 		if (qc) {
2160 			ata_qc_complete(qc);
2161 			pp->qc_active &= ~(1 << i);
2162 			pp->dhfis_bits &= ~(1 << i);
2163 			pp->dmafis_bits &= ~(1 << i);
2164 			pp->sdbfis_bits |= (1 << i);
2165 			nr_done++;
2166 		}
2167 	}
2168 
2169 	if (!ap->qc_active) {
2170 		DPRINTK("over\n");
2171 		nv_swncq_pp_reinit(ap);
2172 		return nr_done;
2173 	}
2174 
2175 	if (pp->qc_active & pp->dhfis_bits)
2176 		return nr_done;
2177 
2178 	if ((pp->ncq_flags & ncq_saw_backout) ||
2179 	    (pp->qc_active ^ pp->dhfis_bits))
2180 		/* if the controller can't get a Device-to-Host Register FIS,
2181 		 * the driver needs to reissue the command.
2182 		 */
2183 		lack_dhfis = 1;
2184 
2185 	DPRINTK("id 0x%x QC: qc_active 0x%x, "
2186 		"SWNCQ:qc_active 0x%X defer_bits %X "
2187 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2188 		ap->print_id, ap->qc_active, pp->qc_active,
2189 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2190 		pp->dmafis_bits, pp->last_issue_tag);
2191 
2192 	nv_swncq_fis_reinit(ap);
2193 
2194 	if (lack_dhfis) {
2195 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2196 		nv_swncq_issue_atacmd(ap, qc);
2197 		return nr_done;
2198 	}
2199 
2200 	if (pp->defer_queue.defer_bits) {
2201 		/* send deferral queue command */
2202 		qc = nv_swncq_qc_from_dq(ap);
2203 		WARN_ON(qc == NULL);
2204 		nv_swncq_issue_atacmd(ap, qc);
2205 	}
2206 
2207 	return nr_done;
2208 }
2209 
2210 static inline u32 nv_swncq_tag(struct ata_port *ap)
2211 {
2212 	struct nv_swncq_port_priv *pp = ap->private_data;
2213 	u32 tag;
2214 
2215 	tag = readb(pp->tag_block) >> 2;
2216 	return (tag & 0x1f);
2217 }
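
/*
 * No vendor documentation exists for the NCQ register either; as used
 * here, the tag of the command whose DMA Setup FIS just arrived is
 * reported in bits 6:2 of the per-port byte, hence the shift and 5-bit
 * mask above.
 */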
2218 
2219 static int nv_swncq_dmafis(struct ata_port *ap)
2220 {
2221 	struct ata_queued_cmd *qc;
2222 	unsigned int rw;
2223 	u8 dmactl;
2224 	u32 tag;
2225 	struct nv_swncq_port_priv *pp = ap->private_data;
2226 
2227 	__ata_bmdma_stop(ap);
2228 	tag = nv_swncq_tag(ap);
2229 
2230 	DPRINTK("dma setup tag 0x%x\n", tag);
2231 	qc = ata_qc_from_tag(ap, tag);
2232 
2233 	if (unlikely(!qc))
2234 		return 0;
2235 
2236 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2237 
2238 	/* load PRD table addr. */
2239 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2240 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2241 
2242 	/* specify data direction, triple-check start bit is clear */
2243 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2244 	dmactl &= ~ATA_DMA_WR;
2245 	if (!rw)
2246 		dmactl |= ATA_DMA_WR;
2247 
2248 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2249 
2250 	return 1;
2251 }
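
/*
 * Note the inverted sense of ATA_DMA_WR above: in the BMDMA command
 * register it means "bus master writes to host memory", so it is set for
 * device-to-host (read) commands, i.e. when ATA_TFLAG_WRITE is clear.
 */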
2252 
2253 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2254 {
2255 	struct nv_swncq_port_priv *pp = ap->private_data;
2256 	struct ata_queued_cmd *qc;
2257 	struct ata_eh_info *ehi = &ap->link.eh_info;
2258 	u32 serror;
2259 	u8 ata_stat;
2260 	int rc = 0;
2261 
2262 	ata_stat = ap->ops->sff_check_status(ap);
2263 	nv_swncq_irq_clear(ap, fis);
2264 	if (!fis)
2265 		return;
2266 
2267 	if (ap->pflags & ATA_PFLAG_FROZEN)
2268 		return;
2269 
2270 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2271 		nv_swncq_hotplug(ap, fis);
2272 		return;
2273 	}
2274 
2275 	if (!pp->qc_active)
2276 		return;
2277 
2278 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2279 		return;
2280 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2281 
2282 	if (ata_stat & ATA_ERR) {
2283 		ata_ehi_clear_desc(ehi);
2284 		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2285 		ehi->err_mask |= AC_ERR_DEV;
2286 		ehi->serror |= serror;
2287 		ehi->action |= ATA_EH_RESET;
2288 		ata_port_freeze(ap);
2289 		return;
2290 	}
2291 
2292 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2293 		/* If the IRQ reports a backout, the driver must issue
2294 		 * the command again some time later.
2295 		 */
2296 		pp->ncq_flags |= ncq_saw_backout;
2297 	}
2298 
2299 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2300 		pp->ncq_flags |= ncq_saw_sdb;
2301 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2302 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2303 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2304 			pp->dmafis_bits, readl(pp->sactive_block));
2305 		rc = nv_swncq_sdbfis(ap);
2306 		if (rc < 0)
2307 			goto irq_error;
2308 	}
2309 
2310 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2311 		/* The interrupt indicates the new command
2312 		 * was transmitted correctly to the drive.
2313 		 */
2314 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2315 		pp->ncq_flags |= ncq_saw_d2h;
2316 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2317 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2318 			ehi->err_mask |= AC_ERR_HSM;
2319 			ehi->action |= ATA_EH_RESET;
2320 			goto irq_error;
2321 		}
2322 
2323 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2324 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2325 			ata_stat = ap->ops->sff_check_status(ap);
2326 			if (ata_stat & ATA_BUSY)
2327 				goto irq_exit;
2328 
2329 			if (pp->defer_queue.defer_bits) {
2330 				DPRINTK("send next command\n");
2331 				qc = nv_swncq_qc_from_dq(ap);
2332 				nv_swncq_issue_atacmd(ap, qc);
2333 			}
2334 		}
2335 	}
2336 
2337 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2338 		/* program the DMA controller with the appropriate PRD buffers
2339 		 * and start the DMA transfer for the requested command.
2340 		 */
2341 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2342 		pp->ncq_flags |= ncq_saw_dmas;
2343 		rc = nv_swncq_dmafis(ap);
2344 	}
2345 
2346 irq_exit:
2347 	return;
2348 irq_error:
2349 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2350 	ata_port_freeze(ap);
2351 	return;
2352 }
2353 
2354 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2355 {
2356 	struct ata_host *host = dev_instance;
2357 	unsigned int i;
2358 	unsigned int handled = 0;
2359 	unsigned long flags;
2360 	u32 irq_stat;
2361 
2362 	spin_lock_irqsave(&host->lock, flags);
2363 
2364 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2365 
2366 	for (i = 0; i < host->n_ports; i++) {
2367 		struct ata_port *ap = host->ports[i];
2368 
2369 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2370 			if (ap->link.sactive) {
2371 				nv_swncq_host_interrupt(ap, (u16)irq_stat);
2372 				handled = 1;
2373 			} else {
2374 				if (irq_stat)	/* preserve hotplug bits */
2375 					nv_swncq_irq_clear(ap, 0xfff0);
2376 
2377 				handled += nv_host_intr(ap, (u8)irq_stat);
2378 			}
2379 		}
2380 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2381 	}
2382 
2383 	spin_unlock_irqrestore(&host->lock, flags);
2384 
2385 	return IRQ_RETVAL(handled);
2386 }
2387 
2388 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2389 {
2390 	static int printed_version;
2391 	const struct ata_port_info *ppi[] = { NULL, NULL };
2392 	struct nv_pi_priv *ipriv;
2393 	struct ata_host *host;
2394 	struct nv_host_priv *hpriv;
2395 	int rc;
2396 	u32 bar;
2397 	void __iomem *base;
2398 	unsigned long type = ent->driver_data;
2399 
2400 	// Make sure this is a SATA controller by counting the number of BARs
2401 	// (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2402 	// it's an IDE controller and we ignore it.
2403 	for (bar = 0; bar < 6; bar++)
2404 		if (pci_resource_start(pdev, bar) == 0)
2405 			return -ENODEV;
2406 
2407 	if (!printed_version++)
2408 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2409 
2410 	rc = pcim_enable_device(pdev);
2411 	if (rc)
2412 		return rc;
2413 
2414 	/* determine type and allocate host */
2415 	if (type == CK804 && adma_enabled) {
2416 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2417 		type = ADMA;
2418 	} else if (type == MCP5x && swncq_enabled) {
2419 		dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2420 		type = SWNCQ;
2421 	}
2422 
2423 	ppi[0] = &nv_port_info[type];
2424 	ipriv = ppi[0]->private_data;
2425 	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2426 	if (rc)
2427 		return rc;
2428 
2429 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2430 	if (!hpriv)
2431 		return -ENOMEM;
2432 	hpriv->type = type;
2433 	host->private_data = hpriv;
2434 
2435 	/* request and iomap NV_MMIO_BAR */
2436 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2437 	if (rc)
2438 		return rc;
2439 
2440 	/* configure SCR access */
2441 	base = host->iomap[NV_MMIO_BAR];
2442 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2443 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2444 
2445 	/* enable SATA space for CK804 */
2446 	if (type >= CK804) {
2447 		u8 regval;
2448 
2449 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2450 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2451 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2452 	}
2453 
2454 	/* init ADMA */
2455 	if (type == ADMA) {
2456 		rc = nv_adma_host_init(host);
2457 		if (rc)
2458 			return rc;
2459 	} else if (type == SWNCQ)
2460 		nv_swncq_host_init(host);
2461 
2462 	pci_set_master(pdev);
2463 	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2464 				 IRQF_SHARED, ipriv->sht);
2465 }
2466 
2467 #ifdef CONFIG_PM
2468 static int nv_pci_device_resume(struct pci_dev *pdev)
2469 {
2470 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2471 	struct nv_host_priv *hpriv = host->private_data;
2472 	int rc;
2473 
2474 	rc = ata_pci_device_do_resume(pdev);
2475 	if (rc)
2476 		return rc;
2477 
2478 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2479 		if (hpriv->type >= CK804) {
2480 			u8 regval;
2481 
2482 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2483 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2484 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2485 		}
2486 		if (hpriv->type == ADMA) {
2487 			u32 tmp32;
2488 			struct nv_adma_port_priv *pp;
2489 			/* enable/disable ADMA on the ports appropriately */
2490 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2491 
2492 			pp = host->ports[0]->private_data;
2493 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2494 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2495 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2496 			else
2497 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2498 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2499 			pp = host->ports[1]->private_data;
2500 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2501 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2502 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2503 			else
2504 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2505 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2506 
2507 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2508 		}
2509 	}
2510 
2511 	ata_host_resume(host);
2512 
2513 	return 0;
2514 }
2515 #endif
2516 
2517 static void nv_ck804_host_stop(struct ata_host *host)
2518 {
2519 	struct pci_dev *pdev = to_pci_dev(host->dev);
2520 	u8 regval;
2521 
2522 	/* disable SATA space for CK804 */
2523 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2524 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2525 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2526 }
2527 
2528 static void nv_adma_host_stop(struct ata_host *host)
2529 {
2530 	struct pci_dev *pdev = to_pci_dev(host->dev);
2531 	u32 tmp32;
2532 
2533 	/* disable ADMA on the ports */
2534 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2535 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2536 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2537 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2538 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2539 
2540 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2541 
2542 	nv_ck804_host_stop(host);
2543 }
2544 
2545 static int __init nv_init(void)
2546 {
2547 	return pci_register_driver(&nv_pci_driver);
2548 }
2549 
2550 static void __exit nv_exit(void)
2551 {
2552 	pci_unregister_driver(&nv_pci_driver);
2553 }
2554 
2555 module_init(nv_init);
2556 module_exit(nv_exit);
2557 module_param_named(adma, adma_enabled, bool, 0444);
2558 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2559 module_param_named(swncq, swncq_enabled, bool, 0444);
2560 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2561 
2562