xref: /linux/drivers/ata/sata_nv.c (revision b233b28eac0cc37d07c2d007ea08c86c778c5af4)
1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/DocBook/libata.*
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a similar
28  *  fashion to other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50 
51 #define DRV_NAME			"sata_nv"
52 #define DRV_VERSION			"3.5"
53 
54 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
55 
56 enum {
57 	NV_MMIO_BAR			= 5,
58 
59 	NV_PORTS			= 2,
60 	NV_PIO_MASK			= 0x1f,
61 	NV_MWDMA_MASK			= 0x07,
62 	NV_UDMA_MASK			= 0x7f,
63 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
64 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
65 
66 	/* INT_STATUS/ENABLE */
67 	NV_INT_STATUS			= 0x10,
68 	NV_INT_ENABLE			= 0x11,
69 	NV_INT_STATUS_CK804		= 0x440,
70 	NV_INT_ENABLE_CK804		= 0x441,
71 
72 	/* INT_STATUS/ENABLE bits */
73 	NV_INT_DEV			= 0x01,
74 	NV_INT_PM			= 0x02,
75 	NV_INT_ADDED			= 0x04,
76 	NV_INT_REMOVED			= 0x08,
77 
78 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
79 
80 	NV_INT_ALL			= 0x0f,
81 	NV_INT_MASK			= NV_INT_DEV |
82 					  NV_INT_ADDED | NV_INT_REMOVED,
83 
84 	/* INT_CONFIG */
85 	NV_INT_CONFIG			= 0x12,
86 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
87 
88 	// For PCI config register 20
89 	NV_MCP_SATA_CFG_20		= 0x50,
90 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
92 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
93 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
94 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
95 
96 	NV_ADMA_MAX_CPBS		= 32,
97 	NV_ADMA_CPB_SZ			= 128,
98 	NV_ADMA_APRD_SZ			= 16,
99 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
100 					   NV_ADMA_APRD_SZ,
101 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
102 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
104 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105 
106 	/* BAR5 offset to ADMA general registers */
107 	NV_ADMA_GEN			= 0x400,
108 	NV_ADMA_GEN_CTL			= 0x00,
109 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
110 
111 	/* BAR5 offset to ADMA ports */
112 	NV_ADMA_PORT			= 0x480,
113 
114 	/* size of ADMA port register space  */
115 	NV_ADMA_PORT_SIZE		= 0x100,
116 
117 	/* ADMA port registers */
118 	NV_ADMA_CTL			= 0x40,
119 	NV_ADMA_CPB_COUNT		= 0x42,
120 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
121 	NV_ADMA_STAT			= 0x44,
122 	NV_ADMA_CPB_BASE_LOW		= 0x48,
123 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
124 	NV_ADMA_APPEND			= 0x50,
125 	NV_ADMA_NOTIFIER		= 0x68,
126 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
127 
128 	/* NV_ADMA_CTL register bits */
129 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
130 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
131 	NV_ADMA_CTL_GO			= (1 << 7),
132 	NV_ADMA_CTL_AIEN		= (1 << 8),
133 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
134 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
135 
136 	/* CPB response flag bits */
137 	NV_CPB_RESP_DONE		= (1 << 0),
138 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
139 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
140 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
141 
142 	/* CPB control flag bits */
143 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
144 	NV_CPB_CTL_QUEUE		= (1 << 1),
145 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
146 	NV_CPB_CTL_IEN			= (1 << 3),
147 	NV_CPB_CTL_FPDMA		= (1 << 4),
148 
149 	/* APRD flags */
150 	NV_APRD_WRITE			= (1 << 1),
151 	NV_APRD_END			= (1 << 2),
152 	NV_APRD_CONT			= (1 << 3),
153 
154 	/* NV_ADMA_STAT flags */
155 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
156 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
157 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
158 	NV_ADMA_STAT_CPBERR		= (1 << 4),
159 	NV_ADMA_STAT_SERROR		= (1 << 5),
160 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
161 	NV_ADMA_STAT_IDLE		= (1 << 8),
162 	NV_ADMA_STAT_LEGACY		= (1 << 9),
163 	NV_ADMA_STAT_STOPPED		= (1 << 10),
164 	NV_ADMA_STAT_DONE		= (1 << 12),
165 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
166 					  NV_ADMA_STAT_TIMEOUT,
167 
168 	/* port flags */
169 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
170 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
171 
172 	/* MCP55 reg offset */
173 	NV_CTL_MCP55			= 0x400,
174 	NV_INT_STATUS_MCP55		= 0x440,
175 	NV_INT_ENABLE_MCP55		= 0x444,
176 	NV_NCQ_REG_MCP55		= 0x448,
177 
178 	/* MCP55 */
179 	NV_INT_ALL_MCP55		= 0xffff,
180 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
181 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
182 
183 	/* SWNCQ ENABLE BITS */
184 	NV_CTL_PRI_SWNCQ		= 0x02,
185 	NV_CTL_SEC_SWNCQ		= 0x04,
186 
187 	/* SW NCQ status bits */
188 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
189 	NV_SWNCQ_IRQ_PM			= (1 << 1),
190 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
191 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
192 
193 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
194 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
195 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
196 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
197 
198 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
199 					  NV_SWNCQ_IRQ_REMOVED,
200 
201 };
202 
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 	__le64			addr;
206 	__le32			len;
207 	u8			flags;
208 	u8			packet_len;
209 	__le16			reserved;
210 };
211 
212 enum nv_adma_regbits {
213 	CMDEND	= (1 << 15),		/* end of command list */
214 	WNB	= (1 << 14),		/* wait-not-BSY */
215 	IGN	= (1 << 13),		/* ignore this entry */
216 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
217 	DA2	= (1 << (2 + 8)),
218 	DA1	= (1 << (1 + 8)),
219 	DA0	= (1 << (0 + 8)),
220 };
221 
222 /* ADMA Command Parameter Block
223    The first 5 SG segments are stored inside the Command Parameter Block itself.
224    If there are more than 5 segments the remainder are stored in a separate
225    memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 	u8			resp_flags;    /* 0 */
228 	u8			reserved1;     /* 1 */
229 	u8			ctl_flags;     /* 2 */
230 	/* len is length of taskfile in 64 bit words */
231 	u8			len;		/* 3  */
232 	u8			tag;           /* 4 */
233 	u8			next_cpb_idx;  /* 5 */
234 	__le16			reserved2;     /* 6-7 */
235 	__le16			tf[12];        /* 8-31 */
236 	struct nv_adma_prd	aprd[5];       /* 32-111 */
237 	__le64			next_aprd;     /* 112-119 */
238 	__le64			reserved3;     /* 120-127 */
239 };
240 
241 
242 struct nv_adma_port_priv {
243 	struct nv_adma_cpb	*cpb;
244 	dma_addr_t		cpb_dma;
245 	struct nv_adma_prd	*aprd;
246 	dma_addr_t		aprd_dma;
247 	void __iomem		*ctl_block;
248 	void __iomem		*gen_block;
249 	void __iomem		*notifier_clear_block;
250 	u64			adma_dma_mask;
251 	u8			flags;
252 	int			last_issue_ncq;
253 };
254 
255 struct nv_host_priv {
256 	unsigned long		type;
257 };
258 
259 struct defer_queue {
260 	u32		defer_bits;
261 	unsigned int	head;
262 	unsigned int	tail;
263 	unsigned int	tag[ATA_MAX_QUEUE];
264 };
265 
266 enum ncq_saw_flag_list {
267 	ncq_saw_d2h	= (1U << 0),
268 	ncq_saw_dmas	= (1U << 1),
269 	ncq_saw_sdb	= (1U << 2),
270 	ncq_saw_backout	= (1U << 3),
271 };
272 
273 struct nv_swncq_port_priv {
274 	struct ata_prd	*prd;	 /* our SG list */
275 	dma_addr_t	prd_dma; /* and its DMA mapping */
276 	void __iomem	*sactive_block;
277 	void __iomem	*irq_block;
278 	void __iomem	*tag_block;
279 	u32		qc_active;
280 
281 	unsigned int	last_issue_tag;
282 
283 	/* FIFO circular queue to store deferred commands */
284 	struct defer_queue defer_queue;
285 
286 	/* for NCQ interrupt analysis */
287 	u32		dhfis_bits;
288 	u32		dmafis_bits;
289 	u32		sdbfis_bits;
290 
291 	unsigned int	ncq_flags;
292 };
293 
294 
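/* Per-port interrupt pending bit in the ADMA general control register:
 * bit 19 for port 0, bit 31 (19 + 12) for port 1. */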
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
296 
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 
308 static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
309 				   unsigned long deadline);
310 static void nv_nf2_freeze(struct ata_port *ap);
311 static void nv_nf2_thaw(struct ata_port *ap);
312 static void nv_ck804_freeze(struct ata_port *ap);
313 static void nv_ck804_thaw(struct ata_port *ap);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332 
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347 
348 enum nv_host_type
349 {
350 	GENERIC,
351 	NFORCE2,
352 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
353 	CK804,
354 	ADMA,
355 	MCP5x,
356 	SWNCQ,
357 };
358 
359 static const struct pci_device_id nv_pci_tbl[] = {
360 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
373 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
374 
375 	{ } /* terminate list */
376 };
377 
378 static struct pci_driver nv_pci_driver = {
379 	.name			= DRV_NAME,
380 	.id_table		= nv_pci_tbl,
381 	.probe			= nv_init_one,
382 #ifdef CONFIG_PM
383 	.suspend		= ata_pci_device_suspend,
384 	.resume			= nv_pci_device_resume,
385 #endif
386 	.remove			= ata_pci_remove_one,
387 };
388 
389 static struct scsi_host_template nv_sht = {
390 	ATA_BMDMA_SHT(DRV_NAME),
391 };
392 
393 static struct scsi_host_template nv_adma_sht = {
394 	ATA_NCQ_SHT(DRV_NAME),
395 	.can_queue		= NV_ADMA_MAX_CPBS,
396 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
397 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
398 	.slave_configure	= nv_adma_slave_config,
399 };
400 
401 static struct scsi_host_template nv_swncq_sht = {
402 	ATA_NCQ_SHT(DRV_NAME),
403 	.can_queue		= ATA_MAX_QUEUE,
404 	.sg_tablesize		= LIBATA_MAX_PRD,
405 	.dma_boundary		= ATA_DMA_BOUNDARY,
406 	.slave_configure	= nv_swncq_slave_config,
407 };
408 
409 static struct ata_port_operations nv_common_ops = {
410 	.inherits		= &ata_bmdma_port_ops,
411 	.scr_read		= nv_scr_read,
412 	.scr_write		= nv_scr_write,
413 };
414 
415 /* OSDL bz11195 reports that link doesn't come online after hardreset
416  * on generic nv's and there have been several other similar reports
417  * on linux-ide.  Disable hardreset for generic nv's.
418  */
419 static struct ata_port_operations nv_generic_ops = {
420 	.inherits		= &nv_common_ops,
421 	.hardreset		= ATA_OP_NULL,
422 };
423 
424 /* OSDL bz3352 reports that nf2/3 controllers can't determine device
425  * signature reliably.  Also, the following thread reports detection
426  * failure on cold boot with the standard debouncing timing.
427  *
428  * http://thread.gmane.org/gmane.linux.ide/34098
429  *
430  * Debounce with hotplug timing and request follow-up SRST.
431  */
432 static struct ata_port_operations nv_nf2_ops = {
433 	.inherits		= &nv_common_ops,
434 	.freeze			= nv_nf2_freeze,
435 	.thaw			= nv_nf2_thaw,
436 	.hardreset		= nv_noclassify_hardreset,
437 };
438 
439 /* For initial probing after boot and hot plugging, hardreset mostly
440  * works fine on CK804 but curiously, reprobing on the initial port by
441  * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS
442  * in a somewhat nondeterministic way.  Use noclassify hardreset.
443  */
444 static struct ata_port_operations nv_ck804_ops = {
445 	.inherits		= &nv_common_ops,
446 	.freeze			= nv_ck804_freeze,
447 	.thaw			= nv_ck804_thaw,
448 	.hardreset		= nv_noclassify_hardreset,
449 	.host_stop		= nv_ck804_host_stop,
450 };
451 
452 static struct ata_port_operations nv_adma_ops = {
453 	.inherits		= &nv_ck804_ops,
454 
455 	.check_atapi_dma	= nv_adma_check_atapi_dma,
456 	.sff_tf_read		= nv_adma_tf_read,
457 	.qc_defer		= ata_std_qc_defer,
458 	.qc_prep		= nv_adma_qc_prep,
459 	.qc_issue		= nv_adma_qc_issue,
460 	.sff_irq_clear		= nv_adma_irq_clear,
461 
462 	.freeze			= nv_adma_freeze,
463 	.thaw			= nv_adma_thaw,
464 	.error_handler		= nv_adma_error_handler,
465 	.post_internal_cmd	= nv_adma_post_internal_cmd,
466 
467 	.port_start		= nv_adma_port_start,
468 	.port_stop		= nv_adma_port_stop,
469 #ifdef CONFIG_PM
470 	.port_suspend		= nv_adma_port_suspend,
471 	.port_resume		= nv_adma_port_resume,
472 #endif
473 	.host_stop		= nv_adma_host_stop,
474 };
475 
476 /* Kernel bz#12351 reports that when SWNCQ is enabled, for hotplug to
477  * work, hardreset should be used and hardreset can't report proper
478  * signature, which suggests that mcp5x is closer to nf2 as far as
479  * reset quirkiness is concerned.  Define separate ops for mcp5x with
480  * nv_noclassify_hardreset().
481  */
482 static struct ata_port_operations nv_mcp5x_ops = {
483 	.inherits		= &nv_common_ops,
484 	.hardreset		= nv_noclassify_hardreset,
485 };
486 
487 static struct ata_port_operations nv_swncq_ops = {
488 	.inherits		= &nv_mcp5x_ops,
489 
490 	.qc_defer		= ata_std_qc_defer,
491 	.qc_prep		= nv_swncq_qc_prep,
492 	.qc_issue		= nv_swncq_qc_issue,
493 
494 	.freeze			= nv_mcp55_freeze,
495 	.thaw			= nv_mcp55_thaw,
496 	.error_handler		= nv_swncq_error_handler,
497 
498 #ifdef CONFIG_PM
499 	.port_suspend		= nv_swncq_port_suspend,
500 	.port_resume		= nv_swncq_port_resume,
501 #endif
502 	.port_start		= nv_swncq_port_start,
503 };
504 
505 struct nv_pi_priv {
506 	irq_handler_t			irq_handler;
507 	struct scsi_host_template	*sht;
508 };
509 
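/* Build an anonymous struct nv_pi_priv (a compound literal) so that each
 * nv_port_info[] entry can carry its interrupt handler and host template
 * in .private_data. */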
510 #define NV_PI_PRIV(_irq_handler, _sht) \
511 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
512 
513 static const struct ata_port_info nv_port_info[] = {
514 	/* generic */
515 	{
516 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
517 		.pio_mask	= NV_PIO_MASK,
518 		.mwdma_mask	= NV_MWDMA_MASK,
519 		.udma_mask	= NV_UDMA_MASK,
520 		.port_ops	= &nv_generic_ops,
521 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
522 	},
523 	/* nforce2/3 */
524 	{
525 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
526 		.pio_mask	= NV_PIO_MASK,
527 		.mwdma_mask	= NV_MWDMA_MASK,
528 		.udma_mask	= NV_UDMA_MASK,
529 		.port_ops	= &nv_nf2_ops,
530 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
531 	},
532 	/* ck804 */
533 	{
534 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
535 		.pio_mask	= NV_PIO_MASK,
536 		.mwdma_mask	= NV_MWDMA_MASK,
537 		.udma_mask	= NV_UDMA_MASK,
538 		.port_ops	= &nv_ck804_ops,
539 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
540 	},
541 	/* ADMA */
542 	{
543 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
544 				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
545 		.pio_mask	= NV_PIO_MASK,
546 		.mwdma_mask	= NV_MWDMA_MASK,
547 		.udma_mask	= NV_UDMA_MASK,
548 		.port_ops	= &nv_adma_ops,
549 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
550 	},
551 	/* MCP5x */
552 	{
553 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
554 		.pio_mask	= NV_PIO_MASK,
555 		.mwdma_mask	= NV_MWDMA_MASK,
556 		.udma_mask	= NV_UDMA_MASK,
557 		.port_ops	= &nv_mcp5x_ops,
558 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
559 	},
560 	/* SWNCQ */
561 	{
562 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
563 				  ATA_FLAG_NCQ,
564 		.pio_mask	= NV_PIO_MASK,
565 		.mwdma_mask	= NV_MWDMA_MASK,
566 		.udma_mask	= NV_UDMA_MASK,
567 		.port_ops	= &nv_swncq_ops,
568 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
569 	},
570 };
571 
572 MODULE_AUTHOR("NVIDIA");
573 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
574 MODULE_LICENSE("GPL");
575 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
576 MODULE_VERSION(DRV_VERSION);
577 
578 static int adma_enabled;
579 static int swncq_enabled = 1;
580 
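/* Take the port out of ADMA mode and back into legacy register mode:
 * wait for the ADMA engine to report IDLE, clear the GO bit, then wait
 * for the LEGACY status bit.  No-op if already in register mode. */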
581 static void nv_adma_register_mode(struct ata_port *ap)
582 {
583 	struct nv_adma_port_priv *pp = ap->private_data;
584 	void __iomem *mmio = pp->ctl_block;
585 	u16 tmp, status;
586 	int count = 0;
587 
588 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
589 		return;
590 
591 	status = readw(mmio + NV_ADMA_STAT);
592 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
593 		ndelay(50);
594 		status = readw(mmio + NV_ADMA_STAT);
595 		count++;
596 	}
597 	if (count == 20)
598 		ata_port_printk(ap, KERN_WARNING,
599 			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
600 			status);
601 
602 	tmp = readw(mmio + NV_ADMA_CTL);
603 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
604 
605 	count = 0;
606 	status = readw(mmio + NV_ADMA_STAT);
607 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
608 		ndelay(50);
609 		status = readw(mmio + NV_ADMA_STAT);
610 		count++;
611 	}
612 	if (count == 20)
613 		ata_port_printk(ap, KERN_WARNING,
614 			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
615 			 status);
616 
617 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
618 }
619 
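/* Switch the port from register mode into ADMA mode: set the GO bit and
 * wait for the LEGACY status bit to clear and IDLE to be set.  Must not
 * be used while the port is configured for legacy ATAPI DMA. */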
620 static void nv_adma_mode(struct ata_port *ap)
621 {
622 	struct nv_adma_port_priv *pp = ap->private_data;
623 	void __iomem *mmio = pp->ctl_block;
624 	u16 tmp, status;
625 	int count = 0;
626 
627 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
628 		return;
629 
630 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
631 
632 	tmp = readw(mmio + NV_ADMA_CTL);
633 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
634 
635 	status = readw(mmio + NV_ADMA_STAT);
636 	while (((status & NV_ADMA_STAT_LEGACY) ||
637 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
638 		ndelay(50);
639 		status = readw(mmio + NV_ADMA_STAT);
640 		count++;
641 	}
642 	if (count == 20)
643 		ata_port_printk(ap, KERN_WARNING,
644 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
645 			status);
646 
647 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
648 }
649 
650 static int nv_adma_slave_config(struct scsi_device *sdev)
651 {
652 	struct ata_port *ap = ata_shost_to_port(sdev->host);
653 	struct nv_adma_port_priv *pp = ap->private_data;
654 	struct nv_adma_port_priv *port0, *port1;
655 	struct scsi_device *sdev0, *sdev1;
656 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
657 	unsigned long segment_boundary, flags;
658 	unsigned short sg_tablesize;
659 	int rc;
660 	int adma_enable;
661 	u32 current_reg, new_reg, config_mask;
662 
663 	rc = ata_scsi_slave_config(sdev);
664 
665 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
666 		/* Not a proper libata device, ignore */
667 		return rc;
668 
669 	spin_lock_irqsave(ap->lock, flags);
670 
671 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
672 		/*
673 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
674 		 * Therefore ATAPI commands are sent through the legacy interface.
675 		 * However, the legacy interface only supports 32-bit DMA.
676 		 * Restrict DMA parameters as required by the legacy interface
677 		 * when an ATAPI device is connected.
678 		 */
679 		segment_boundary = ATA_DMA_BOUNDARY;
680 		/* Subtract 1 since an extra entry may be needed for padding, see
681 		   libata-scsi.c */
682 		sg_tablesize = LIBATA_MAX_PRD - 1;
683 
684 		/* Since the legacy DMA engine is in use, we need to disable ADMA
685 		   on the port. */
686 		adma_enable = 0;
687 		nv_adma_register_mode(ap);
688 	} else {
689 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
690 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
691 		adma_enable = 1;
692 	}
693 
694 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
695 
696 	if (ap->port_no == 1)
697 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
698 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
699 	else
700 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
701 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
702 
703 	if (adma_enable) {
704 		new_reg = current_reg | config_mask;
705 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
706 	} else {
707 		new_reg = current_reg & ~config_mask;
708 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
709 	}
710 
711 	if (current_reg != new_reg)
712 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
713 
714 	port0 = ap->host->ports[0]->private_data;
715 	port1 = ap->host->ports[1]->private_data;
716 	sdev0 = ap->host->ports[0]->link.device[0].sdev;
717 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
718 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
719 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
720 		/** We have to set the DMA mask to 32-bit if either port is in
721 		    ATAPI mode, since they are on the same PCI device which is
722 		    used for DMA mapping. If we set the mask we also need to set
723 		    the bounce limit on both ports to ensure that the block
724 		    layer doesn't feed addresses that cause DMA mapping to
725 		    choke. If either SCSI device is not allocated yet, it's OK
726 		    since that port will discover its correct setting when it
727 		    does get allocated.
728 		    Note: Setting 32-bit mask should not fail. */
729 		if (sdev0)
730 			blk_queue_bounce_limit(sdev0->request_queue,
731 					       ATA_DMA_MASK);
732 		if (sdev1)
733 			blk_queue_bounce_limit(sdev1->request_queue,
734 					       ATA_DMA_MASK);
735 
736 		pci_set_dma_mask(pdev, ATA_DMA_MASK);
737 	} else {
738 		/** This shouldn't fail as it was set to this value before */
739 		pci_set_dma_mask(pdev, pp->adma_dma_mask);
740 		if (sdev0)
741 			blk_queue_bounce_limit(sdev0->request_queue,
742 					       pp->adma_dma_mask);
743 		if (sdev1)
744 			blk_queue_bounce_limit(sdev1->request_queue,
745 					       pp->adma_dma_mask);
746 	}
747 
748 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
749 	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
750 	ata_port_printk(ap, KERN_INFO,
751 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
752 		(unsigned long long)*ap->host->dev->dma_mask,
753 		segment_boundary, sg_tablesize);
754 
755 	spin_unlock_irqrestore(ap->lock, flags);
756 
757 	return rc;
758 }
759 
760 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
761 {
762 	struct nv_adma_port_priv *pp = qc->ap->private_data;
763 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
764 }
765 
766 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
767 {
768 	/* Other than when internal or pass-through commands are executed,
769 	   the only time this function will be called in ADMA mode will be
770 	   if a command fails. In the failure case we don't care about going
771 	   into register mode with ADMA commands pending, as the commands will
772 	   all shortly be aborted anyway. We assume that NCQ commands are not
773 	   issued via passthrough, which is the only way that switching into
774 	   register mode could abort outstanding commands. */
775 	nv_adma_register_mode(ap);
776 
777 	ata_sff_tf_read(ap, tf);
778 }
779 
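/* Encode a taskfile into the CPB's tf[] array.  Each 16-bit word carries
 * the register value in the low byte, the shadow register index in the
 * next bits and control flags (WNB, CMDEND, IGN) in the top bits; unused
 * slots are padded with IGN.  Returns the number of words used. */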
780 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
781 {
782 	unsigned int idx = 0;
783 
784 	if (tf->flags & ATA_TFLAG_ISADDR) {
785 		if (tf->flags & ATA_TFLAG_LBA48) {
786 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
787 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
788 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
789 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
790 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
791 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
792 		} else
793 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
794 
795 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
796 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
797 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
798 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
799 	}
800 
801 	if (tf->flags & ATA_TFLAG_DEVICE)
802 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
803 
804 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
805 
806 	while (idx < 12)
807 		cpb[idx++] = cpu_to_le16(IGN);
808 
809 	return idx;
810 }
811 
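/* Examine one CPB's response flags.  On an error flag (or when forced by
 * the caller from the notifier-error register) the port is aborted or
 * frozen for EH and 1 is returned; on normal completion the matching qc
 * is completed and 0 is returned. */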
812 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
813 {
814 	struct nv_adma_port_priv *pp = ap->private_data;
815 	u8 flags = pp->cpb[cpb_num].resp_flags;
816 
817 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
818 
819 	if (unlikely((force_err ||
820 		     flags & (NV_CPB_RESP_ATA_ERR |
821 			      NV_CPB_RESP_CMD_ERR |
822 			      NV_CPB_RESP_CPB_ERR)))) {
823 		struct ata_eh_info *ehi = &ap->link.eh_info;
824 		int freeze = 0;
825 
826 		ata_ehi_clear_desc(ehi);
827 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
828 		if (flags & NV_CPB_RESP_ATA_ERR) {
829 			ata_ehi_push_desc(ehi, "ATA error");
830 			ehi->err_mask |= AC_ERR_DEV;
831 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
832 			ata_ehi_push_desc(ehi, "CMD error");
833 			ehi->err_mask |= AC_ERR_DEV;
834 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
835 			ata_ehi_push_desc(ehi, "CPB error");
836 			ehi->err_mask |= AC_ERR_SYSTEM;
837 			freeze = 1;
838 		} else {
839 			/* notifier error, but no error in CPB flags? */
840 			ata_ehi_push_desc(ehi, "unknown");
841 			ehi->err_mask |= AC_ERR_OTHER;
842 			freeze = 1;
843 		}
844 		/* Kill all commands. EH will determine what actually failed. */
845 		if (freeze)
846 			ata_port_freeze(ap);
847 		else
848 			ata_port_abort(ap);
849 		return 1;
850 	}
851 
852 	if (likely(flags & NV_CPB_RESP_DONE)) {
853 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
854 		VPRINTK("CPB flags done, flags=0x%x\n", flags);
855 		if (likely(qc)) {
856 			DPRINTK("Completing qc from tag %d\n", cpb_num);
857 			ata_qc_complete(qc);
858 		} else {
859 			struct ata_eh_info *ehi = &ap->link.eh_info;
860 			/* Notifier bits set without a command may indicate the drive
861 			   is misbehaving. Raise host state machine violation on this
862 			   condition. */
863 			ata_port_printk(ap, KERN_ERR,
864 					"notifier for tag %d with no cmd?\n",
865 					cpb_num);
866 			ehi->err_mask |= AC_ERR_HSM;
867 			ehi->action |= ATA_EH_RESET;
868 			ata_port_freeze(ap);
869 			return 1;
870 		}
871 	}
872 	return 0;
873 }
874 
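/* Per-port interrupt handling shared by the NF2/CK804 handlers and the
 * ADMA legacy path: freeze on hotplug/unplug notification, bail out if
 * the interrupt is not ours, and hand device interrupts to
 * ata_sff_host_intr(). */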
875 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
876 {
877 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
878 
879 	/* freeze if hotplugged */
880 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
881 		ata_port_freeze(ap);
882 		return 1;
883 	}
884 
885 	/* bail out if not our interrupt */
886 	if (!(irq_stat & NV_INT_DEV))
887 		return 0;
888 
889 	/* DEV interrupt w/ no active qc? */
890 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
891 		ata_sff_check_status(ap);
892 		return 1;
893 	}
894 
895 	/* handle interrupt */
896 	return ata_sff_host_intr(ap, qc);
897 }
898 
899 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
900 {
901 	struct ata_host *host = dev_instance;
902 	int i, handled = 0;
903 	u32 notifier_clears[2];
904 
905 	spin_lock(&host->lock);
906 
907 	for (i = 0; i < host->n_ports; i++) {
908 		struct ata_port *ap = host->ports[i];
909 		notifier_clears[i] = 0;
910 
911 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
912 			struct nv_adma_port_priv *pp = ap->private_data;
913 			void __iomem *mmio = pp->ctl_block;
914 			u16 status;
915 			u32 gen_ctl;
916 			u32 notifier, notifier_error;
917 
918 			/* if ADMA is disabled, use standard ata interrupt handler */
919 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
920 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
921 					>> (NV_INT_PORT_SHIFT * i);
922 				handled += nv_host_intr(ap, irq_stat);
923 				continue;
924 			}
925 
926 			/* if in ATA register mode, check for standard interrupts */
927 			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
928 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
929 					>> (NV_INT_PORT_SHIFT * i);
930 				if (ata_tag_valid(ap->link.active_tag))
931 					/** NV_INT_DEV indication seems unreliable at times
932 					    at least in ADMA mode. Force it on always when a
933 					    command is active, to prevent losing interrupts. */
934 					irq_stat |= NV_INT_DEV;
935 				handled += nv_host_intr(ap, irq_stat);
936 			}
937 
938 			notifier = readl(mmio + NV_ADMA_NOTIFIER);
939 			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
940 			notifier_clears[i] = notifier | notifier_error;
941 
942 			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
943 
944 			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
945 			    !notifier_error)
946 				/* Nothing to do */
947 				continue;
948 
949 			status = readw(mmio + NV_ADMA_STAT);
950 
951 			/* Clear status. Ensure the controller sees the clearing before we start
952 			   looking at any of the CPB statuses, so that any CPB completions after
953 			   this point in the handler will raise another interrupt. */
954 			writew(status, mmio + NV_ADMA_STAT);
955 			readw(mmio + NV_ADMA_STAT); /* flush posted write */
956 			rmb();
957 
958 			handled++; /* irq handled if we got here */
959 
960 			/* freeze if hotplugged or controller error */
961 			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
962 					       NV_ADMA_STAT_HOTUNPLUG |
963 					       NV_ADMA_STAT_TIMEOUT |
964 					       NV_ADMA_STAT_SERROR))) {
965 				struct ata_eh_info *ehi = &ap->link.eh_info;
966 
967 				ata_ehi_clear_desc(ehi);
968 				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
969 				if (status & NV_ADMA_STAT_TIMEOUT) {
970 					ehi->err_mask |= AC_ERR_SYSTEM;
971 					ata_ehi_push_desc(ehi, "timeout");
972 				} else if (status & NV_ADMA_STAT_HOTPLUG) {
973 					ata_ehi_hotplugged(ehi);
974 					ata_ehi_push_desc(ehi, "hotplug");
975 				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
976 					ata_ehi_hotplugged(ehi);
977 					ata_ehi_push_desc(ehi, "hot unplug");
978 				} else if (status & NV_ADMA_STAT_SERROR) {
979 					/* let libata analyze SError and figure out the cause */
980 					ata_ehi_push_desc(ehi, "SError");
981 				} else
982 					ata_ehi_push_desc(ehi, "unknown");
983 				ata_port_freeze(ap);
984 				continue;
985 			}
986 
987 			if (status & (NV_ADMA_STAT_DONE |
988 				      NV_ADMA_STAT_CPBERR |
989 				      NV_ADMA_STAT_CMD_COMPLETE)) {
990 				u32 check_commands = notifier_clears[i];
991 				int pos, error = 0;
992 
993 				if (status & NV_ADMA_STAT_CPBERR) {
994 					/* Check all active commands */
995 					if (ata_tag_valid(ap->link.active_tag))
996 						check_commands = 1 <<
997 							ap->link.active_tag;
998 					else
999 						check_commands =
1000 							ap->link.sactive;
1001 				}
1002 
1003 				/** Check CPBs for completed commands */
1004 				while ((pos = ffs(check_commands)) && !error) {
1005 					pos--;
1006 					error = nv_adma_check_cpb(ap, pos,
1007 						notifier_error & (1 << pos));
1008 					check_commands &= ~(1 << pos);
1009 				}
1010 			}
1011 		}
1012 	}
1013 
1014 	if (notifier_clears[0] || notifier_clears[1]) {
1015 		/* Note: Both notifier clear registers must be written
1016 		   if either is set, even if one is zero, according to NVIDIA. */
1017 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1018 		writel(notifier_clears[0], pp->notifier_clear_block);
1019 		pp = host->ports[1]->private_data;
1020 		writel(notifier_clears[1], pp->notifier_clear_block);
1021 	}
1022 
1023 	spin_unlock(&host->lock);
1024 
1025 	return IRQ_RETVAL(handled);
1026 }
1027 
1028 static void nv_adma_freeze(struct ata_port *ap)
1029 {
1030 	struct nv_adma_port_priv *pp = ap->private_data;
1031 	void __iomem *mmio = pp->ctl_block;
1032 	u16 tmp;
1033 
1034 	nv_ck804_freeze(ap);
1035 
1036 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1037 		return;
1038 
1039 	/* clear any outstanding CK804 notifications */
1040 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1041 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1042 
1043 	/* Disable interrupt */
1044 	tmp = readw(mmio + NV_ADMA_CTL);
1045 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1046 		mmio + NV_ADMA_CTL);
1047 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1048 }
1049 
1050 static void nv_adma_thaw(struct ata_port *ap)
1051 {
1052 	struct nv_adma_port_priv *pp = ap->private_data;
1053 	void __iomem *mmio = pp->ctl_block;
1054 	u16 tmp;
1055 
1056 	nv_ck804_thaw(ap);
1057 
1058 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1059 		return;
1060 
1061 	/* Enable interrupt */
1062 	tmp = readw(mmio + NV_ADMA_CTL);
1063 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1064 		mmio + NV_ADMA_CTL);
1065 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1066 }
1067 
1068 static void nv_adma_irq_clear(struct ata_port *ap)
1069 {
1070 	struct nv_adma_port_priv *pp = ap->private_data;
1071 	void __iomem *mmio = pp->ctl_block;
1072 	u32 notifier_clears[2];
1073 
1074 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1075 		ata_sff_irq_clear(ap);
1076 		return;
1077 	}
1078 
1079 	/* clear any outstanding CK804 notifications */
1080 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1081 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1082 
1083 	/* clear ADMA status */
1084 	writew(0xffff, mmio + NV_ADMA_STAT);
1085 
1086 	/* clear notifiers - note that both ports' notifier clear registers
1087 	   must be written even though we are only clearing one of them */
1088 	if (ap->port_no == 0) {
1089 		notifier_clears[0] = 0xFFFFFFFF;
1090 		notifier_clears[1] = 0;
1091 	} else {
1092 		notifier_clears[0] = 0;
1093 		notifier_clears[1] = 0xFFFFFFFF;
1094 	}
1095 	pp = ap->host->ports[0]->private_data;
1096 	writel(notifier_clears[0], pp->notifier_clear_block);
1097 	pp = ap->host->ports[1]->private_data;
1098 	writel(notifier_clears[1], pp->notifier_clear_block);
1099 }
1100 
1101 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1102 {
1103 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1104 
1105 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1106 		ata_sff_post_internal_cmd(qc);
1107 }
1108 
1109 static int nv_adma_port_start(struct ata_port *ap)
1110 {
1111 	struct device *dev = ap->host->dev;
1112 	struct nv_adma_port_priv *pp;
1113 	int rc;
1114 	void *mem;
1115 	dma_addr_t mem_dma;
1116 	void __iomem *mmio;
1117 	struct pci_dev *pdev = to_pci_dev(dev);
1118 	u16 tmp;
1119 
1120 	VPRINTK("ENTER\n");
1121 
1122 	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1123 	   pad buffers */
1124 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1125 	if (rc)
1126 		return rc;
1127 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1128 	if (rc)
1129 		return rc;
1130 
1131 	rc = ata_port_start(ap);
1132 	if (rc)
1133 		return rc;
1134 
1135 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1136 	if (!pp)
1137 		return -ENOMEM;
1138 
1139 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1140 	       ap->port_no * NV_ADMA_PORT_SIZE;
1141 	pp->ctl_block = mmio;
1142 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1143 	pp->notifier_clear_block = pp->gen_block +
1144 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1145 
1146 	/* Now that the legacy PRD and padding buffer are allocated we can
1147 	   safely raise the DMA mask to allocate the CPB/APRD table.
1148 	   These calls are allowed to fail since we store the value that
1149 	   ends up being used as the bounce limit in slave_config later if
1150 	   needed. */
1151 	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1152 	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1153 	pp->adma_dma_mask = *dev->dma_mask;
1154 
1155 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1156 				  &mem_dma, GFP_KERNEL);
1157 	if (!mem)
1158 		return -ENOMEM;
1159 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1160 
1161 	/*
1162 	 * First item in chunk of DMA memory:
1163 	 * 128-byte command parameter block (CPB)
1164 	 * one for each command tag
1165 	 */
1166 	pp->cpb     = mem;
1167 	pp->cpb_dma = mem_dma;
1168 
1169 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1170 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1171 
1172 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1173 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1174 
1175 	/*
1176 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1177 	 */
1178 	pp->aprd = mem;
1179 	pp->aprd_dma = mem_dma;
1180 
1181 	ap->private_data = pp;
1182 
1183 	/* clear any outstanding interrupt conditions */
1184 	writew(0xffff, mmio + NV_ADMA_STAT);
1185 
1186 	/* initialize port variables */
1187 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1188 
1189 	/* clear CPB fetch count */
1190 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1191 
1192 	/* clear GO for register mode, enable interrupt */
1193 	tmp = readw(mmio + NV_ADMA_CTL);
1194 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1195 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1196 
1197 	tmp = readw(mmio + NV_ADMA_CTL);
1198 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1199 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1200 	udelay(1);
1201 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1202 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1203 
1204 	return 0;
1205 }
1206 
1207 static void nv_adma_port_stop(struct ata_port *ap)
1208 {
1209 	struct nv_adma_port_priv *pp = ap->private_data;
1210 	void __iomem *mmio = pp->ctl_block;
1211 
1212 	VPRINTK("ENTER\n");
1213 	writew(0, mmio + NV_ADMA_CTL);
1214 }
1215 
1216 #ifdef CONFIG_PM
1217 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1218 {
1219 	struct nv_adma_port_priv *pp = ap->private_data;
1220 	void __iomem *mmio = pp->ctl_block;
1221 
1222 	/* Go to register mode - clears GO */
1223 	nv_adma_register_mode(ap);
1224 
1225 	/* clear CPB fetch count */
1226 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1227 
1228 	/* disable interrupt, shut down port */
1229 	writew(0, mmio + NV_ADMA_CTL);
1230 
1231 	return 0;
1232 }
1233 
1234 static int nv_adma_port_resume(struct ata_port *ap)
1235 {
1236 	struct nv_adma_port_priv *pp = ap->private_data;
1237 	void __iomem *mmio = pp->ctl_block;
1238 	u16 tmp;
1239 
1240 	/* set CPB block location */
1241 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1242 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1243 
1244 	/* clear any outstanding interrupt conditions */
1245 	writew(0xffff, mmio + NV_ADMA_STAT);
1246 
1247 	/* initialize port variables */
1248 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1249 
1250 	/* clear CPB fetch count */
1251 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1252 
1253 	/* clear GO for register mode, enable interrupt */
1254 	tmp = readw(mmio + NV_ADMA_CTL);
1255 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1256 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1257 
1258 	tmp = readw(mmio + NV_ADMA_CTL);
1259 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1260 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1261 	udelay(1);
1262 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1263 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1264 
1265 	return 0;
1266 }
1267 #endif
1268 
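/* Point the SFF taskfile addresses at the port's ADMA register block;
 * in register mode the shadow registers are exposed there at 4-byte
 * strides, with the control/altstatus register at offset 0x20. */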
1269 static void nv_adma_setup_port(struct ata_port *ap)
1270 {
1271 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1272 	struct ata_ioports *ioport = &ap->ioaddr;
1273 
1274 	VPRINTK("ENTER\n");
1275 
1276 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1277 
1278 	ioport->cmd_addr	= mmio;
1279 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1280 	ioport->error_addr	=
1281 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1282 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1283 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1284 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1285 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1286 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1287 	ioport->status_addr	=
1288 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1289 	ioport->altstatus_addr	=
1290 	ioport->ctl_addr	= mmio + 0x20;
1291 }
1292 
1293 static int nv_adma_host_init(struct ata_host *host)
1294 {
1295 	struct pci_dev *pdev = to_pci_dev(host->dev);
1296 	unsigned int i;
1297 	u32 tmp32;
1298 
1299 	VPRINTK("ENTER\n");
1300 
1301 	/* enable ADMA on the ports */
1302 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1303 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1304 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1305 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1306 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1307 
1308 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1309 
1310 	for (i = 0; i < host->n_ports; i++)
1311 		nv_adma_setup_port(host->ports[i]);
1312 
1313 	return 0;
1314 }
1315 
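/* Fill one ADMA PRD (scatter/gather entry).  END marks the last entry of
 * the command and CONT links an entry to the one after it; entry 4 (the
 * last APRD embedded in the CPB) is left without CONT, presumably because
 * the jump to the external APRD table is described by next_aprd instead. */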
1316 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1317 			      struct scatterlist *sg,
1318 			      int idx,
1319 			      struct nv_adma_prd *aprd)
1320 {
1321 	u8 flags = 0;
1322 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1323 		flags |= NV_APRD_WRITE;
1324 	if (idx == qc->n_elem - 1)
1325 		flags |= NV_APRD_END;
1326 	else if (idx != 4)
1327 		flags |= NV_APRD_CONT;
1328 
1329 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1330 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1331 	aprd->flags = flags;
1332 	aprd->packet_len = 0;
1333 }
1334 
1335 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1336 {
1337 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1338 	struct nv_adma_prd *aprd;
1339 	struct scatterlist *sg;
1340 	unsigned int si;
1341 
1342 	VPRINTK("ENTER\n");
1343 
1344 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1345 		aprd = (si < 5) ? &cpb->aprd[si] :
1346 			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1347 		nv_adma_fill_aprd(qc, sg, si, aprd);
1348 	}
1349 	if (si > 5)
1350 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1351 	else
1352 		cpb->next_aprd = cpu_to_le64(0);
1353 }
1354 
1355 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1356 {
1357 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1358 
1359 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1360 	   or interrupt-driven no-data commands. */
1361 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1362 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1363 		return 1;
1364 
1365 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1366 	   (qc->tf.protocol == ATA_PROT_NODATA))
1367 		return 0;
1368 
1369 	return 1;
1370 }
1371 
1372 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1373 {
1374 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1375 	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1376 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1377 		       NV_CPB_CTL_IEN;
1378 
1379 	if (nv_adma_use_reg_mode(qc)) {
1380 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1381 			(qc->flags & ATA_QCFLAG_DMAMAP));
1382 		nv_adma_register_mode(qc->ap);
1383 		ata_sff_qc_prep(qc);
1384 		return;
1385 	}
1386 
1387 	cpb->resp_flags = NV_CPB_RESP_DONE;
1388 	wmb();
1389 	cpb->ctl_flags = 0;
1390 	wmb();
1391 
1392 	cpb->len		= 3;
1393 	cpb->tag		= qc->tag;
1394 	cpb->next_cpb_idx	= 0;
1395 
1396 	/* turn on NCQ flags for NCQ commands */
1397 	if (qc->tf.protocol == ATA_PROT_NCQ)
1398 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1399 
1400 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1401 
1402 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1403 
1404 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1405 		nv_adma_fill_sg(qc, cpb);
1406 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1407 	} else
1408 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1409 
1410 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1411 	   until we are finished filling in all of the contents */
1412 	wmb();
1413 	cpb->ctl_flags = ctl_flags;
1414 	wmb();
1415 	cpb->resp_flags = 0;
1416 }
1417 
1418 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1419 {
1420 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1421 	void __iomem *mmio = pp->ctl_block;
1422 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1423 
1424 	VPRINTK("ENTER\n");
1425 
1426 	/* We can't handle result taskfile with NCQ commands, since
1427 	   retrieving the taskfile switches us out of ADMA mode and would abort
1428 	   existing commands. */
1429 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1430 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1431 		ata_dev_printk(qc->dev, KERN_ERR,
1432 			"NCQ w/ RESULT_TF not allowed\n");
1433 		return AC_ERR_SYSTEM;
1434 	}
1435 
1436 	if (nv_adma_use_reg_mode(qc)) {
1437 		/* use ATA register mode */
1438 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1439 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1440 			(qc->flags & ATA_QCFLAG_DMAMAP));
1441 		nv_adma_register_mode(qc->ap);
1442 		return ata_sff_qc_issue(qc);
1443 	} else
1444 		nv_adma_mode(qc->ap);
1445 
1446 	/* write append register, command tag in lower 8 bits
1447 	   and (number of cpbs to append -1) in top 8 bits */
1448 	wmb();
1449 
1450 	if (curr_ncq != pp->last_issue_ncq) {
1451 		/* Seems to need some delay before switching between NCQ and
1452 		   non-NCQ commands, else we get command timeouts and such. */
1453 		udelay(20);
1454 		pp->last_issue_ncq = curr_ncq;
1455 	}
1456 
1457 	writew(qc->tag, mmio + NV_ADMA_APPEND);
1458 
1459 	DPRINTK("Issued tag %u\n", qc->tag);
1460 
1461 	return 0;
1462 }
1463 
1464 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1465 {
1466 	struct ata_host *host = dev_instance;
1467 	unsigned int i;
1468 	unsigned int handled = 0;
1469 	unsigned long flags;
1470 
1471 	spin_lock_irqsave(&host->lock, flags);
1472 
1473 	for (i = 0; i < host->n_ports; i++) {
1474 		struct ata_port *ap;
1475 
1476 		ap = host->ports[i];
1477 		if (ap &&
1478 		    !(ap->flags & ATA_FLAG_DISABLED)) {
1479 			struct ata_queued_cmd *qc;
1480 
1481 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1482 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1483 				handled += ata_sff_host_intr(ap, qc);
1484 			else
1485 				// No request pending?  Clear interrupt status
1486 				// anyway, in case there's one pending.
1487 				ap->ops->sff_check_status(ap);
1488 		}
1489 
1490 	}
1491 
1492 	spin_unlock_irqrestore(&host->lock, flags);
1493 
1494 	return IRQ_RETVAL(handled);
1495 }
1496 
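/* Interrupt body shared by the NF2 and CK804 handlers.  Each port owns a
 * 4-bit slice of the status byte, so irq_stat is shifted right by
 * NV_INT_PORT_SHIFT after every port. */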
1497 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1498 {
1499 	int i, handled = 0;
1500 
1501 	for (i = 0; i < host->n_ports; i++) {
1502 		struct ata_port *ap = host->ports[i];
1503 
1504 		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1505 			handled += nv_host_intr(ap, irq_stat);
1506 
1507 		irq_stat >>= NV_INT_PORT_SHIFT;
1508 	}
1509 
1510 	return IRQ_RETVAL(handled);
1511 }
1512 
1513 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1514 {
1515 	struct ata_host *host = dev_instance;
1516 	u8 irq_stat;
1517 	irqreturn_t ret;
1518 
1519 	spin_lock(&host->lock);
1520 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1521 	ret = nv_do_interrupt(host, irq_stat);
1522 	spin_unlock(&host->lock);
1523 
1524 	return ret;
1525 }
1526 
1527 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1528 {
1529 	struct ata_host *host = dev_instance;
1530 	u8 irq_stat;
1531 	irqreturn_t ret;
1532 
1533 	spin_lock(&host->lock);
1534 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1535 	ret = nv_do_interrupt(host, irq_stat);
1536 	spin_unlock(&host->lock);
1537 
1538 	return ret;
1539 }
1540 
1541 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1542 {
1543 	if (sc_reg > SCR_CONTROL)
1544 		return -EINVAL;
1545 
1546 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1547 	return 0;
1548 }
1549 
1550 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1551 {
1552 	if (sc_reg > SCR_CONTROL)
1553 		return -EINVAL;
1554 
1555 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1556 	return 0;
1557 }
1558 
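/* Hardreset with hotplug debounce timing.  Returning -EAGAIN while the
 * link is online asks libata EH for a follow-up softreset instead of
 * relying on the post-hardreset device signature, which these
 * controllers don't report reliably. */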
1559 static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
1560 				   unsigned long deadline)
1561 {
1562 	bool online;
1563 	int rc;
1564 
1565 	rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1566 				 &online, NULL);
1567 	return online ? -EAGAIN : rc;
1568 }
1569 
1570 static void nv_nf2_freeze(struct ata_port *ap)
1571 {
1572 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1573 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1574 	u8 mask;
1575 
1576 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1577 	mask &= ~(NV_INT_ALL << shift);
1578 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1579 }
1580 
1581 static void nv_nf2_thaw(struct ata_port *ap)
1582 {
1583 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1584 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1585 	u8 mask;
1586 
1587 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1588 
1589 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1590 	mask |= (NV_INT_MASK << shift);
1591 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1592 }
1593 
1594 static void nv_ck804_freeze(struct ata_port *ap)
1595 {
1596 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1597 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1598 	u8 mask;
1599 
1600 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1601 	mask &= ~(NV_INT_ALL << shift);
1602 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1603 }
1604 
1605 static void nv_ck804_thaw(struct ata_port *ap)
1606 {
1607 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1608 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1609 	u8 mask;
1610 
1611 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1612 
1613 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1614 	mask |= (NV_INT_MASK << shift);
1615 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1616 }
1617 
1618 static void nv_mcp55_freeze(struct ata_port *ap)
1619 {
1620 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1621 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1622 	u32 mask;
1623 
1624 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1625 
1626 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1627 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1628 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1629 	ata_sff_freeze(ap);
1630 }
1631 
1632 static void nv_mcp55_thaw(struct ata_port *ap)
1633 {
1634 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1635 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1636 	u32 mask;
1637 
1638 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1639 
1640 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1641 	mask |= (NV_INT_MASK_MCP55 << shift);
1642 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1643 	ata_sff_thaw(ap);
1644 }
1645 
1646 static void nv_adma_error_handler(struct ata_port *ap)
1647 {
1648 	struct nv_adma_port_priv *pp = ap->private_data;
1649 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1650 		void __iomem *mmio = pp->ctl_block;
1651 		int i;
1652 		u16 tmp;
1653 
1654 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1655 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1656 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1657 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1658 			u32 status = readw(mmio + NV_ADMA_STAT);
1659 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1660 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1661 
1662 			ata_port_printk(ap, KERN_ERR,
1663 				"EH in ADMA mode, notifier 0x%X "
1664 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1665 				"next cpb count 0x%X next cpb idx 0x%x\n",
1666 				notifier, notifier_error, gen_ctl, status,
1667 				cpb_count, next_cpb_idx);
1668 
1669 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1670 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1671 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1672 				    ap->link.sactive & (1 << i))
1673 					ata_port_printk(ap, KERN_ERR,
1674 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1675 						i, cpb->ctl_flags, cpb->resp_flags);
1676 			}
1677 		}
1678 
1679 		/* Push us back into port register mode for error handling. */
1680 		nv_adma_register_mode(ap);
1681 
1682 		/* Mark all of the CPBs as invalid to prevent them from
1683 		   being executed */
1684 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1685 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1686 
1687 		/* clear CPB fetch count */
1688 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1689 
1690 		/* Reset channel */
1691 		tmp = readw(mmio + NV_ADMA_CTL);
1692 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1693 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1694 		udelay(1);
1695 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1696 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1697 	}
1698 
1699 	ata_sff_error_handler(ap);
1700 }
1701 
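/*
 * SWNCQ deferral queue: nv_swncq_qc_issue() sends a command to the drive
 * immediately only when nothing else is active; otherwise the qc's tag is
 * pushed onto this small FIFO here and pulled back out later, from the
 * interrupt path, by nv_swncq_qc_from_dq().
 */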
1702 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1703 {
1704 	struct nv_swncq_port_priv *pp = ap->private_data;
1705 	struct defer_queue *dq = &pp->defer_queue;
1706 
1707 	/* queue is full */
1708 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1709 	dq->defer_bits |= (1 << qc->tag);
1710 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1711 }
1712 
1713 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1714 {
1715 	struct nv_swncq_port_priv *pp = ap->private_data;
1716 	struct defer_queue *dq = &pp->defer_queue;
1717 	unsigned int tag;
1718 
1719 	if (dq->head == dq->tail)	/* null queue */
1720 		return NULL;
1721 
1722 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1723 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1724 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1725 	dq->defer_bits &= ~(1 << tag);
1726 
1727 	return ata_qc_from_tag(ap, tag);
1728 }
1729 
1730 static void nv_swncq_fis_reinit(struct ata_port *ap)
1731 {
1732 	struct nv_swncq_port_priv *pp = ap->private_data;
1733 
1734 	pp->dhfis_bits = 0;
1735 	pp->dmafis_bits = 0;
1736 	pp->sdbfis_bits = 0;
1737 	pp->ncq_flags = 0;
1738 }
1739 
1740 static void nv_swncq_pp_reinit(struct ata_port *ap)
1741 {
1742 	struct nv_swncq_port_priv *pp = ap->private_data;
1743 	struct defer_queue *dq = &pp->defer_queue;
1744 
1745 	dq->head = 0;
1746 	dq->tail = 0;
1747 	dq->defer_bits = 0;
1748 	pp->qc_active = 0;
1749 	pp->last_issue_tag = ATA_TAG_POISON;
1750 	nv_swncq_fis_reinit(ap);
1751 }
1752 
1753 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1754 {
1755 	struct nv_swncq_port_priv *pp = ap->private_data;
1756 
1757 	writew(fis, pp->irq_block);
1758 }
1759 
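/* ata_bmdma_stop() takes a qc but only uses qc->ap here, so a minimal
 * stack qc is enough to stop this port's BMDMA engine. */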
1760 static void __ata_bmdma_stop(struct ata_port *ap)
1761 {
1762 	struct ata_queued_cmd qc;
1763 
1764 	qc.ap = ap;
1765 	ata_bmdma_stop(&qc);
1766 }
1767 
1768 static void nv_swncq_ncq_stop(struct ata_port *ap)
1769 {
1770 	struct nv_swncq_port_priv *pp = ap->private_data;
1771 	unsigned int i;
1772 	u32 sactive;
1773 	u32 done_mask;
1774 
1775 	ata_port_printk(ap, KERN_ERR,
1776 			"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1777 			ap->qc_active, ap->link.sactive);
1778 	ata_port_printk(ap, KERN_ERR,
1779 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1780 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1781 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1782 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1783 
1784 	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1785 			ap->ops->sff_check_status(ap),
1786 			ioread8(ap->ioaddr.error_addr));
1787 
1788 	sactive = readl(pp->sactive_block);
1789 	done_mask = pp->qc_active ^ sactive;
1790 
1791 	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1792 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1793 		u8 err = 0;
1794 		if (pp->qc_active & (1 << i))
1795 			err = 0;
1796 		else if (done_mask & (1 << i))
1797 			err = 1;
1798 		else
1799 			continue;
1800 
1801 		ata_port_printk(ap, KERN_ERR,
1802 				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
1803 				(pp->dhfis_bits >> i) & 0x1,
1804 				(pp->dmafis_bits >> i) & 0x1,
1805 				(pp->sdbfis_bits >> i) & 0x1,
1806 				(sactive >> i) & 0x1,
1807 				(err ? "error! tag doesn't exist" : " "));
1808 	}
1809 
1810 	nv_swncq_pp_reinit(ap);
1811 	ap->ops->sff_irq_clear(ap);
1812 	__ata_bmdma_stop(ap);
1813 	nv_swncq_irq_clear(ap, 0xffff);
1814 }
1815 
1816 static void nv_swncq_error_handler(struct ata_port *ap)
1817 {
1818 	struct ata_eh_context *ehc = &ap->link.eh_context;
1819 
1820 	if (ap->link.sactive) {
1821 		nv_swncq_ncq_stop(ap);
1822 		ehc->i.action |= ATA_EH_RESET;
1823 	}
1824 
1825 	ata_sff_error_handler(ap);
1826 }
1827 
1828 #ifdef CONFIG_PM
1829 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1830 {
1831 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1832 	u32 tmp;
1833 
1834 	/* clear irq */
1835 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1836 
1837 	/* disable irq */
1838 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1839 
1840 	/* disable swncq */
1841 	tmp = readl(mmio + NV_CTL_MCP55);
1842 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1843 	writel(tmp, mmio + NV_CTL_MCP55);
1844 
1845 	return 0;
1846 }
1847 
1848 static int nv_swncq_port_resume(struct ata_port *ap)
1849 {
1850 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1851 	u32 tmp;
1852 
1853 	/* clear irq */
1854 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1855 
1856 	/* enable irq */
1857 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1858 
1859 	/* enable swncq */
1860 	tmp = readl(mmio + NV_CTL_MCP55);
1861 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1862 
1863 	return 0;
1864 }
1865 #endif
1866 
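/*
 * One-time SWNCQ setup: clear bit 7 of PCI config register 0x7f (the
 * "ECO 398" setting), enable SWNCQ on both ports, unmask the interrupt
 * sources the SWNCQ path relies on (0x00fd00fd is one 0x00fd mask per port)
 * and ack any stale port interrupt status.
 */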
1867 static void nv_swncq_host_init(struct ata_host *host)
1868 {
1869 	u32 tmp;
1870 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1871 	struct pci_dev *pdev = to_pci_dev(host->dev);
1872 	u8 regval;
1873 
1874 	/* disable ECO 398 */
1875 	pci_read_config_byte(pdev, 0x7f, &regval);
1876 	regval &= ~(1 << 7);
1877 	pci_write_config_byte(pdev, 0x7f, regval);
1878 
1879 	/* enable swncq */
1880 	tmp = readl(mmio + NV_CTL_MCP55);
1881 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1882 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1883 
1884 	/* enable irq intr */
1885 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1886 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1887 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1888 
1889 	/* clear port irq */
1890 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1891 }
1892 
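/*
 * NCQ is disabled for Maxtor drives on MCP51, and on MCP55 up to revision
 * A2, by forcing the SCSI queue depth to 1; presumably a workaround for
 * drive/controller compatibility problems with SWNCQ.
 */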
1893 static int nv_swncq_slave_config(struct scsi_device *sdev)
1894 {
1895 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1896 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1897 	struct ata_device *dev;
1898 	int rc;
1899 	u8 rev;
1900 	u8 check_maxtor = 0;
1901 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1902 
1903 	rc = ata_scsi_slave_config(sdev);
1904 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1905 		/* Not a proper libata device, ignore */
1906 		return rc;
1907 
1908 	dev = &ap->link.device[sdev->id];
1909 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1910 		return rc;
1911 
1912 	/* if MCP51 and Maxtor, then disable ncq */
1913 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1914 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1915 		check_maxtor = 1;
1916 
1917 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1918 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1919 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1920 		pci_read_config_byte(pdev, 0x8, &rev);
1921 		if (rev <= 0xa2)
1922 			check_maxtor = 1;
1923 	}
1924 
1925 	if (!check_maxtor)
1926 		return rc;
1927 
1928 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1929 
1930 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1931 		ata_scsi_change_queue_depth(sdev, 1);
1932 		ata_dev_printk(dev, KERN_NOTICE,
1933 			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1934 	}
1935 
1936 	return rc;
1937 }
1938 
1939 static int nv_swncq_port_start(struct ata_port *ap)
1940 {
1941 	struct device *dev = ap->host->dev;
1942 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1943 	struct nv_swncq_port_priv *pp;
1944 	int rc;
1945 
1946 	rc = ata_port_start(ap);
1947 	if (rc)
1948 		return rc;
1949 
1950 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1951 	if (!pp)
1952 		return -ENOMEM;
1953 
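	/* one legacy PRD table per NCQ tag, allocated as a single coherent
	 * block; nv_swncq_fill_sg() and nv_swncq_dmafis() index it by tag */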
1954 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1955 				      &pp->prd_dma, GFP_KERNEL);
1956 	if (!pp->prd)
1957 		return -ENOMEM;
1958 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1959 
1960 	ap->private_data = pp;
1961 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1962 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1963 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1964 
1965 	return 0;
1966 }
1967 
1968 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1969 {
1970 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1971 		ata_sff_qc_prep(qc);
1972 		return;
1973 	}
1974 
1975 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1976 		return;
1977 
1978 	nv_swncq_fill_sg(qc);
1979 }
1980 
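/*
 * Build the legacy-format PRD table for an NCQ command in its per-tag slot,
 * splitting each scatterlist entry so that no PRD entry crosses a 64K
 * boundary, and marking the last entry with ATA_PRD_EOT.
 */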
1981 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1982 {
1983 	struct ata_port *ap = qc->ap;
1984 	struct scatterlist *sg;
1985 	struct nv_swncq_port_priv *pp = ap->private_data;
1986 	struct ata_prd *prd;
1987 	unsigned int si, idx;
1988 
1989 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
1990 
1991 	idx = 0;
1992 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1993 		u32 addr, offset;
1994 		u32 sg_len, len;
1995 
1996 		addr = (u32)sg_dma_address(sg);
1997 		sg_len = sg_dma_len(sg);
1998 
1999 		while (sg_len) {
2000 			offset = addr & 0xffff;
2001 			len = sg_len;
2002 			if ((offset + sg_len) > 0x10000)
2003 				len = 0x10000 - offset;
2004 
2005 			prd[idx].addr = cpu_to_le32(addr);
2006 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2007 
2008 			idx++;
2009 			sg_len -= len;
2010 			addr += len;
2011 		}
2012 	}
2013 
2014 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2015 }
2016 
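/*
 * Issue one NCQ command: set its tag in the port's SActive register, update
 * the driver's bookkeeping bitmaps, then load the taskfile and send the
 * command through the normal SFF ops.
 */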
2017 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2018 					  struct ata_queued_cmd *qc)
2019 {
2020 	struct nv_swncq_port_priv *pp = ap->private_data;
2021 
2022 	if (qc == NULL)
2023 		return 0;
2024 
2025 	DPRINTK("Enter\n");
2026 
2027 	writel((1 << qc->tag), pp->sactive_block);
2028 	pp->last_issue_tag = qc->tag;
2029 	pp->dhfis_bits &= ~(1 << qc->tag);
2030 	pp->dmafis_bits &= ~(1 << qc->tag);
2031 	pp->qc_active |= (0x1 << qc->tag);
2032 
2033 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2034 	ap->ops->sff_exec_command(ap, &qc->tf);
2035 
2036 	DPRINTK("Issued tag %u\n", qc->tag);
2037 
2038 	return 0;
2039 }
2040 
2041 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2042 {
2043 	struct ata_port *ap = qc->ap;
2044 	struct nv_swncq_port_priv *pp = ap->private_data;
2045 
2046 	if (qc->tf.protocol != ATA_PROT_NCQ)
2047 		return ata_sff_qc_issue(qc);
2048 
2049 	DPRINTK("Enter\n");
2050 
2051 	if (!pp->qc_active)
2052 		nv_swncq_issue_atacmd(ap, qc);
2053 	else
2054 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2055 
2056 	return 0;
2057 }
2058 
2059 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2060 {
2061 	u32 serror;
2062 	struct ata_eh_info *ehi = &ap->link.eh_info;
2063 
2064 	ata_ehi_clear_desc(ehi);
2065 
2066 	/* AHCI needs SError cleared; otherwise, it might lock up */
2067 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2068 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2069 
2070 	/* analyze @fis */
2071 	if (fis & NV_SWNCQ_IRQ_ADDED)
2072 		ata_ehi_push_desc(ehi, "hot plug");
2073 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2074 		ata_ehi_push_desc(ehi, "hot unplug");
2075 
2076 	ata_ehi_hotplugged(ehi);
2077 
2078 	/* okay, let's hand over to EH */
2079 	ehi->serror |= serror;
2080 
2081 	ata_port_freeze(ap);
2082 }
2083 
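/*
 * Handle a Set Device Bits FIS: any tag the driver has issued (pp->qc_active)
 * that the drive has since cleared from SActive is complete.  After
 * completing those qcs, either reissue the last command if its D2H Register
 * FIS never arrived, or kick off the next deferred command.
 */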
2084 static int nv_swncq_sdbfis(struct ata_port *ap)
2085 {
2086 	struct ata_queued_cmd *qc;
2087 	struct nv_swncq_port_priv *pp = ap->private_data;
2088 	struct ata_eh_info *ehi = &ap->link.eh_info;
2089 	u32 sactive;
2090 	int nr_done = 0;
2091 	u32 done_mask;
2092 	int i;
2093 	u8 host_stat;
2094 	u8 lack_dhfis = 0;
2095 
2096 	host_stat = ap->ops->bmdma_status(ap);
2097 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2098 		/* error when transferring data to/from memory */
2099 		ata_ehi_clear_desc(ehi);
2100 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2101 		ehi->err_mask |= AC_ERR_HOST_BUS;
2102 		ehi->action |= ATA_EH_RESET;
2103 		return -EINVAL;
2104 	}
2105 
2106 	ap->ops->sff_irq_clear(ap);
2107 	__ata_bmdma_stop(ap);
2108 
2109 	sactive = readl(pp->sactive_block);
2110 	done_mask = pp->qc_active ^ sactive;
2111 
2112 	if (unlikely(done_mask & sactive)) {
2113 		ata_ehi_clear_desc(ehi);
2114 		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2115 				  "(%08x->%08x)", pp->qc_active, sactive);
2116 		ehi->err_mask |= AC_ERR_HSM;
2117 		ehi->action |= ATA_EH_RESET;
2118 		return -EINVAL;
2119 	}
2120 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
2121 		if (!(done_mask & (1 << i)))
2122 			continue;
2123 
2124 		qc = ata_qc_from_tag(ap, i);
2125 		if (qc) {
2126 			ata_qc_complete(qc);
2127 			pp->qc_active &= ~(1 << i);
2128 			pp->dhfis_bits &= ~(1 << i);
2129 			pp->dmafis_bits &= ~(1 << i);
2130 			pp->sdbfis_bits |= (1 << i);
2131 			nr_done++;
2132 		}
2133 	}
2134 
2135 	if (!ap->qc_active) {
2136 		DPRINTK("over\n");
2137 		nv_swncq_pp_reinit(ap);
2138 		return nr_done;
2139 	}
2140 
2141 	if (pp->qc_active & pp->dhfis_bits)
2142 		return nr_done;
2143 
2144 	if ((pp->ncq_flags & ncq_saw_backout) ||
2145 	    (pp->qc_active ^ pp->dhfis_bits))
2146 		/* if the controller can't get a device to host register FIS,
2147 		 * the driver needs to reissue the command.
2148 		 */
2149 		lack_dhfis = 1;
2150 
2151 	DPRINTK("id 0x%x QC: qc_active 0x%x, "
2152 		"SWNCQ:qc_active 0x%X defer_bits %X "
2153 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2154 		ap->print_id, ap->qc_active, pp->qc_active,
2155 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2156 		pp->dmafis_bits, pp->last_issue_tag);
2157 
2158 	nv_swncq_fis_reinit(ap);
2159 
2160 	if (lack_dhfis) {
2161 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2162 		nv_swncq_issue_atacmd(ap, qc);
2163 		return nr_done;
2164 	}
2165 
2166 	if (pp->defer_queue.defer_bits) {
2167 		/* send deferral queue command */
2168 		qc = nv_swncq_qc_from_dq(ap);
2169 		WARN_ON(qc == NULL);
2170 		nv_swncq_issue_atacmd(ap, qc);
2171 	}
2172 
2173 	return nr_done;
2174 }
2175 
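/* Read back the tag the controller reports for the current DMA Setup FIS;
 * the tag appears to live in bits 6:2 of the per-port tag register. */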
2176 static inline u32 nv_swncq_tag(struct ata_port *ap)
2177 {
2178 	struct nv_swncq_port_priv *pp = ap->private_data;
2179 	u32 tag;
2180 
2181 	tag = readb(pp->tag_block) >> 2;
2182 	return (tag & 0x1f);
2183 }
2184 
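/*
 * A DMA Setup FIS arrived: stop any previous BMDMA activity, look up which
 * tag the controller is servicing, point the BMDMA engine at that tag's PRD
 * table and start the transfer in the direction given by the taskfile.
 */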
2185 static int nv_swncq_dmafis(struct ata_port *ap)
2186 {
2187 	struct ata_queued_cmd *qc;
2188 	unsigned int rw;
2189 	u8 dmactl;
2190 	u32 tag;
2191 	struct nv_swncq_port_priv *pp = ap->private_data;
2192 
2193 	__ata_bmdma_stop(ap);
2194 	tag = nv_swncq_tag(ap);
2195 
2196 	DPRINTK("dma setup tag 0x%x\n", tag);
2197 	qc = ata_qc_from_tag(ap, tag);
2198 
2199 	if (unlikely(!qc))
2200 		return 0;
2201 
2202 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2203 
2204 	/* load PRD table addr. */
2205 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2206 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2207 
2208 	/* specify data direction, triple-check start bit is clear */
2209 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2210 	dmactl &= ~ATA_DMA_WR;
2211 	if (!rw)
2212 		dmactl |= ATA_DMA_WR;
2213 
2214 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2215 
2216 	return 1;
2217 }
2218 
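/*
 * Per-port SWNCQ interrupt handling.  The 16-bit status tells us which FIS
 * types arrived: hotplug events are handed straight to EH, an SDB FIS
 * completes finished tags via nv_swncq_sdbfis(), a D2H Register FIS marks
 * the last issued command as accepted (and may let us send the next
 * deferred command), and a DMA Setup FIS starts the BMDMA engine via
 * nv_swncq_dmafis().
 */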
2219 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2220 {
2221 	struct nv_swncq_port_priv *pp = ap->private_data;
2222 	struct ata_queued_cmd *qc;
2223 	struct ata_eh_info *ehi = &ap->link.eh_info;
2224 	u32 serror;
2225 	u8 ata_stat;
2226 	int rc = 0;
2227 
2228 	ata_stat = ap->ops->sff_check_status(ap);
2229 	nv_swncq_irq_clear(ap, fis);
2230 	if (!fis)
2231 		return;
2232 
2233 	if (ap->pflags & ATA_PFLAG_FROZEN)
2234 		return;
2235 
2236 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2237 		nv_swncq_hotplug(ap, fis);
2238 		return;
2239 	}
2240 
2241 	if (!pp->qc_active)
2242 		return;
2243 
2244 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2245 		return;
2246 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2247 
2248 	if (ata_stat & ATA_ERR) {
2249 		ata_ehi_clear_desc(ehi);
2250 		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2251 		ehi->err_mask |= AC_ERR_DEV;
2252 		ehi->serror |= serror;
2253 		ehi->action |= ATA_EH_RESET;
2254 		ata_port_freeze(ap);
2255 		return;
2256 	}
2257 
2258 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2259 		/* If a backout IRQ is received, the driver must issue
2260 		 * the command again some time later.
2261 		 */
2262 		pp->ncq_flags |= ncq_saw_backout;
2263 	}
2264 
2265 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2266 		pp->ncq_flags |= ncq_saw_sdb;
2267 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2268 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2269 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2270 			pp->dmafis_bits, readl(pp->sactive_block));
2271 		rc = nv_swncq_sdbfis(ap);
2272 		if (rc < 0)
2273 			goto irq_error;
2274 	}
2275 
2276 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2277 		/* The interrupt indicates the new command
2278 		 * was transmitted correctly to the drive.
2279 		 */
2280 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2281 		pp->ncq_flags |= ncq_saw_d2h;
2282 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2283 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2284 			ehi->err_mask |= AC_ERR_HSM;
2285 			ehi->action |= ATA_EH_RESET;
2286 			goto irq_error;
2287 		}
2288 
2289 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2290 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2291 			ata_stat = ap->ops->sff_check_status(ap);
2292 			if (ata_stat & ATA_BUSY)
2293 				goto irq_exit;
2294 
2295 			if (pp->defer_queue.defer_bits) {
2296 				DPRINTK("send next command\n");
2297 				qc = nv_swncq_qc_from_dq(ap);
2298 				nv_swncq_issue_atacmd(ap, qc);
2299 			}
2300 		}
2301 	}
2302 
2303 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2304 		/* program the dma controller with appropriate PRD buffers
2305 		 * and start the DMA transfer for the requested command.
2306 		 */
2307 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2308 		pp->ncq_flags |= ncq_saw_dmas;
2309 		rc = nv_swncq_dmafis(ap);
2310 	}
2311 
2312 irq_exit:
2313 	return;
2314 irq_error:
2315 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2316 	ata_port_freeze(ap);
2317 	return;
2318 }
2319 
2320 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2321 {
2322 	struct ata_host *host = dev_instance;
2323 	unsigned int i;
2324 	unsigned int handled = 0;
2325 	unsigned long flags;
2326 	u32 irq_stat;
2327 
2328 	spin_lock_irqsave(&host->lock, flags);
2329 
2330 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2331 
2332 	for (i = 0; i < host->n_ports; i++) {
2333 		struct ata_port *ap = host->ports[i];
2334 
2335 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2336 			if (ap->link.sactive) {
2337 				nv_swncq_host_interrupt(ap, (u16)irq_stat);
2338 				handled = 1;
2339 			} else {
2340 				if (irq_stat)	/* reserve Hotplug */
2341 				if (irq_stat)	/* preserve hotplug bits */
2342 
2343 				handled += nv_host_intr(ap, (u8)irq_stat);
2344 			}
2345 		}
2346 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2347 	}
2348 
2349 	spin_unlock_irqrestore(&host->lock, flags);
2350 
2351 	return IRQ_RETVAL(handled);
2352 }
2353 
2354 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2355 {
2356 	static int printed_version;
2357 	const struct ata_port_info *ppi[] = { NULL, NULL };
2358 	struct nv_pi_priv *ipriv;
2359 	struct ata_host *host;
2360 	struct nv_host_priv *hpriv;
2361 	int rc;
2362 	u32 bar;
2363 	void __iomem *base;
2364 	unsigned long type = ent->driver_data;
2365 
2366 	/* Make sure this is a SATA controller by counting the number of BARs
2367 	 * (NVIDIA SATA controllers will always have six BARs); otherwise it
2368 	 * is an IDE controller and we ignore it. */
2369 	for (bar = 0; bar < 6; bar++)
2370 		if (pci_resource_start(pdev, bar) == 0)
2371 			return -ENODEV;
2372 
2373 	if (!printed_version++)
2374 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2375 
2376 	rc = pcim_enable_device(pdev);
2377 	if (rc)
2378 		return rc;
2379 
2380 	/* determine type and allocate host */
2381 	if (type == CK804 && adma_enabled) {
2382 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2383 		type = ADMA;
2384 	} else if (type == MCP5x && swncq_enabled) {
2385 		dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2386 		type = SWNCQ;
2387 	}
2388 
2389 	ppi[0] = &nv_port_info[type];
2390 	ipriv = ppi[0]->private_data;
2391 	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2392 	if (rc)
2393 		return rc;
2394 
2395 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2396 	if (!hpriv)
2397 		return -ENOMEM;
2398 	hpriv->type = type;
2399 	host->private_data = hpriv;
2400 
2401 	/* request and iomap NV_MMIO_BAR */
2402 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2403 	if (rc)
2404 		return rc;
2405 
2406 	/* configure SCR access */
2407 	base = host->iomap[NV_MMIO_BAR];
2408 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2409 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2410 
2411 	/* enable SATA space for CK804 */
2412 	if (type >= CK804) {
2413 		u8 regval;
2414 
2415 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2416 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2417 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2418 	}
2419 
2420 	/* init ADMA */
2421 	if (type == ADMA) {
2422 		rc = nv_adma_host_init(host);
2423 		if (rc)
2424 			return rc;
2425 	} else if (type == SWNCQ)
2426 		nv_swncq_host_init(host);
2427 
2428 	pci_set_master(pdev);
2429 	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2430 				 IRQF_SHARED, ipriv->sht);
2431 }
2432 
2433 #ifdef CONFIG_PM
2434 static int nv_pci_device_resume(struct pci_dev *pdev)
2435 {
2436 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2437 	struct nv_host_priv *hpriv = host->private_data;
2438 	int rc;
2439 
2440 	rc = ata_pci_device_do_resume(pdev);
2441 	if (rc)
2442 		return rc;
2443 
2444 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2445 		if (hpriv->type >= CK804) {
2446 			u8 regval;
2447 
2448 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2449 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2450 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2451 		}
2452 		if (hpriv->type == ADMA) {
2453 			u32 tmp32;
2454 			struct nv_adma_port_priv *pp;
2455 			/* enable/disable ADMA on the ports appropriately */
2456 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2457 
2458 			pp = host->ports[0]->private_data;
2459 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2460 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2461 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2462 			else
2463 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2464 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2465 			pp = host->ports[1]->private_data;
2466 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2467 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2468 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2469 			else
2470 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2471 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2472 
2473 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2474 		}
2475 	}
2476 
2477 	ata_host_resume(host);
2478 
2479 	return 0;
2480 }
2481 #endif
2482 
2483 static void nv_ck804_host_stop(struct ata_host *host)
2484 {
2485 	struct pci_dev *pdev = to_pci_dev(host->dev);
2486 	u8 regval;
2487 
2488 	/* disable SATA space for CK804 */
2489 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2490 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2491 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2492 }
2493 
2494 static void nv_adma_host_stop(struct ata_host *host)
2495 {
2496 	struct pci_dev *pdev = to_pci_dev(host->dev);
2497 	u32 tmp32;
2498 
2499 	/* disable ADMA on the ports */
2500 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2501 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2502 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2503 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2504 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2505 
2506 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2507 
2508 	nv_ck804_host_stop(host);
2509 }
2510 
2511 static int __init nv_init(void)
2512 {
2513 	return pci_register_driver(&nv_pci_driver);
2514 }
2515 
2516 static void __exit nv_exit(void)
2517 {
2518 	pci_unregister_driver(&nv_pci_driver);
2519 }
2520 
2521 module_init(nv_init);
2522 module_exit(nv_exit);
2523 module_param_named(adma, adma_enabled, bool, 0444);
2524 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2525 module_param_named(swncq, swncq_enabled, bool, 0444);
2526 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2527 
2528