xref: /linux/drivers/ata/sata_nv.c (revision 0c93ea4064a209cdc36de8a9a3003d43d08f46f7)
1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/DocBook/libata.*
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a fashion
28  *  similar to other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50 
51 #define DRV_NAME			"sata_nv"
52 #define DRV_VERSION			"3.5"
53 
54 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
55 
56 enum {
57 	NV_MMIO_BAR			= 5,
58 
59 	NV_PORTS			= 2,
60 	NV_PIO_MASK			= 0x1f,
61 	NV_MWDMA_MASK			= 0x07,
62 	NV_UDMA_MASK			= 0x7f,
63 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
64 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
65 
66 	/* INT_STATUS/ENABLE */
67 	NV_INT_STATUS			= 0x10,
68 	NV_INT_ENABLE			= 0x11,
69 	NV_INT_STATUS_CK804		= 0x440,
70 	NV_INT_ENABLE_CK804		= 0x441,
71 
72 	/* INT_STATUS/ENABLE bits */
73 	NV_INT_DEV			= 0x01,
74 	NV_INT_PM			= 0x02,
75 	NV_INT_ADDED			= 0x04,
76 	NV_INT_REMOVED			= 0x08,
77 
78 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
79 
80 	NV_INT_ALL			= 0x0f,
81 	NV_INT_MASK			= NV_INT_DEV |
82 					  NV_INT_ADDED | NV_INT_REMOVED,
83 
84 	/* INT_CONFIG */
85 	NV_INT_CONFIG			= 0x12,
86 	NV_INT_CONFIG_METHD		= 0x01, /* 0 = INT, 1 = SMI */
87 
88 	/* For PCI config register 20 */
89 	NV_MCP_SATA_CFG_20		= 0x50,
90 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
92 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
93 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
94 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
95 
96 	NV_ADMA_MAX_CPBS		= 32,
97 	NV_ADMA_CPB_SZ			= 128,
98 	NV_ADMA_APRD_SZ			= 16,
99 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
100 					   NV_ADMA_APRD_SZ,
101 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
102 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
104 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
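	/*
	 * Worked out, the constants above give: NV_ADMA_SGTBL_LEN =
	 * (1024 - 128) / 16 = 56 external APRDs per command,
	 * NV_ADMA_SGTBL_SZ = 56 * 16 = 896 bytes, and
	 * NV_ADMA_PORT_PRIV_DMA_SZ = 32 * (128 + 896) = 32 KiB of
	 * coherent DMA memory per port (see nv_adma_port_start()).
	 */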
105 
106 	/* BAR5 offset to ADMA general registers */
107 	NV_ADMA_GEN			= 0x400,
108 	NV_ADMA_GEN_CTL			= 0x00,
109 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
110 
111 	/* BAR5 offset to ADMA ports */
112 	NV_ADMA_PORT			= 0x480,
113 
114 	/* size of ADMA port register space  */
115 	NV_ADMA_PORT_SIZE		= 0x100,
116 
117 	/* ADMA port registers */
118 	NV_ADMA_CTL			= 0x40,
119 	NV_ADMA_CPB_COUNT		= 0x42,
120 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
121 	NV_ADMA_STAT			= 0x44,
122 	NV_ADMA_CPB_BASE_LOW		= 0x48,
123 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
124 	NV_ADMA_APPEND			= 0x50,
125 	NV_ADMA_NOTIFIER		= 0x68,
126 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
127 
128 	/* NV_ADMA_CTL register bits */
129 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
130 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
131 	NV_ADMA_CTL_GO			= (1 << 7),
132 	NV_ADMA_CTL_AIEN		= (1 << 8),
133 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
134 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
135 
136 	/* CPB response flag bits */
137 	NV_CPB_RESP_DONE		= (1 << 0),
138 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
139 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
140 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
141 
142 	/* CPB control flag bits */
143 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
144 	NV_CPB_CTL_QUEUE		= (1 << 1),
145 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
146 	NV_CPB_CTL_IEN			= (1 << 3),
147 	NV_CPB_CTL_FPDMA		= (1 << 4),
148 
149 	/* APRD flags */
150 	NV_APRD_WRITE			= (1 << 1),
151 	NV_APRD_END			= (1 << 2),
152 	NV_APRD_CONT			= (1 << 3),
153 
154 	/* NV_ADMA_STAT flags */
155 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
156 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
157 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
158 	NV_ADMA_STAT_CPBERR		= (1 << 4),
159 	NV_ADMA_STAT_SERROR		= (1 << 5),
160 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
161 	NV_ADMA_STAT_IDLE		= (1 << 8),
162 	NV_ADMA_STAT_LEGACY		= (1 << 9),
163 	NV_ADMA_STAT_STOPPED		= (1 << 10),
164 	NV_ADMA_STAT_DONE		= (1 << 12),
165 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
166 					  NV_ADMA_STAT_TIMEOUT,
167 
168 	/* port flags */
169 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
170 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
171 
172 	/* MCP55 reg offset */
173 	NV_CTL_MCP55			= 0x400,
174 	NV_INT_STATUS_MCP55		= 0x440,
175 	NV_INT_ENABLE_MCP55		= 0x444,
176 	NV_NCQ_REG_MCP55		= 0x448,
177 
178 	/* MCP55 */
179 	NV_INT_ALL_MCP55		= 0xffff,
180 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
181 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
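	/* & 0xfffd clears bit 1, presumably the PM interrupt, mirroring
	   the omission of NV_INT_PM from NV_INT_MASK above */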
182 
183 	/* SWNCQ enable bits */
184 	NV_CTL_PRI_SWNCQ		= 0x02,
185 	NV_CTL_SEC_SWNCQ		= 0x04,
186 
187 	/* SWNCQ status bits */
188 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
189 	NV_SWNCQ_IRQ_PM			= (1 << 1),
190 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
191 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
192 
193 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
194 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
195 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
196 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
197 
198 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
199 					  NV_SWNCQ_IRQ_REMOVED,
200 
201 };
202 
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 	__le64			addr;
206 	__le32			len;
207 	u8			flags;
208 	u8			packet_len;
209 	__le16			reserved;
210 };
211 
212 enum nv_adma_regbits {
213 	CMDEND	= (1 << 15),		/* end of command list */
214 	WNB	= (1 << 14),		/* wait-not-BSY */
215 	IGN	= (1 << 13),		/* ignore this entry */
216 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
217 	DA2	= (1 << (2 + 8)),
218 	DA1	= (1 << (1 + 8)),
219 	DA0	= (1 << (0 + 8)),
220 };
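
/*
 * Each CPB taskfile entry built by nv_adma_tf_to_cpb() below is a 16-bit
 * word: the target register address in the high byte (DA0-DA2/CS1n), the
 * register value in the low byte, and the WNB/IGN/CMDEND control flags in
 * the top bits.
 */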
221 
222 /* ADMA Command Parameter Block
223    The first 5 SG segments are stored inside the Command Parameter Block itself.
224    If there are more than 5 segments, the remainder are stored in a separate
225    memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 	u8			resp_flags;    /* 0 */
228 	u8			reserved1;     /* 1 */
229 	u8			ctl_flags;     /* 2 */
230 	/* len is the length of the taskfile area in 64-bit words */
231 	u8			len;		/* 3  */
232 	u8			tag;           /* 4 */
233 	u8			next_cpb_idx;  /* 5 */
234 	__le16			reserved2;     /* 6-7 */
235 	__le16			tf[12];        /* 8-31 */
236 	struct nv_adma_prd	aprd[5];       /* 32-111 */
237 	__le64			next_aprd;     /* 112-119 */
238 	__le64			reserved3;     /* 120-127 */
239 };
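
/*
 * The byte offsets noted above add up to NV_ADMA_CPB_SZ (128): an 8-byte
 * header, 24 bytes of taskfile words, five 16-byte inline APRDs, the
 * 8-byte next_aprd pointer and 8 reserved bytes.
 */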
240 
241 
242 struct nv_adma_port_priv {
243 	struct nv_adma_cpb	*cpb;
244 	dma_addr_t		cpb_dma;
245 	struct nv_adma_prd	*aprd;
246 	dma_addr_t		aprd_dma;
247 	void __iomem		*ctl_block;
248 	void __iomem		*gen_block;
249 	void __iomem		*notifier_clear_block;
250 	u64			adma_dma_mask;
251 	u8			flags;
252 	int			last_issue_ncq;
253 };
254 
255 struct nv_host_priv {
256 	unsigned long		type;
257 };
258 
259 struct defer_queue {
260 	u32		defer_bits;
261 	unsigned int	head;
262 	unsigned int	tail;
263 	unsigned int	tag[ATA_MAX_QUEUE];
264 };
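
/*
 * defer_bits tracks which tags are currently queued; head and tail are
 * free-running indices masked with (ATA_MAX_QUEUE - 1) when the ring is
 * accessed, see nv_swncq_qc_to_dq() and nv_swncq_qc_from_dq() below.
 */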
265 
266 enum ncq_saw_flag_list {
267 	ncq_saw_d2h	= (1U << 0),
268 	ncq_saw_dmas	= (1U << 1),
269 	ncq_saw_sdb	= (1U << 2),
270 	ncq_saw_backout	= (1U << 3),
271 };
272 
273 struct nv_swncq_port_priv {
274 	struct ata_prd	*prd;	 /* our SG list */
275 	dma_addr_t	prd_dma; /* and its DMA mapping */
276 	void __iomem	*sactive_block;
277 	void __iomem	*irq_block;
278 	void __iomem	*tag_block;
279 	u32		qc_active;
280 
281 	unsigned int	last_issue_tag;
282 
283 	/* FIFO circular queue for deferred commands */
284 	struct defer_queue defer_queue;
285 
286 	/* for NCQ interrupt analysis */
287 	u32		dhfis_bits;
288 	u32		dmafis_bits;
289 	u32		sdbfis_bits;
290 
291 	unsigned int	ncq_flags;
292 };
293 
294 
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
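/* With this encoding, the per-port interrupt-pending flags sit at bit 19
   (port 0) and bit 31 (port 1) of the ADMA general control register. */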
296 
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 
308 static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
309 				   unsigned long deadline);
310 static void nv_nf2_freeze(struct ata_port *ap);
311 static void nv_nf2_thaw(struct ata_port *ap);
312 static void nv_ck804_freeze(struct ata_port *ap);
313 static void nv_ck804_thaw(struct ata_port *ap);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332 
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347 
348 enum nv_host_type
349 {
350 	GENERIC,
351 	NFORCE2,
352 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
353 	CK804,
354 	ADMA,
355 	MCP5x,
356 	SWNCQ,
357 };
358 
359 static const struct pci_device_id nv_pci_tbl[] = {
360 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
373 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
374 
375 	{ } /* terminate list */
376 };
377 
378 static struct pci_driver nv_pci_driver = {
379 	.name			= DRV_NAME,
380 	.id_table		= nv_pci_tbl,
381 	.probe			= nv_init_one,
382 #ifdef CONFIG_PM
383 	.suspend		= ata_pci_device_suspend,
384 	.resume			= nv_pci_device_resume,
385 #endif
386 	.remove			= ata_pci_remove_one,
387 };
388 
389 static struct scsi_host_template nv_sht = {
390 	ATA_BMDMA_SHT(DRV_NAME),
391 };
392 
393 static struct scsi_host_template nv_adma_sht = {
394 	ATA_NCQ_SHT(DRV_NAME),
395 	.can_queue		= NV_ADMA_MAX_CPBS,
396 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
397 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
398 	.slave_configure	= nv_adma_slave_config,
399 };
400 
401 static struct scsi_host_template nv_swncq_sht = {
402 	ATA_NCQ_SHT(DRV_NAME),
403 	.can_queue		= ATA_MAX_QUEUE,
404 	.sg_tablesize		= LIBATA_MAX_PRD,
405 	.dma_boundary		= ATA_DMA_BOUNDARY,
406 	.slave_configure	= nv_swncq_slave_config,
407 };
408 
409 static struct ata_port_operations nv_common_ops = {
410 	.inherits		= &ata_bmdma_port_ops,
411 	.scr_read		= nv_scr_read,
412 	.scr_write		= nv_scr_write,
413 };
414 
415 /* OSDL bz11195 reports that link doesn't come online after hardreset
416  * on generic nv's and there have been several other similar reports
417  * on linux-ide.  Disable hardreset for generic nv's.
418  */
419 static struct ata_port_operations nv_generic_ops = {
420 	.inherits		= &nv_common_ops,
421 	.hardreset		= ATA_OP_NULL,
422 };
423 
424 /* nf2 is rife with hardreset-related problems.
425  *
426  * kernel bz#3352 reports nf2/3 controllers can't determine device
427  * signature reliably.  The following thread reports detection failure
428  * on cold boot with the standard debouncing timing.
429  *
430  * http://thread.gmane.org/gmane.linux.ide/34098
431  *
432  * And bz#12176 reports that hardreset simply doesn't work on nf2.
433  * Give up on it and just don't do hardreset.
434  */
435 static struct ata_port_operations nv_nf2_ops = {
436 	.inherits		= &nv_generic_ops,
437 	.freeze			= nv_nf2_freeze,
438 	.thaw			= nv_nf2_thaw,
439 };
440 
441 /* For initial probing after boot and hot plugging, hardreset mostly
442  * works fine on CK804 but curiously, reprobing on the initial port by
443  * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS
444  * in a somewhat nondeterministic way.  Use noclassify hardreset.
445  */
446 static struct ata_port_operations nv_ck804_ops = {
447 	.inherits		= &nv_common_ops,
448 	.freeze			= nv_ck804_freeze,
449 	.thaw			= nv_ck804_thaw,
450 	.hardreset		= nv_noclassify_hardreset,
451 	.host_stop		= nv_ck804_host_stop,
452 };
453 
454 static struct ata_port_operations nv_adma_ops = {
455 	.inherits		= &nv_ck804_ops,
456 
457 	.check_atapi_dma	= nv_adma_check_atapi_dma,
458 	.sff_tf_read		= nv_adma_tf_read,
459 	.qc_defer		= ata_std_qc_defer,
460 	.qc_prep		= nv_adma_qc_prep,
461 	.qc_issue		= nv_adma_qc_issue,
462 	.sff_irq_clear		= nv_adma_irq_clear,
463 
464 	.freeze			= nv_adma_freeze,
465 	.thaw			= nv_adma_thaw,
466 	.error_handler		= nv_adma_error_handler,
467 	.post_internal_cmd	= nv_adma_post_internal_cmd,
468 
469 	.port_start		= nv_adma_port_start,
470 	.port_stop		= nv_adma_port_stop,
471 #ifdef CONFIG_PM
472 	.port_suspend		= nv_adma_port_suspend,
473 	.port_resume		= nv_adma_port_resume,
474 #endif
475 	.host_stop		= nv_adma_host_stop,
476 };
477 
478 /* Kernel bz#12351 reports that when SWNCQ is enabled, for hotplug to
479  * work, hardreset should be used and hardreset can't report proper
480  * signature, which suggests that mcp5x is closer to nf2 as far as
481  * reset quirkiness is concerned.  Define separate ops for mcp5x with
482  * nv_noclassify_hardreset().
483  */
484 static struct ata_port_operations nv_mcp5x_ops = {
485 	.inherits		= &nv_common_ops,
486 	.hardreset		= nv_noclassify_hardreset,
487 };
488 
489 static struct ata_port_operations nv_swncq_ops = {
490 	.inherits		= &nv_mcp5x_ops,
491 
492 	.qc_defer		= ata_std_qc_defer,
493 	.qc_prep		= nv_swncq_qc_prep,
494 	.qc_issue		= nv_swncq_qc_issue,
495 
496 	.freeze			= nv_mcp55_freeze,
497 	.thaw			= nv_mcp55_thaw,
498 	.error_handler		= nv_swncq_error_handler,
499 
500 #ifdef CONFIG_PM
501 	.port_suspend		= nv_swncq_port_suspend,
502 	.port_resume		= nv_swncq_port_resume,
503 #endif
504 	.port_start		= nv_swncq_port_start,
505 };
506 
507 struct nv_pi_priv {
508 	irq_handler_t			irq_handler;
509 	struct scsi_host_template	*sht;
510 };
511 
512 #define NV_PI_PRIV(_irq_handler, _sht) \
513 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
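
/* NV_PI_PRIV uses a C99 compound literal so that each nv_port_info entry
   can carry a pointer to its own irq handler / sht pair in ->private_data. */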
514 
515 static const struct ata_port_info nv_port_info[] = {
516 	/* generic */
517 	{
518 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
519 		.pio_mask	= NV_PIO_MASK,
520 		.mwdma_mask	= NV_MWDMA_MASK,
521 		.udma_mask	= NV_UDMA_MASK,
522 		.port_ops	= &nv_generic_ops,
523 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
524 	},
525 	/* nforce2/3 */
526 	{
527 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
528 		.pio_mask	= NV_PIO_MASK,
529 		.mwdma_mask	= NV_MWDMA_MASK,
530 		.udma_mask	= NV_UDMA_MASK,
531 		.port_ops	= &nv_nf2_ops,
532 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
533 	},
534 	/* ck804 */
535 	{
536 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
537 		.pio_mask	= NV_PIO_MASK,
538 		.mwdma_mask	= NV_MWDMA_MASK,
539 		.udma_mask	= NV_UDMA_MASK,
540 		.port_ops	= &nv_ck804_ops,
541 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
542 	},
543 	/* ADMA */
544 	{
545 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
546 				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
547 		.pio_mask	= NV_PIO_MASK,
548 		.mwdma_mask	= NV_MWDMA_MASK,
549 		.udma_mask	= NV_UDMA_MASK,
550 		.port_ops	= &nv_adma_ops,
551 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
552 	},
553 	/* MCP5x */
554 	{
555 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
556 		.pio_mask	= NV_PIO_MASK,
557 		.mwdma_mask	= NV_MWDMA_MASK,
558 		.udma_mask	= NV_UDMA_MASK,
559 		.port_ops	= &nv_mcp5x_ops,
560 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
561 	},
562 	/* SWNCQ */
563 	{
564 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
565 				  ATA_FLAG_NCQ,
566 		.pio_mask	= NV_PIO_MASK,
567 		.mwdma_mask	= NV_MWDMA_MASK,
568 		.udma_mask	= NV_UDMA_MASK,
569 		.port_ops	= &nv_swncq_ops,
570 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
571 	},
572 };
573 
574 MODULE_AUTHOR("NVIDIA");
575 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
576 MODULE_LICENSE("GPL");
577 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
578 MODULE_VERSION(DRV_VERSION);
579 
580 static int adma_enabled;
581 static int swncq_enabled = 1;
582 
583 static void nv_adma_register_mode(struct ata_port *ap)
584 {
585 	struct nv_adma_port_priv *pp = ap->private_data;
586 	void __iomem *mmio = pp->ctl_block;
587 	u16 tmp, status;
588 	int count = 0;
589 
590 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
591 		return;
592 
593 	status = readw(mmio + NV_ADMA_STAT);
594 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
595 		ndelay(50);
596 		status = readw(mmio + NV_ADMA_STAT);
597 		count++;
598 	}
599 	if (count == 20)
600 		ata_port_printk(ap, KERN_WARNING,
601 			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
602 			status);
603 
604 	tmp = readw(mmio + NV_ADMA_CTL);
605 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
606 
607 	count = 0;
608 	status = readw(mmio + NV_ADMA_STAT);
609 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
610 		ndelay(50);
611 		status = readw(mmio + NV_ADMA_STAT);
612 		count++;
613 	}
614 	if (count == 20)
615 		ata_port_printk(ap, KERN_WARNING,
616 			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
617 			 status);
618 
619 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
620 }
621 
622 static void nv_adma_mode(struct ata_port *ap)
623 {
624 	struct nv_adma_port_priv *pp = ap->private_data;
625 	void __iomem *mmio = pp->ctl_block;
626 	u16 tmp, status;
627 	int count = 0;
628 
629 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
630 		return;
631 
632 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
633 
634 	tmp = readw(mmio + NV_ADMA_CTL);
635 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
636 
637 	status = readw(mmio + NV_ADMA_STAT);
638 	while (((status & NV_ADMA_STAT_LEGACY) ||
639 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
640 		ndelay(50);
641 		status = readw(mmio + NV_ADMA_STAT);
642 		count++;
643 	}
644 	if (count == 20)
645 		ata_port_printk(ap, KERN_WARNING,
646 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
647 			status);
648 
649 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
650 }
651 
652 static int nv_adma_slave_config(struct scsi_device *sdev)
653 {
654 	struct ata_port *ap = ata_shost_to_port(sdev->host);
655 	struct nv_adma_port_priv *pp = ap->private_data;
656 	struct nv_adma_port_priv *port0, *port1;
657 	struct scsi_device *sdev0, *sdev1;
658 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
659 	unsigned long segment_boundary, flags;
660 	unsigned short sg_tablesize;
661 	int rc;
662 	int adma_enable;
663 	u32 current_reg, new_reg, config_mask;
664 
665 	rc = ata_scsi_slave_config(sdev);
666 
667 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
668 		/* Not a proper libata device, ignore */
669 		return rc;
670 
671 	spin_lock_irqsave(ap->lock, flags);
672 
673 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
674 		/*
675 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
676 		 * Therefore ATAPI commands are sent through the legacy interface.
677 		 * However, the legacy interface only supports 32-bit DMA.
678 		 * Restrict DMA parameters as required by the legacy interface
679 		 * when an ATAPI device is connected.
680 		 */
681 		segment_boundary = ATA_DMA_BOUNDARY;
682 		/* Subtract 1 since an extra entry may be needed for padding, see
683 		   libata-scsi.c */
684 		sg_tablesize = LIBATA_MAX_PRD - 1;
685 
686 		/* Since the legacy DMA engine is in use, we need to disable ADMA
687 		   on the port. */
688 		adma_enable = 0;
689 		nv_adma_register_mode(ap);
690 	} else {
691 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
692 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
693 		adma_enable = 1;
694 	}
695 
696 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
697 
698 	if (ap->port_no == 1)
699 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
700 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
701 	else
702 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
703 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
704 
705 	if (adma_enable) {
706 		new_reg = current_reg | config_mask;
707 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
708 	} else {
709 		new_reg = current_reg & ~config_mask;
710 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
711 	}
712 
713 	if (current_reg != new_reg)
714 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
715 
716 	port0 = ap->host->ports[0]->private_data;
717 	port1 = ap->host->ports[1]->private_data;
718 	sdev0 = ap->host->ports[0]->link.device[0].sdev;
719 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
720 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
721 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
722 		/* We have to set the DMA mask to 32-bit if either port is in
723 		    ATAPI mode, since they are on the same PCI device which is
724 		    used for DMA mapping. If we set the mask we also need to set
725 		    the bounce limit on both ports to ensure that the block
726 		    layer doesn't feed addresses that cause DMA mapping to
727 		    choke. If either SCSI device is not allocated yet, it's OK
728 		    since that port will discover its correct setting when it
729 		    does get allocated.
730 		    Note: Setting 32-bit mask should not fail. */
731 		if (sdev0)
732 			blk_queue_bounce_limit(sdev0->request_queue,
733 					       ATA_DMA_MASK);
734 		if (sdev1)
735 			blk_queue_bounce_limit(sdev1->request_queue,
736 					       ATA_DMA_MASK);
737 
738 		pci_set_dma_mask(pdev, ATA_DMA_MASK);
739 	} else {
740 		/* This shouldn't fail as it was set to this value before */
741 		pci_set_dma_mask(pdev, pp->adma_dma_mask);
742 		if (sdev0)
743 			blk_queue_bounce_limit(sdev0->request_queue,
744 					       pp->adma_dma_mask);
745 		if (sdev1)
746 			blk_queue_bounce_limit(sdev1->request_queue,
747 					       pp->adma_dma_mask);
748 	}
749 
750 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
751 	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
752 	ata_port_printk(ap, KERN_INFO,
753 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
754 		(unsigned long long)*ap->host->dev->dma_mask,
755 		segment_boundary, sg_tablesize);
756 
757 	spin_unlock_irqrestore(ap->lock, flags);
758 
759 	return rc;
760 }
761 
762 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
763 {
764 	struct nv_adma_port_priv *pp = qc->ap->private_data;
765 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
766 }
767 
768 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
769 {
770 	/* Other than when internal or pass-through commands are executed,
771 	   the only time this function will be called in ADMA mode will be
772 	   if a command fails. In the failure case we don't care about going
773 	   into register mode with ADMA commands pending, as the commands will
774 	   all shortly be aborted anyway. We assume that NCQ commands are not
775 	   issued via passthrough, which is the only way that this switch into
776 	   register mode could abort outstanding commands. */
777 	nv_adma_register_mode(ap);
778 
779 	ata_sff_tf_read(ap, tf);
780 }
781 
782 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
783 {
784 	unsigned int idx = 0;
785 
786 	if (tf->flags & ATA_TFLAG_ISADDR) {
787 		if (tf->flags & ATA_TFLAG_LBA48) {
788 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
789 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
790 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
791 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
792 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
793 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
794 		} else
795 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
796 
797 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
798 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
799 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
800 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
801 	}
802 
803 	if (tf->flags & ATA_TFLAG_DEVICE)
804 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
805 
806 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
807 
808 	while (idx < 12)
809 		cpb[idx++] = cpu_to_le16(IGN);
810 
811 	return idx;
812 }
813 
814 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
815 {
816 	struct nv_adma_port_priv *pp = ap->private_data;
817 	u8 flags = pp->cpb[cpb_num].resp_flags;
818 
819 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
820 
821 	if (unlikely((force_err ||
822 		     flags & (NV_CPB_RESP_ATA_ERR |
823 			      NV_CPB_RESP_CMD_ERR |
824 			      NV_CPB_RESP_CPB_ERR)))) {
825 		struct ata_eh_info *ehi = &ap->link.eh_info;
826 		int freeze = 0;
827 
828 		ata_ehi_clear_desc(ehi);
829 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
830 		if (flags & NV_CPB_RESP_ATA_ERR) {
831 			ata_ehi_push_desc(ehi, "ATA error");
832 			ehi->err_mask |= AC_ERR_DEV;
833 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
834 			ata_ehi_push_desc(ehi, "CMD error");
835 			ehi->err_mask |= AC_ERR_DEV;
836 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
837 			ata_ehi_push_desc(ehi, "CPB error");
838 			ehi->err_mask |= AC_ERR_SYSTEM;
839 			freeze = 1;
840 		} else {
841 			/* notifier error, but no error in CPB flags? */
842 			ata_ehi_push_desc(ehi, "unknown");
843 			ehi->err_mask |= AC_ERR_OTHER;
844 			freeze = 1;
845 		}
846 		/* Kill all commands. EH will determine what actually failed. */
847 		if (freeze)
848 			ata_port_freeze(ap);
849 		else
850 			ata_port_abort(ap);
851 		return 1;
852 	}
853 
854 	if (likely(flags & NV_CPB_RESP_DONE)) {
855 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
856 		VPRINTK("CPB flags done, flags=0x%x\n", flags);
857 		if (likely(qc)) {
858 			DPRINTK("Completing qc from tag %d\n", cpb_num);
859 			ata_qc_complete(qc);
860 		} else {
861 			struct ata_eh_info *ehi = &ap->link.eh_info;
862 			/* Notifier bits set without a command may indicate the drive
863 			   is misbehaving. Raise host state machine violation on this
864 			   condition. */
865 			ata_port_printk(ap, KERN_ERR,
866 					"notifier for tag %d with no cmd?\n",
867 					cpb_num);
868 			ehi->err_mask |= AC_ERR_HSM;
869 			ehi->action |= ATA_EH_RESET;
870 			ata_port_freeze(ap);
871 			return 1;
872 		}
873 	}
874 	return 0;
875 }
876 
877 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
878 {
879 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
880 
881 	/* freeze if hotplugged */
882 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
883 		ata_port_freeze(ap);
884 		return 1;
885 	}
886 
887 	/* bail out if not our interrupt */
888 	if (!(irq_stat & NV_INT_DEV))
889 		return 0;
890 
891 	/* DEV interrupt w/ no active qc? */
892 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
893 		ata_sff_check_status(ap);
894 		return 1;
895 	}
896 
897 	/* handle interrupt */
898 	return ata_sff_host_intr(ap, qc);
899 }
900 
901 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
902 {
903 	struct ata_host *host = dev_instance;
904 	int i, handled = 0;
905 	u32 notifier_clears[2];
906 
907 	spin_lock(&host->lock);
908 
909 	for (i = 0; i < host->n_ports; i++) {
910 		struct ata_port *ap = host->ports[i];
911 		notifier_clears[i] = 0;
912 
913 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
914 			struct nv_adma_port_priv *pp = ap->private_data;
915 			void __iomem *mmio = pp->ctl_block;
916 			u16 status;
917 			u32 gen_ctl;
918 			u32 notifier, notifier_error;
919 
920 			/* if ADMA is disabled, use standard ata interrupt handler */
921 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
922 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
923 					>> (NV_INT_PORT_SHIFT * i);
924 				handled += nv_host_intr(ap, irq_stat);
925 				continue;
926 			}
927 
928 			/* if in ATA register mode, check for standard interrupts */
929 			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
930 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
931 					>> (NV_INT_PORT_SHIFT * i);
932 				if (ata_tag_valid(ap->link.active_tag))
933 					/* NV_INT_DEV indication seems unreliable at times
934 					    at least in ADMA mode. Force it on always when a
935 					    command is active, to prevent losing interrupts. */
936 					irq_stat |= NV_INT_DEV;
937 				handled += nv_host_intr(ap, irq_stat);
938 			}
939 
940 			notifier = readl(mmio + NV_ADMA_NOTIFIER);
941 			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
942 			notifier_clears[i] = notifier | notifier_error;
943 
944 			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
945 
946 			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
947 			    !notifier_error)
948 				/* Nothing to do */
949 				continue;
950 
951 			status = readw(mmio + NV_ADMA_STAT);
952 
953 			/* Clear status. Ensure the controller sees the clearing before we start
954 			   looking at any of the CPB statuses, so that any CPB completions after
955 			   this point in the handler will raise another interrupt. */
956 			writew(status, mmio + NV_ADMA_STAT);
957 			readw(mmio + NV_ADMA_STAT); /* flush posted write */
958 			rmb();
959 
960 			handled++; /* irq handled if we got here */
961 
962 			/* freeze if hotplugged or controller error */
963 			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
964 					       NV_ADMA_STAT_HOTUNPLUG |
965 					       NV_ADMA_STAT_TIMEOUT |
966 					       NV_ADMA_STAT_SERROR))) {
967 				struct ata_eh_info *ehi = &ap->link.eh_info;
968 
969 				ata_ehi_clear_desc(ehi);
970 				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
971 				if (status & NV_ADMA_STAT_TIMEOUT) {
972 					ehi->err_mask |= AC_ERR_SYSTEM;
973 					ata_ehi_push_desc(ehi, "timeout");
974 				} else if (status & NV_ADMA_STAT_HOTPLUG) {
975 					ata_ehi_hotplugged(ehi);
976 					ata_ehi_push_desc(ehi, "hotplug");
977 				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
978 					ata_ehi_hotplugged(ehi);
979 					ata_ehi_push_desc(ehi, "hot unplug");
980 				} else if (status & NV_ADMA_STAT_SERROR) {
981 					/* let libata analyze SError and figure out the cause */
982 					ata_ehi_push_desc(ehi, "SError");
983 				} else
984 					ata_ehi_push_desc(ehi, "unknown");
985 				ata_port_freeze(ap);
986 				continue;
987 			}
988 
989 			if (status & (NV_ADMA_STAT_DONE |
990 				      NV_ADMA_STAT_CPBERR |
991 				      NV_ADMA_STAT_CMD_COMPLETE)) {
992 				u32 check_commands = notifier_clears[i];
993 				int pos, error = 0;
994 
995 				if (status & NV_ADMA_STAT_CPBERR) {
996 					/* Check all active commands */
997 					if (ata_tag_valid(ap->link.active_tag))
998 						check_commands = 1 <<
999 							ap->link.active_tag;
1000 					else
1001 						check_commands = ap->
1002 							link.sactive;
1003 				}
1004 
1005 				/* Check CPBs for completed commands */
1006 				while ((pos = ffs(check_commands)) && !error) {
1007 					pos--;
1008 					error = nv_adma_check_cpb(ap, pos,
1009 						notifier_error & (1 << pos));
1010 					check_commands &= ~(1 << pos);
1011 				}
1012 			}
1013 		}
1014 	}
1015 
1016 	if (notifier_clears[0] || notifier_clears[1]) {
1017 		/* Note: Both notifier clear registers must be written
1018 		   if either is set, even if one is zero, according to NVIDIA. */
1019 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1020 		writel(notifier_clears[0], pp->notifier_clear_block);
1021 		pp = host->ports[1]->private_data;
1022 		writel(notifier_clears[1], pp->notifier_clear_block);
1023 	}
1024 
1025 	spin_unlock(&host->lock);
1026 
1027 	return IRQ_RETVAL(handled);
1028 }
1029 
1030 static void nv_adma_freeze(struct ata_port *ap)
1031 {
1032 	struct nv_adma_port_priv *pp = ap->private_data;
1033 	void __iomem *mmio = pp->ctl_block;
1034 	u16 tmp;
1035 
1036 	nv_ck804_freeze(ap);
1037 
1038 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1039 		return;
1040 
1041 	/* clear any outstanding CK804 notifications */
1042 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1043 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1044 
1045 	/* Disable interrupt */
1046 	tmp = readw(mmio + NV_ADMA_CTL);
1047 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1048 		mmio + NV_ADMA_CTL);
1049 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1050 }
1051 
1052 static void nv_adma_thaw(struct ata_port *ap)
1053 {
1054 	struct nv_adma_port_priv *pp = ap->private_data;
1055 	void __iomem *mmio = pp->ctl_block;
1056 	u16 tmp;
1057 
1058 	nv_ck804_thaw(ap);
1059 
1060 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1061 		return;
1062 
1063 	/* Enable interrupt */
1064 	tmp = readw(mmio + NV_ADMA_CTL);
1065 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1066 		mmio + NV_ADMA_CTL);
1067 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1068 }
1069 
1070 static void nv_adma_irq_clear(struct ata_port *ap)
1071 {
1072 	struct nv_adma_port_priv *pp = ap->private_data;
1073 	void __iomem *mmio = pp->ctl_block;
1074 	u32 notifier_clears[2];
1075 
1076 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1077 		ata_sff_irq_clear(ap);
1078 		return;
1079 	}
1080 
1081 	/* clear any outstanding CK804 notifications */
1082 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1083 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1084 
1085 	/* clear ADMA status */
1086 	writew(0xffff, mmio + NV_ADMA_STAT);
1087 
1088 	/* clear notifiers - note both ports need to be written with
1089 	   something even though we are only clearing on one */
1090 	if (ap->port_no == 0) {
1091 		notifier_clears[0] = 0xFFFFFFFF;
1092 		notifier_clears[1] = 0;
1093 	} else {
1094 		notifier_clears[0] = 0;
1095 		notifier_clears[1] = 0xFFFFFFFF;
1096 	}
1097 	pp = ap->host->ports[0]->private_data;
1098 	writel(notifier_clears[0], pp->notifier_clear_block);
1099 	pp = ap->host->ports[1]->private_data;
1100 	writel(notifier_clears[1], pp->notifier_clear_block);
1101 }
1102 
1103 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1104 {
1105 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1106 
1107 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1108 		ata_sff_post_internal_cmd(qc);
1109 }
1110 
1111 static int nv_adma_port_start(struct ata_port *ap)
1112 {
1113 	struct device *dev = ap->host->dev;
1114 	struct nv_adma_port_priv *pp;
1115 	int rc;
1116 	void *mem;
1117 	dma_addr_t mem_dma;
1118 	void __iomem *mmio;
1119 	struct pci_dev *pdev = to_pci_dev(dev);
1120 	u16 tmp;
1121 
1122 	VPRINTK("ENTER\n");
1123 
1124 	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1125 	   pad buffers */
1126 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1127 	if (rc)
1128 		return rc;
1129 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1130 	if (rc)
1131 		return rc;
1132 
1133 	rc = ata_port_start(ap);
1134 	if (rc)
1135 		return rc;
1136 
1137 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1138 	if (!pp)
1139 		return -ENOMEM;
1140 
1141 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1142 	       ap->port_no * NV_ADMA_PORT_SIZE;
1143 	pp->ctl_block = mmio;
1144 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1145 	pp->notifier_clear_block = pp->gen_block +
1146 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1147 
1148 	/* Now that the legacy PRD and padding buffer are allocated we can
1149 	   safely raise the DMA mask to allocate the CPB/APRD table.
1150 	   These are allowed to fail since we store the value that ends up
1151 	   being used as the bounce limit in slave_config later if
1152 	   needed. */
1153 	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1154 	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1155 	pp->adma_dma_mask = *dev->dma_mask;
1156 
1157 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1158 				  &mem_dma, GFP_KERNEL);
1159 	if (!mem)
1160 		return -ENOMEM;
1161 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1162 
1163 	/*
1164 	 * First item in chunk of DMA memory:
1165 	 * 128-byte command parameter block (CPB)
1166 	 * one for each command tag
1167 	 */
1168 	pp->cpb     = mem;
1169 	pp->cpb_dma = mem_dma;
1170 
1171 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1172 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1173 
1174 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1175 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1176 
1177 	/*
1178 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1179 	 */
1180 	pp->aprd = mem;
1181 	pp->aprd_dma = mem_dma;
1182 
1183 	ap->private_data = pp;
1184 
1185 	/* clear any outstanding interrupt conditions */
1186 	writew(0xffff, mmio + NV_ADMA_STAT);
1187 
1188 	/* initialize port variables */
1189 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1190 
1191 	/* clear CPB fetch count */
1192 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1193 
1194 	/* clear GO for register mode, enable interrupt */
1195 	tmp = readw(mmio + NV_ADMA_CTL);
1196 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1197 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1198 
1199 	tmp = readw(mmio + NV_ADMA_CTL);
1200 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1201 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1202 	udelay(1);
1203 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1204 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1205 
1206 	return 0;
1207 }
1208 
1209 static void nv_adma_port_stop(struct ata_port *ap)
1210 {
1211 	struct nv_adma_port_priv *pp = ap->private_data;
1212 	void __iomem *mmio = pp->ctl_block;
1213 
1214 	VPRINTK("ENTER\n");
1215 	writew(0, mmio + NV_ADMA_CTL);
1216 }
1217 
1218 #ifdef CONFIG_PM
1219 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1220 {
1221 	struct nv_adma_port_priv *pp = ap->private_data;
1222 	void __iomem *mmio = pp->ctl_block;
1223 
1224 	/* Go to register mode - clears GO */
1225 	nv_adma_register_mode(ap);
1226 
1227 	/* clear CPB fetch count */
1228 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1229 
1230 	/* disable interrupt, shut down port */
1231 	writew(0, mmio + NV_ADMA_CTL);
1232 
1233 	return 0;
1234 }
1235 
1236 static int nv_adma_port_resume(struct ata_port *ap)
1237 {
1238 	struct nv_adma_port_priv *pp = ap->private_data;
1239 	void __iomem *mmio = pp->ctl_block;
1240 	u16 tmp;
1241 
1242 	/* set CPB block location */
1243 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1244 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1245 
1246 	/* clear any outstanding interrupt conditions */
1247 	writew(0xffff, mmio + NV_ADMA_STAT);
1248 
1249 	/* initialize port variables */
1250 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1251 
1252 	/* clear CPB fetch count */
1253 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1254 
1255 	/* clear GO for register mode, enable interrupt */
1256 	tmp = readw(mmio + NV_ADMA_CTL);
1257 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1258 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1259 
1260 	tmp = readw(mmio + NV_ADMA_CTL);
1261 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1262 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1263 	udelay(1);
1264 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1265 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1266 
1267 	return 0;
1268 }
1269 #endif
1270 
1271 static void nv_adma_setup_port(struct ata_port *ap)
1272 {
1273 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1274 	struct ata_ioports *ioport = &ap->ioaddr;
1275 
1276 	VPRINTK("ENTER\n");
1277 
1278 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1279 
1280 	ioport->cmd_addr	= mmio;
1281 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1282 	ioport->error_addr	=
1283 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1284 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1285 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1286 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1287 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1288 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1289 	ioport->status_addr	=
1290 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1291 	ioport->altstatus_addr	=
1292 	ioport->ctl_addr	= mmio + 0x20;
1293 }
1294 
1295 static int nv_adma_host_init(struct ata_host *host)
1296 {
1297 	struct pci_dev *pdev = to_pci_dev(host->dev);
1298 	unsigned int i;
1299 	u32 tmp32;
1300 
1301 	VPRINTK("ENTER\n");
1302 
1303 	/* enable ADMA on the ports */
1304 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1305 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1306 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1307 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1308 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1309 
1310 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1311 
1312 	for (i = 0; i < host->n_ports; i++)
1313 		nv_adma_setup_port(host->ports[i]);
1314 
1315 	return 0;
1316 }
1317 
1318 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1319 			      struct scatterlist *sg,
1320 			      int idx,
1321 			      struct nv_adma_prd *aprd)
1322 {
1323 	u8 flags = 0;
1324 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1325 		flags |= NV_APRD_WRITE;
1326 	if (idx == qc->n_elem - 1)
1327 		flags |= NV_APRD_END;
1328 	else if (idx != 4)
1329 		flags |= NV_APRD_CONT;
1330 
1331 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1332 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1333 	aprd->flags = flags;
1334 	aprd->packet_len = 0;
1335 }
1336 
1337 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1338 {
1339 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1340 	struct nv_adma_prd *aprd;
1341 	struct scatterlist *sg;
1342 	unsigned int si;
1343 
1344 	VPRINTK("ENTER\n");
1345 
1346 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1347 		aprd = (si < 5) ? &cpb->aprd[si] :
1348 			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1349 		nv_adma_fill_aprd(qc, sg, si, aprd);
1350 	}
1351 	if (si > 5)
1352 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1353 	else
1354 		cpb->next_aprd = cpu_to_le64(0);
1355 }
1356 
1357 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1358 {
1359 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1360 
1361 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1362 	   or interrupt-driven no-data commands. */
1363 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1364 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1365 		return 1;
1366 
1367 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1368 	   (qc->tf.protocol == ATA_PROT_NODATA))
1369 		return 0;
1370 
1371 	return 1;
1372 }
1373 
1374 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1375 {
1376 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1377 	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1378 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1379 		       NV_CPB_CTL_IEN;
1380 
1381 	if (nv_adma_use_reg_mode(qc)) {
1382 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1383 			(qc->flags & ATA_QCFLAG_DMAMAP));
1384 		nv_adma_register_mode(qc->ap);
1385 		ata_sff_qc_prep(qc);
1386 		return;
1387 	}
1388 
1389 	cpb->resp_flags = NV_CPB_RESP_DONE;
1390 	wmb();
1391 	cpb->ctl_flags = 0;
1392 	wmb();
1393 
1394 	cpb->len		= 3;
1395 	cpb->tag		= qc->tag;
1396 	cpb->next_cpb_idx	= 0;
1397 
1398 	/* turn on NCQ flags for NCQ commands */
1399 	if (qc->tf.protocol == ATA_PROT_NCQ)
1400 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1401 
1402 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1403 
1404 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1405 
1406 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1407 		nv_adma_fill_sg(qc, cpb);
1408 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1409 	} else
1410 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1411 
1412 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1413 	   until we are finished filling in all of the contents */
1414 	wmb();
1415 	cpb->ctl_flags = ctl_flags;
1416 	wmb();
1417 	cpb->resp_flags = 0;
1418 }
1419 
1420 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1421 {
1422 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1423 	void __iomem *mmio = pp->ctl_block;
1424 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1425 
1426 	VPRINTK("ENTER\n");
1427 
1428 	/* We can't handle result taskfile with NCQ commands, since
1429 	   retrieving the taskfile switches us out of ADMA mode and would abort
1430 	   existing commands. */
1431 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1432 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1433 		ata_dev_printk(qc->dev, KERN_ERR,
1434 			"NCQ w/ RESULT_TF not allowed\n");
1435 		return AC_ERR_SYSTEM;
1436 	}
1437 
1438 	if (nv_adma_use_reg_mode(qc)) {
1439 		/* use ATA register mode */
1440 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1441 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1442 			(qc->flags & ATA_QCFLAG_DMAMAP));
1443 		nv_adma_register_mode(qc->ap);
1444 		return ata_sff_qc_issue(qc);
1445 	} else
1446 		nv_adma_mode(qc->ap);
1447 
1448 	/* write append register, command tag in lower 8 bits
1449 	   and (number of cpbs to append -1) in top 8 bits */
1450 	wmb();
1451 
1452 	if (curr_ncq != pp->last_issue_ncq) {
1453 		/* Seems to need some delay before switching between NCQ and
1454 		   non-NCQ commands, else we get command timeouts and such. */
1455 		udelay(20);
1456 		pp->last_issue_ncq = curr_ncq;
1457 	}
1458 
1459 	writew(qc->tag, mmio + NV_ADMA_APPEND);
1460 
1461 	DPRINTK("Issued tag %u\n", qc->tag);
1462 
1463 	return 0;
1464 }
1465 
1466 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1467 {
1468 	struct ata_host *host = dev_instance;
1469 	unsigned int i;
1470 	unsigned int handled = 0;
1471 	unsigned long flags;
1472 
1473 	spin_lock_irqsave(&host->lock, flags);
1474 
1475 	for (i = 0; i < host->n_ports; i++) {
1476 		struct ata_port *ap;
1477 
1478 		ap = host->ports[i];
1479 		if (ap &&
1480 		    !(ap->flags & ATA_FLAG_DISABLED)) {
1481 			struct ata_queued_cmd *qc;
1482 
1483 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1484 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1485 				handled += ata_sff_host_intr(ap, qc);
1486 			else
1487 				/* No request pending?  Clear interrupt status
1488 				   anyway, in case there's one pending. */
1489 				ap->ops->sff_check_status(ap);
1490 		}
1491 
1492 	}
1493 
1494 	spin_unlock_irqrestore(&host->lock, flags);
1495 
1496 	return IRQ_RETVAL(handled);
1497 }
1498 
1499 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1500 {
1501 	int i, handled = 0;
1502 
1503 	for (i = 0; i < host->n_ports; i++) {
1504 		struct ata_port *ap = host->ports[i];
1505 
1506 		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1507 			handled += nv_host_intr(ap, irq_stat);
1508 
1509 		irq_stat >>= NV_INT_PORT_SHIFT;
1510 	}
1511 
1512 	return IRQ_RETVAL(handled);
1513 }
1514 
1515 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1516 {
1517 	struct ata_host *host = dev_instance;
1518 	u8 irq_stat;
1519 	irqreturn_t ret;
1520 
1521 	spin_lock(&host->lock);
1522 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1523 	ret = nv_do_interrupt(host, irq_stat);
1524 	spin_unlock(&host->lock);
1525 
1526 	return ret;
1527 }
1528 
1529 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1530 {
1531 	struct ata_host *host = dev_instance;
1532 	u8 irq_stat;
1533 	irqreturn_t ret;
1534 
1535 	spin_lock(&host->lock);
1536 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1537 	ret = nv_do_interrupt(host, irq_stat);
1538 	spin_unlock(&host->lock);
1539 
1540 	return ret;
1541 }
1542 
1543 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1544 {
1545 	if (sc_reg > SCR_CONTROL)
1546 		return -EINVAL;
1547 
1548 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1549 	return 0;
1550 }
1551 
1552 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1553 {
1554 	if (sc_reg > SCR_CONTROL)
1555 		return -EINVAL;
1556 
1557 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1558 	return 0;
1559 }
1560 
1561 static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
1562 				   unsigned long deadline)
1563 {
1564 	bool online;
1565 	int rc;
1566 
1567 	rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1568 				 &online, NULL);
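	/* Returning -EAGAIN when the link is online asks libata EH to follow
	   up with a softreset for classification, since this hardreset
	   deliberately skips it. */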
1569 	return online ? -EAGAIN : rc;
1570 }
1571 
1572 static void nv_nf2_freeze(struct ata_port *ap)
1573 {
1574 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1575 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1576 	u8 mask;
1577 
1578 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1579 	mask &= ~(NV_INT_ALL << shift);
1580 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1581 }
1582 
1583 static void nv_nf2_thaw(struct ata_port *ap)
1584 {
1585 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1586 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1587 	u8 mask;
1588 
1589 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1590 
1591 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1592 	mask |= (NV_INT_MASK << shift);
1593 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1594 }
1595 
1596 static void nv_ck804_freeze(struct ata_port *ap)
1597 {
1598 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1599 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1600 	u8 mask;
1601 
1602 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1603 	mask &= ~(NV_INT_ALL << shift);
1604 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1605 }
1606 
1607 static void nv_ck804_thaw(struct ata_port *ap)
1608 {
1609 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1610 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1611 	u8 mask;
1612 
1613 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1614 
1615 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1616 	mask |= (NV_INT_MASK << shift);
1617 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1618 }
1619 
1620 static void nv_mcp55_freeze(struct ata_port *ap)
1621 {
1622 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1623 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1624 	u32 mask;
1625 
1626 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1627 
1628 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1629 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1630 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1631 	ata_sff_freeze(ap);
1632 }
1633 
1634 static void nv_mcp55_thaw(struct ata_port *ap)
1635 {
1636 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1637 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1638 	u32 mask;
1639 
1640 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1641 
1642 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1643 	mask |= (NV_INT_MASK_MCP55 << shift);
1644 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1645 	ata_sff_thaw(ap);
1646 }
1647 
1648 static void nv_adma_error_handler(struct ata_port *ap)
1649 {
1650 	struct nv_adma_port_priv *pp = ap->private_data;
1651 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1652 		void __iomem *mmio = pp->ctl_block;
1653 		int i;
1654 		u16 tmp;
1655 
1656 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1657 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1658 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1659 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1660 			u32 status = readw(mmio + NV_ADMA_STAT);
1661 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1662 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1663 
1664 			ata_port_printk(ap, KERN_ERR,
1665 				"EH in ADMA mode, notifier 0x%X "
1666 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1667 				"next cpb count 0x%X next cpb idx 0x%x\n",
1668 				notifier, notifier_error, gen_ctl, status,
1669 				cpb_count, next_cpb_idx);
1670 
1671 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1672 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1673 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1674 				    ap->link.sactive & (1 << i))
1675 					ata_port_printk(ap, KERN_ERR,
1676 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1677 						i, cpb->ctl_flags, cpb->resp_flags);
1678 			}
1679 		}
1680 
1681 		/* Push us back into port register mode for error handling. */
1682 		nv_adma_register_mode(ap);
1683 
1684 		/* Mark all of the CPBs as invalid to prevent them from
1685 		   being executed */
1686 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1687 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1688 
1689 		/* clear CPB fetch count */
1690 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1691 
1692 		/* Reset channel */
1693 		tmp = readw(mmio + NV_ADMA_CTL);
1694 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1695 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1696 		udelay(1);
1697 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1698 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1699 	}
1700 
1701 	ata_sff_error_handler(ap);
1702 }
1703 
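/* Append a queued command's tag to the software defer queue.  SWNCQ sends
 * commands to the drive one at a time, so commands that arrive while
 * another is still in flight are parked here.
 */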
1704 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1705 {
1706 	struct nv_swncq_port_priv *pp = ap->private_data;
1707 	struct defer_queue *dq = &pp->defer_queue;
1708 
1709 	/* queue is full */
1710 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1711 	dq->defer_bits |= (1 << qc->tag);
1712 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1713 }
1714 
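/* Pop the oldest deferred tag and return its queued command, or NULL if
 * the defer queue is empty.
 */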
1715 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1716 {
1717 	struct nv_swncq_port_priv *pp = ap->private_data;
1718 	struct defer_queue *dq = &pp->defer_queue;
1719 	unsigned int tag;
1720 
1721 	if (dq->head == dq->tail)	/* null queue */
1722 		return NULL;
1723 
1724 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1725 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1726 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1727 	dq->defer_bits &= ~(1 << tag);
1728 
1729 	return ata_qc_from_tag(ap, tag);
1730 }
1731 
1732 static void nv_swncq_fis_reinit(struct ata_port *ap)
1733 {
1734 	struct nv_swncq_port_priv *pp = ap->private_data;
1735 
1736 	pp->dhfis_bits = 0;
1737 	pp->dmafis_bits = 0;
1738 	pp->sdbfis_bits = 0;
1739 	pp->ncq_flags = 0;
1740 }
1741 
1742 static void nv_swncq_pp_reinit(struct ata_port *ap)
1743 {
1744 	struct nv_swncq_port_priv *pp = ap->private_data;
1745 	struct defer_queue *dq = &pp->defer_queue;
1746 
1747 	dq->head = 0;
1748 	dq->tail = 0;
1749 	dq->defer_bits = 0;
1750 	pp->qc_active = 0;
1751 	pp->last_issue_tag = ATA_TAG_POISON;
1752 	nv_swncq_fis_reinit(ap);
1753 }
1754 
1755 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1756 {
1757 	struct nv_swncq_port_priv *pp = ap->private_data;
1758 
1759 	writew(fis, pp->irq_block);
1760 }
1761 
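/* Stop the BMDMA engine without a real queued command: ata_bmdma_stop()
 * only looks at qc->ap, so a stack-allocated dummy qc is sufficient here.
 */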
1762 static void __ata_bmdma_stop(struct ata_port *ap)
1763 {
1764 	struct ata_queued_cmd qc;
1765 
1766 	qc.ap = ap;
1767 	ata_bmdma_stop(&qc);
1768 }
1769 
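/* Called from EH with NCQ commands outstanding: log the SWNCQ and
 * controller state for debugging, then reset the software queue, clear
 * interrupts and stop the BMDMA engine.
 */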
1770 static void nv_swncq_ncq_stop(struct ata_port *ap)
1771 {
1772 	struct nv_swncq_port_priv *pp = ap->private_data;
1773 	unsigned int i;
1774 	u32 sactive;
1775 	u32 done_mask;
1776 
1777 	ata_port_printk(ap, KERN_ERR,
1778 			"EH in SWNCQ mode, QC:qc_active 0x%X sactive 0x%X\n",
1779 			ap->qc_active, ap->link.sactive);
1780 	ata_port_printk(ap, KERN_ERR,
1781 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1782 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1783 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1784 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1785 
1786 	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1787 			ap->ops->sff_check_status(ap),
1788 			ioread8(ap->ioaddr.error_addr));
1789 
1790 	sactive = readl(pp->sactive_block);
1791 	done_mask = pp->qc_active ^ sactive;
1792 
1793 	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1794 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1795 		u8 err = 0;
1796 		if (pp->qc_active & (1 << i))
1797 			err = 0;
1798 		else if (done_mask & (1 << i))
1799 			err = 1;
1800 		else
1801 			continue;
1802 
1803 		ata_port_printk(ap, KERN_ERR,
1804 				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
1805 				(pp->dhfis_bits >> i) & 0x1,
1806 				(pp->dmafis_bits >> i) & 0x1,
1807 				(pp->sdbfis_bits >> i) & 0x1,
1808 				(sactive >> i) & 0x1,
1809 				(err ? "error! tag doesn't exist" : " "));
1810 	}
1811 
1812 	nv_swncq_pp_reinit(ap);
1813 	ap->ops->sff_irq_clear(ap);
1814 	__ata_bmdma_stop(ap);
1815 	nv_swncq_irq_clear(ap, 0xffff);
1816 }
1817 
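/* SWNCQ EH entry point: when NCQ commands were outstanding, dump and stop
 * the software queue and force a reset before running the standard SFF
 * error handler.
 */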
1818 static void nv_swncq_error_handler(struct ata_port *ap)
1819 {
1820 	struct ata_eh_context *ehc = &ap->link.eh_context;
1821 
1822 	if (ap->link.sactive) {
1823 		nv_swncq_ncq_stop(ap);
1824 		ehc->i.action |= ATA_EH_RESET;
1825 	}
1826 
1827 	ata_sff_error_handler(ap);
1828 }
1829 
1830 #ifdef CONFIG_PM
1831 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1832 {
1833 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1834 	u32 tmp;
1835 
1836 	/* clear irq */
1837 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1838 
1839 	/* disable irq */
1840 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1841 
1842 	/* disable swncq */
1843 	tmp = readl(mmio + NV_CTL_MCP55);
1844 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1845 	writel(tmp, mmio + NV_CTL_MCP55);
1846 
1847 	return 0;
1848 }
1849 
1850 static int nv_swncq_port_resume(struct ata_port *ap)
1851 {
1852 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1853 	u32 tmp;
1854 
1855 	/* clear irq */
1856 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1857 
1858 	/* enable irq */
1859 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1860 
1861 	/* enable swncq */
1862 	tmp = readl(mmio + NV_CTL_MCP55);
1863 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1864 
1865 	return 0;
1866 }
1867 #endif
1868 
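/* One-time controller setup for SWNCQ operation: apply the "disable
 * ECO 398" tweak in PCI config space, enable SWNCQ on both ports, and
 * unmask and clear the MCP55 interrupt registers.
 */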
1869 static void nv_swncq_host_init(struct ata_host *host)
1870 {
1871 	u32 tmp;
1872 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1873 	struct pci_dev *pdev = to_pci_dev(host->dev);
1874 	u8 regval;
1875 
1876 	/* disable ECO 398 */
1877 	pci_read_config_byte(pdev, 0x7f, &regval);
1878 	regval &= ~(1 << 7);
1879 	pci_write_config_byte(pdev, 0x7f, regval);
1880 
1881 	/* enable swncq */
1882 	tmp = readl(mmio + NV_CTL_MCP55);
1883 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1884 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1885 
1886 	/* enable irq intr */
1887 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1888 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1889 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1890 
1891 	/*  clear port irq */
1892 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1893 }
1894 
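/* Per-device SCSI slave configuration: Maxtor drives get their queue
 * depth forced to 1 (effectively disabling SWNCQ) on MCP51, and on MCP55
 * revisions up to 0xa2.
 */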
1895 static int nv_swncq_slave_config(struct scsi_device *sdev)
1896 {
1897 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1898 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1899 	struct ata_device *dev;
1900 	int rc;
1901 	u8 rev;
1902 	u8 check_maxtor = 0;
1903 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1904 
1905 	rc = ata_scsi_slave_config(sdev);
1906 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1907 		/* Not a proper libata device, ignore */
1908 		return rc;
1909 
1910 	dev = &ap->link.device[sdev->id];
1911 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1912 		return rc;
1913 
1914 	/* if MCP51 and Maxtor, then disable ncq */
1915 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1916 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1917 		check_maxtor = 1;
1918 
1919 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1920 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1921 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1922 		pci_read_config_byte(pdev, 0x8, &rev);
1923 		if (rev <= 0xa2)
1924 			check_maxtor = 1;
1925 	}
1926 
1927 	if (!check_maxtor)
1928 		return rc;
1929 
1930 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1931 
1932 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1933 		ata_scsi_change_queue_depth(sdev, 1);
1934 		ata_dev_printk(dev, KERN_NOTICE,
1935 			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1936 	}
1937 
1938 	return rc;
1939 }
1940 
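/* Per-port init for SWNCQ: allocate private data plus one BMDMA PRD table
 * per NCQ tag, and record the MMIO locations of the SActive,
 * interrupt-status and tag registers for this port.
 */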
1941 static int nv_swncq_port_start(struct ata_port *ap)
1942 {
1943 	struct device *dev = ap->host->dev;
1944 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1945 	struct nv_swncq_port_priv *pp;
1946 	int rc;
1947 
1948 	rc = ata_port_start(ap);
1949 	if (rc)
1950 		return rc;
1951 
1952 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1953 	if (!pp)
1954 		return -ENOMEM;
1955 
1956 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1957 				      &pp->prd_dma, GFP_KERNEL);
1958 	if (!pp->prd)
1959 		return -ENOMEM;
1960 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1961 
1962 	ap->private_data = pp;
1963 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1964 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1965 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1966 
1967 	return 0;
1968 }
1969 
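/* Command preparation: non-NCQ commands take the standard SFF path; NCQ
 * commands only need their per-tag PRD table built here.
 */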
1970 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1971 {
1972 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1973 		ata_sff_qc_prep(qc);
1974 		return;
1975 	}
1976 
1977 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1978 		return;
1979 
1980 	nv_swncq_fill_sg(qc);
1981 }
1982 
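/* Build the BMDMA PRD table for an NCQ command in its per-tag slot,
 * splitting any scatterlist segment that would cross a 64KB boundary and
 * marking the final entry with ATA_PRD_EOT.
 */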
1983 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1984 {
1985 	struct ata_port *ap = qc->ap;
1986 	struct scatterlist *sg;
1987 	struct nv_swncq_port_priv *pp = ap->private_data;
1988 	struct ata_prd *prd;
1989 	unsigned int si, idx;
1990 
1991 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
1992 
1993 	idx = 0;
1994 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1995 		u32 addr, offset;
1996 		u32 sg_len, len;
1997 
1998 		addr = (u32)sg_dma_address(sg);
1999 		sg_len = sg_dma_len(sg);
2000 
2001 		while (sg_len) {
2002 			offset = addr & 0xffff;
2003 			len = sg_len;
2004 			if ((offset + sg_len) > 0x10000)
2005 				len = 0x10000 - offset;
2006 
2007 			prd[idx].addr = cpu_to_le32(addr);
2008 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2009 
2010 			idx++;
2011 			sg_len -= len;
2012 			addr += len;
2013 		}
2014 	}
2015 
2016 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2017 }
2018 
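/* Issue one NCQ command to the drive: set its bit in SActive, update the
 * driver's issue/FIS bookkeeping, then send the taskfile through the
 * normal SFF register interface.
 */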
2019 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2020 					  struct ata_queued_cmd *qc)
2021 {
2022 	struct nv_swncq_port_priv *pp = ap->private_data;
2023 
2024 	if (qc == NULL)
2025 		return 0;
2026 
2027 	DPRINTK("Enter\n");
2028 
2029 	writel((1 << qc->tag), pp->sactive_block);
2030 	pp->last_issue_tag = qc->tag;
2031 	pp->dhfis_bits &= ~(1 << qc->tag);
2032 	pp->dmafis_bits &= ~(1 << qc->tag);
2033 	pp->qc_active |= (0x1 << qc->tag);
2034 
2035 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2036 	ap->ops->sff_exec_command(ap, &qc->tf);
2037 
2038 	DPRINTK("Issued tag %u\n", qc->tag);
2039 
2040 	return 0;
2041 }
2042 
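/* Command issue: non-NCQ commands go through the normal SFF path.  An NCQ
 * command is issued immediately only when nothing is in flight; otherwise
 * it is parked on the defer queue.
 */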
2043 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2044 {
2045 	struct ata_port *ap = qc->ap;
2046 	struct nv_swncq_port_priv *pp = ap->private_data;
2047 
2048 	if (qc->tf.protocol != ATA_PROT_NCQ)
2049 		return ata_sff_qc_issue(qc);
2050 
2051 	DPRINTK("Enter\n");
2052 
2053 	if (!pp->qc_active)
2054 		nv_swncq_issue_atacmd(ap, qc);
2055 	else
2056 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2057 
2058 	return 0;
2059 }
2060 
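/* Handle a hotplug/unplug notification seen in the SWNCQ interrupt
 * status: clear SError, record what happened for EH, and freeze the port
 * so error handling takes over.
 */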
2061 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2062 {
2063 	u32 serror;
2064 	struct ata_eh_info *ehi = &ap->link.eh_info;
2065 
2066 	ata_ehi_clear_desc(ehi);
2067 
2068 	/* SError must be cleared; otherwise, the controller might lock up */
2069 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2070 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2071 
2072 	/* analyze @irq_stat */
2073 	if (fis & NV_SWNCQ_IRQ_ADDED)
2074 		ata_ehi_push_desc(ehi, "hot plug");
2075 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2076 		ata_ehi_push_desc(ehi, "hot unplug");
2077 
2078 	ata_ehi_hotplugged(ehi);
2079 
2080 	/* okay, let's hand over to EH */
2081 	ehi->serror |= serror;
2082 
2083 	ata_port_freeze(ap);
2084 }
2085 
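/* Process a Set Device Bits FIS: check BMDMA status for transfer errors,
 * complete every command whose SActive bit has cleared, then either
 * reissue the last command if its D2H Register FIS never arrived or issue
 * the next deferred command.  Returns the number of completed commands,
 * or a negative value to request EH.
 */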
2086 static int nv_swncq_sdbfis(struct ata_port *ap)
2087 {
2088 	struct ata_queued_cmd *qc;
2089 	struct nv_swncq_port_priv *pp = ap->private_data;
2090 	struct ata_eh_info *ehi = &ap->link.eh_info;
2091 	u32 sactive;
2092 	int nr_done = 0;
2093 	u32 done_mask;
2094 	int i;
2095 	u8 host_stat;
2096 	u8 lack_dhfis = 0;
2097 
2098 	host_stat = ap->ops->bmdma_status(ap);
2099 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2100 		/* error when transferring data to/from memory */
2101 		ata_ehi_clear_desc(ehi);
2102 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2103 		ehi->err_mask |= AC_ERR_HOST_BUS;
2104 		ehi->action |= ATA_EH_RESET;
2105 		return -EINVAL;
2106 	}
2107 
2108 	ap->ops->sff_irq_clear(ap);
2109 	__ata_bmdma_stop(ap);
2110 
2111 	sactive = readl(pp->sactive_block);
2112 	done_mask = pp->qc_active ^ sactive;
2113 
2114 	if (unlikely(done_mask & sactive)) {
2115 		ata_ehi_clear_desc(ehi);
2116 		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2117 				  "(%08x->%08x)", pp->qc_active, sactive);
2118 		ehi->err_mask |= AC_ERR_HSM;
2119 		ehi->action |= ATA_EH_RESET;
2120 		return -EINVAL;
2121 	}
2122 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
2123 		if (!(done_mask & (1 << i)))
2124 			continue;
2125 
2126 		qc = ata_qc_from_tag(ap, i);
2127 		if (qc) {
2128 			ata_qc_complete(qc);
2129 			pp->qc_active &= ~(1 << i);
2130 			pp->dhfis_bits &= ~(1 << i);
2131 			pp->dmafis_bits &= ~(1 << i);
2132 			pp->sdbfis_bits |= (1 << i);
2133 			nr_done++;
2134 		}
2135 	}
2136 
2137 	if (!ap->qc_active) {
2138 		DPRINTK("over\n");
2139 		nv_swncq_pp_reinit(ap);
2140 		return nr_done;
2141 	}
2142 
2143 	if (pp->qc_active & pp->dhfis_bits)
2144 		return nr_done;
2145 
2146 	if ((pp->ncq_flags & ncq_saw_backout) ||
2147 	    (pp->qc_active ^ pp->dhfis_bits))
2148 		/* if the controller can't get a Device-to-Host Register FIS,
2149 		 * the driver needs to reissue the command.
2150 		 */
2151 		lack_dhfis = 1;
2152 
2153 	DPRINTK("id 0x%x QC: qc_active 0x%x,"
2154 		"SWNCQ:qc_active 0x%X defer_bits %X "
2155 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2156 		ap->print_id, ap->qc_active, pp->qc_active,
2157 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2158 		pp->dmafis_bits, pp->last_issue_tag);
2159 
2160 	nv_swncq_fis_reinit(ap);
2161 
2162 	if (lack_dhfis) {
2163 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2164 		nv_swncq_issue_atacmd(ap, qc);
2165 		return nr_done;
2166 	}
2167 
2168 	if (pp->defer_queue.defer_bits) {
2169 		/* send deferral queue command */
2170 		qc = nv_swncq_qc_from_dq(ap);
2171 		WARN_ON(qc == NULL);
2172 		nv_swncq_issue_atacmd(ap, qc);
2173 	}
2174 
2175 	return nr_done;
2176 }
2177 
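/* Read back the tag of the command the controller is currently servicing
 * (bits 2-6 of the per-port tag register).
 */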
2178 static inline u32 nv_swncq_tag(struct ata_port *ap)
2179 {
2180 	struct nv_swncq_port_priv *pp = ap->private_data;
2181 	u32 tag;
2182 
2183 	tag = readb(pp->tag_block) >> 2;
2184 	return (tag & 0x1f);
2185 }
2186 
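/* A DMA Setup FIS arrived: point the BMDMA engine at the PRD table for
 * the tag the controller reported and start the transfer in the proper
 * direction.  Returns 1 if DMA was started, 0 if no matching command.
 */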
2187 static int nv_swncq_dmafis(struct ata_port *ap)
2188 {
2189 	struct ata_queued_cmd *qc;
2190 	unsigned int rw;
2191 	u8 dmactl;
2192 	u32 tag;
2193 	struct nv_swncq_port_priv *pp = ap->private_data;
2194 
2195 	__ata_bmdma_stop(ap);
2196 	tag = nv_swncq_tag(ap);
2197 
2198 	DPRINTK("dma setup tag 0x%x\n", tag);
2199 	qc = ata_qc_from_tag(ap, tag);
2200 
2201 	if (unlikely(!qc))
2202 		return 0;
2203 
2204 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2205 
2206 	/* load PRD table addr. */
2207 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2208 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2209 
2210 	/* specify data direction: ATA_DMA_WR means the controller writes to memory */
2211 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2212 	dmactl &= ~ATA_DMA_WR;
2213 	if (!rw)
2214 		dmactl |= ATA_DMA_WR;
2215 
2216 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2217 
2218 	return 1;
2219 }
2220 
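/* Per-port interrupt work in SWNCQ mode.  The bits in @fis say which FIS
 * types arrived: hotplug events and device errors are handed to EH, an
 * SDB FIS completes commands, a D2H Register FIS lets the next deferred
 * command be issued, and a DMA Setup FIS starts the matching BMDMA
 * transfer.
 */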
2221 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2222 {
2223 	struct nv_swncq_port_priv *pp = ap->private_data;
2224 	struct ata_queued_cmd *qc;
2225 	struct ata_eh_info *ehi = &ap->link.eh_info;
2226 	u32 serror;
2227 	u8 ata_stat;
2228 	int rc = 0;
2229 
2230 	ata_stat = ap->ops->sff_check_status(ap);
2231 	nv_swncq_irq_clear(ap, fis);
2232 	if (!fis)
2233 		return;
2234 
2235 	if (ap->pflags & ATA_PFLAG_FROZEN)
2236 		return;
2237 
2238 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2239 		nv_swncq_hotplug(ap, fis);
2240 		return;
2241 	}
2242 
2243 	if (!pp->qc_active)
2244 		return;
2245 
2246 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2247 		return;
2248 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2249 
2250 	if (ata_stat & ATA_ERR) {
2251 		ata_ehi_clear_desc(ehi);
2252 		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2253 		ehi->err_mask |= AC_ERR_DEV;
2254 		ehi->serror |= serror;
2255 		ehi->action |= ATA_EH_RESET;
2256 		ata_port_freeze(ap);
2257 		return;
2258 	}
2259 
2260 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2261 		/* On a backout interrupt, the driver must reissue
2262 		 * the command again some time later.
2263 		 */
2264 		pp->ncq_flags |= ncq_saw_backout;
2265 	}
2266 
2267 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2268 		pp->ncq_flags |= ncq_saw_sdb;
2269 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2270 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2271 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2272 			pp->dmafis_bits, readl(pp->sactive_block));
2273 		rc = nv_swncq_sdbfis(ap);
2274 		if (rc < 0)
2275 			goto irq_error;
2276 	}
2277 
2278 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2279 		/* The interrupt indicates the new command
2280 		 * was transmitted correctly to the drive.
2281 		 */
2282 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2283 		pp->ncq_flags |= ncq_saw_d2h;
2284 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2285 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2286 			ehi->err_mask |= AC_ERR_HSM;
2287 			ehi->action |= ATA_EH_RESET;
2288 			goto irq_error;
2289 		}
2290 
2291 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2292 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2293 			ata_stat = ap->ops->sff_check_status(ap);
2294 			if (ata_stat & ATA_BUSY)
2295 				goto irq_exit;
2296 
2297 			if (pp->defer_queue.defer_bits) {
2298 				DPRINTK("send next command\n");
2299 				qc = nv_swncq_qc_from_dq(ap);
2300 				nv_swncq_issue_atacmd(ap, qc);
2301 			}
2302 		}
2303 	}
2304 
2305 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2306 		/* program the dma controller with appropriate PRD buffers
2307 		 * and start the DMA transfer for requested command.
2308 		 */
2309 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2310 		pp->ncq_flags |= ncq_saw_dmas;
2311 		rc = nv_swncq_dmafis(ap);
2312 	}
2313 
2314 irq_exit:
2315 	return;
2316 irq_error:
2317 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2318 	ata_port_freeze(ap);
2319 	return;
2320 }
2321 
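/* Top-level interrupt handler used in SWNCQ mode: read the shared MCP55
 * interrupt status once and dispatch the per-port bits, using the SWNCQ
 * path when NCQ commands are outstanding and the generic nv_host_intr()
 * path otherwise.
 */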
2322 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2323 {
2324 	struct ata_host *host = dev_instance;
2325 	unsigned int i;
2326 	unsigned int handled = 0;
2327 	unsigned long flags;
2328 	u32 irq_stat;
2329 
2330 	spin_lock_irqsave(&host->lock, flags);
2331 
2332 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2333 
2334 	for (i = 0; i < host->n_ports; i++) {
2335 		struct ata_port *ap = host->ports[i];
2336 
2337 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2338 			if (ap->link.sactive) {
2339 				nv_swncq_host_interrupt(ap, (u16)irq_stat);
2340 				handled = 1;
2341 			} else {
2342 				if (irq_stat)	/* clear all but the hotplug bits */
2343 					nv_swncq_irq_clear(ap, 0xfff0);
2344 
2345 				handled += nv_host_intr(ap, (u8)irq_stat);
2346 			}
2347 		}
2348 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2349 	}
2350 
2351 	spin_unlock_irqrestore(&host->lock, flags);
2352 
2353 	return IRQ_RETVAL(handled);
2354 }
2355 
2356 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2357 {
2358 	static int printed_version;
2359 	const struct ata_port_info *ppi[] = { NULL, NULL };
2360 	struct nv_pi_priv *ipriv;
2361 	struct ata_host *host;
2362 	struct nv_host_priv *hpriv;
2363 	int rc;
2364 	u32 bar;
2365 	void __iomem *base;
2366 	unsigned long type = ent->driver_data;
2367 
2368 	/* Make sure this is a SATA controller by counting the number of BARs
2369 	 * (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2370 	 * it's an IDE controller and we ignore it. */
2371 	for (bar = 0; bar < 6; bar++)
2372 		if (pci_resource_start(pdev, bar) == 0)
2373 			return -ENODEV;
2374 
2375 	if (!printed_version++)
2376 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2377 
2378 	rc = pcim_enable_device(pdev);
2379 	if (rc)
2380 		return rc;
2381 
2382 	/* determine type and allocate host */
2383 	if (type == CK804 && adma_enabled) {
2384 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2385 		type = ADMA;
2386 	} else if (type == MCP5x && swncq_enabled) {
2387 		dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2388 		type = SWNCQ;
2389 	}
2390 
2391 	ppi[0] = &nv_port_info[type];
2392 	ipriv = ppi[0]->private_data;
2393 	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2394 	if (rc)
2395 		return rc;
2396 
2397 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2398 	if (!hpriv)
2399 		return -ENOMEM;
2400 	hpriv->type = type;
2401 	host->private_data = hpriv;
2402 
2403 	/* request and iomap NV_MMIO_BAR */
2404 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2405 	if (rc)
2406 		return rc;
2407 
2408 	/* configure SCR access */
2409 	base = host->iomap[NV_MMIO_BAR];
2410 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2411 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2412 
2413 	/* enable SATA space for CK804 */
2414 	if (type >= CK804) {
2415 		u8 regval;
2416 
2417 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2418 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2419 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2420 	}
2421 
2422 	/* init ADMA */
2423 	if (type == ADMA) {
2424 		rc = nv_adma_host_init(host);
2425 		if (rc)
2426 			return rc;
2427 	} else if (type == SWNCQ)
2428 		nv_swncq_host_init(host);
2429 
2430 	pci_set_master(pdev);
2431 	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2432 				 IRQF_SHARED, ipriv->sht);
2433 }
2434 
2435 #ifdef CONFIG_PM
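/* On resume from suspend, re-enable the extended SATA register space on
 * CK804-class chips and reprogram the per-port ADMA enable bits to match
 * each port's ATAPI setup state.
 */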
2436 static int nv_pci_device_resume(struct pci_dev *pdev)
2437 {
2438 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2439 	struct nv_host_priv *hpriv = host->private_data;
2440 	int rc;
2441 
2442 	rc = ata_pci_device_do_resume(pdev);
2443 	if (rc)
2444 		return rc;
2445 
2446 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2447 		if (hpriv->type >= CK804) {
2448 			u8 regval;
2449 
2450 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2451 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2452 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2453 		}
2454 		if (hpriv->type == ADMA) {
2455 			u32 tmp32;
2456 			struct nv_adma_port_priv *pp;
2457 			/* enable/disable ADMA on the ports appropriately */
2458 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2459 
2460 			pp = host->ports[0]->private_data;
2461 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2462 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2463 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2464 			else
2465 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2466 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2467 			pp = host->ports[1]->private_data;
2468 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2469 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2470 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2471 			else
2472 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2473 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2474 
2475 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2476 		}
2477 	}
2478 
2479 	ata_host_resume(host);
2480 
2481 	return 0;
2482 }
2483 #endif
2484 
2485 static void nv_ck804_host_stop(struct ata_host *host)
2486 {
2487 	struct pci_dev *pdev = to_pci_dev(host->dev);
2488 	u8 regval;
2489 
2490 	/* disable SATA space for CK804 */
2491 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2492 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2493 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2494 }
2495 
2496 static void nv_adma_host_stop(struct ata_host *host)
2497 {
2498 	struct pci_dev *pdev = to_pci_dev(host->dev);
2499 	u32 tmp32;
2500 
2501 	/* disable ADMA on the ports */
2502 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2503 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2504 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2505 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2506 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2507 
2508 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2509 
2510 	nv_ck804_host_stop(host);
2511 }
2512 
2513 static int __init nv_init(void)
2514 {
2515 	return pci_register_driver(&nv_pci_driver);
2516 }
2517 
2518 static void __exit nv_exit(void)
2519 {
2520 	pci_unregister_driver(&nv_pci_driver);
2521 }
2522 
2523 module_init(nv_init);
2524 module_exit(nv_exit);
2525 module_param_named(adma, adma_enabled, bool, 0444);
2526 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2527 module_param_named(swncq, swncq_enabled, bool, 0444);
2528 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2529 
2530