/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation is available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a fashion
 *  similar to other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	/* each port occupies 4 bits; see the extraction sketch after this enum */
	NV_INT_PORT_SHIFT		= 4,

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, /* 0 = INT, 1 = SMI */

	/* For PCI config register 20 */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space  */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

};
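
/*
 * Illustrative sketch (not part of the original driver, and unused by it):
 * how the 4-bit per-port slice of the INT_STATUS/INT_ENABLE registers can
 * be extracted using the constants above.  The interrupt handlers below
 * perform the same shift inline.
 */
static inline u8 nv_irq_stat_for_port(u8 irq_stat, unsigned int port_no)
{
	return (irq_stat >> (port_no * NV_INT_PORT_SHIFT)) & NV_INT_ALL;
}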

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
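
/*
 * Each 16-bit taskfile word in a CPB thus encodes the data byte in bits
 * 7:0, the PATA register-address signals (DA0..DA2 at bits 8..10, CS1n at
 * bit 12) in the middle, and the control flags above in bits 15:13.  For
 * example, (ATA_REG_NSECT << 8) | tf->nsect targets the sector count
 * register; see nv_adma_tf_to_cpb() below.
 */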
192 
193 /* ADMA Command Parameter Block
194    The first 5 SG segments are stored inside the Command Parameter Block itself.
195    If there are more than 5 segments the remainder are stored in a separate
196    memory area indicated by next_aprd. */
197 struct nv_adma_cpb {
198 	u8			resp_flags;    /* 0 */
199 	u8			reserved1;     /* 1 */
200 	u8			ctl_flags;     /* 2 */
201 	/* len is length of taskfile in 64 bit words */
202  	u8			len;           /* 3  */
203 	u8			tag;           /* 4 */
204 	u8			next_cpb_idx;  /* 5 */
205 	__le16			reserved2;     /* 6-7 */
206 	__le16			tf[12];        /* 8-31 */
207 	struct nv_adma_prd	aprd[5];       /* 32-111 */
208 	__le64			next_aprd;     /* 112-119 */
209 	__le64			reserved3;     /* 120-127 */
210 };
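
/*
 * Layout sanity sketch (illustrative; not in the original driver).  The
 * enum math above assumes these structures are exactly NV_ADMA_CPB_SZ and
 * NV_ADMA_APRD_SZ bytes with no compiler padding; a compile-time check
 * like this would catch any accidental change.
 */
static inline void nv_adma_struct_sizes_check(void)
{
	BUILD_BUG_ON(sizeof(struct nv_adma_cpb) != NV_ADMA_CPB_SZ);
	BUILD_BUG_ON(sizeof(struct nv_adma_prd) != NV_ADMA_APRD_SZ);
}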

struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

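/*
 * Each port has an interrupt bit in the ADMA general status word read from
 * NV_ADMA_GEN_CTL, spaced 12 bits apart starting at bit 19 (bit 19 for
 * port 0, bit 31 for port 1); this macro tests the bit for PORT.
 */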
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one(struct pci_dev *pdev);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

enum nv_host_type {
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE << 8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff00, GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= nv_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			 status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Commands that request a result taskfile are not executed in
	   ADMA mode, so the only time this function is called in ADMA
	   mode is when a command has failed.  In that case we don't care
	   about going into register mode with ADMA commands pending, as
	   the commands will all shortly be aborted anyway. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely(force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
				cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable at times,
					   at least in ADMA mode.  Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				u32 check_commands;
				int pos, error = 0;

				if (ata_tag_valid(ap->active_tag))
					check_commands = 1 << ap->active_tag;
				else
					check_commands = ap->sactive;

				/* check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/* clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

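/*
 * Layout of the per-port coherent DMA buffer allocated in
 * nv_adma_port_start() below, derived from the constants above:
 *
 *	32 CPBs x 128 bytes			=  4K (one CPB per tag)
 *	32 s/g tables x 56 APRDs x 16 bytes	= 28K (overflow entries,
 *						       one table per tag)
 *	total: NV_ADMA_PORT_PRIV_DMA_SZ		= 32K
 */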
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

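/*
 * In ADMA mode the legacy taskfile registers appear in BAR5 with 32-bit
 * spacing (hence the "* 4" below) rather than the byte-spaced legacy
 * I/O ports.
 */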
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands, where a result taskfile
	   is not required. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING) ||
	    (qc->flags & ATA_QCFLAG_RESULT_TF))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of CPBs to append - 1) in top 8 bits;
	   writing just the tag therefore appends a single CPB */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				/* No request pending?  Clear interrupt status
				   anyway, in case there's one pending. */
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[2];
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	/* Make sure this is a SATA controller by counting the number of bars
	   (NVIDIA SATA controllers will always have six bars).  Otherwise,
	   it's an IDE controller and we ignore it. */
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	ppi[0] = ppi[1] = &nv_port_info[type];
	rc = ata_pci_prepare_native_host(pdev, ppi, 2, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* set 64bit dma masks, may fail */
	if (type == ADMA) {
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
			pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}

static void nv_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
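
/* Usage note: since the parameter above is read-only after load (0444),
   ADMA support can only be disabled at load time, e.g.
   "modprobe sata_nv adma=0". */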
1660