/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion to other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
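	/* total s/g capacity: the 56-entry external table plus the
	   5 APRDs embedded in each CPB */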
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3  */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
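
/* Layout check (illustrative, not in the original driver): 8 bytes of
 * header, 24 bytes of taskfile, 5 * 16 bytes of inline APRDs and two
 * trailing 64-bit words add up to 128 bytes, matching NV_ADMA_CPB_SZ.
 * A build-time assertion such as
 *
 *	BUILD_BUG_ON(sizeof(struct nv_adma_cpb) != NV_ADMA_CPB_SZ);
 *
 * would document that dependency.
 */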


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem *		ctl_block;
	void __iomem *		gen_block;
	void __iomem *		notifier_clear_block;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

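/* Per-port interrupt status lives in the ADMA general control register:
   bit 19 for port 0 and bit 31 for port 1, i.e. bit (19 + 12 * port). */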
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= nv_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

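	/* Wait for the engine to go idle: poll NV_ADMA_STAT up to 20
	   times, ~50ns apart, before warning and pressing on anyway. */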
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			 status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

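/* Translate a taskfile into CPB entries. Each 16-bit entry holds the
 * register address in bits 8-12 (CS1n/DA2..DA0, see nv_adma_regbits)
 * and the register value in the low byte; bits 13-15 carry the IGN,
 * WNB and CMDEND control flags.
 */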
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely(force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (flags & NV_CPB_RESP_DONE) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			/* Grab the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ) {
				u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
				qc->err_mask |= ac_err_mask(ata_status);
			}
			DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
				cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable at times
					   at least in ADMA mode. Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				u32 check_commands;
				int pos, error = 0;

				if (ata_tag_valid(ap->active_tag))
					check_commands = 1 << ap->active_tag;
				else
					check_commands = ap->sactive;

				/* Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/* clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

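	/* Program the CPB base address; the split ">> 16 >> 16" extracts
	   the high dword without an out-of-range shift warning when
	   dma_addr_t is only 32 bits wide. */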
	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}

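/* APRD flag rules: the last s/g entry gets NV_APRD_END; every other
 * entry except slot 4 gets NV_APRD_CONT. Slot 4 is the final APRD
 * embedded in the CPB, after which the chain continues in the external
 * table pointed to by next_aprd, so it carries neither flag.
 */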
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

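	/* Invalidate the CPB before rebuilding it: resp_flags is set to
	   DONE and ctl_flags cleared so the controller will not fetch a
	   half-built CPB; the wmb() pairs order these writes. */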
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of BARs
	// (NVIDIA SATA controllers will always have six BARs).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if (!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
	}

	rc = -ENOMEM;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
		return -EIO;
	probe_ent->iomap = pcim_iomap_table(pdev);

	probe_ent->private_data = hpriv;
	hpriv->type = type;

	base = probe_ent->iomap[NV_MMIO_BAR];
	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			return rc;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		return -ENODEV;

	devm_kfree(&pdev->dev, probe_ent);
	return 0;
}

static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
1670