xref: /linux/drivers/ata/sata_nv.c (revision b454cc6636d254fbf6049b73e9560aee76fb04a3)
1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/DocBook/libata.*
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in much the same
28  *  fashion as other PCI IDE BMDMA controllers, with a few NV-specific
29  *  details such as register offsets, SATA phy location, hotplug info,
30  *  etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50 
51 #define DRV_NAME			"sata_nv"
52 #define DRV_VERSION			"3.2"
53 
54 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
55 
56 enum {
57 	NV_PORTS			= 2,
58 	NV_PIO_MASK			= 0x1f,
59 	NV_MWDMA_MASK			= 0x07,
60 	NV_UDMA_MASK			= 0x7f,
61 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
62 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
63 
64 	/* INT_STATUS/ENABLE */
65 	NV_INT_STATUS			= 0x10,
66 	NV_INT_ENABLE			= 0x11,
67 	NV_INT_STATUS_CK804		= 0x440,
68 	NV_INT_ENABLE_CK804		= 0x441,
69 
70 	/* INT_STATUS/ENABLE bits */
71 	NV_INT_DEV			= 0x01,
72 	NV_INT_PM			= 0x02,
73 	NV_INT_ADDED			= 0x04,
74 	NV_INT_REMOVED			= 0x08,
75 
76 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
77 
78 	NV_INT_ALL			= 0x0f,
79 	NV_INT_MASK			= NV_INT_DEV |
80 					  NV_INT_ADDED | NV_INT_REMOVED,
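
	/* Derived from the above: the status nibble for port n is
	   (irq_stat >> (n * NV_INT_PORT_SHIFT)) & NV_INT_ALL, which is
	   exactly how nv_do_interrupt() below walks the ports. */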
81 
82 	/* INT_CONFIG */
83 	NV_INT_CONFIG			= 0x12,
84 	NV_INT_CONFIG_METHD		= 0x01, /* 0 = INT, 1 = SMI */
85 
86 	/* For PCI config register 20 */
87 	NV_MCP_SATA_CFG_20		= 0x50,
88 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
89 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
90 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
91 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
92 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
93 
94 	NV_ADMA_MAX_CPBS		= 32,
95 	NV_ADMA_CPB_SZ			= 128,
96 	NV_ADMA_APRD_SZ			= 16,
97 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
98 					   NV_ADMA_APRD_SZ,
99 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
100 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
101 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
102 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
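
	/* Worked example from the constants above: NV_ADMA_SGTBL_LEN =
	   (1024 - 128) / 16 = 56 external APRDs per tag (61 total with the
	   5 inline ones), NV_ADMA_SGTBL_SZ = 56 * 16 = 896 bytes, so each
	   tag owns exactly 1K (128-byte CPB + 896-byte APRD table) and
	   NV_ADMA_PORT_PRIV_DMA_SZ = 32 * 1K = 32K per port. */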
103 
104 	/* BAR5 offset to ADMA general registers */
105 	NV_ADMA_GEN			= 0x400,
106 	NV_ADMA_GEN_CTL			= 0x00,
107 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
108 
109 	/* BAR5 offset to ADMA ports */
110 	NV_ADMA_PORT			= 0x480,
111 
112 	/* size of ADMA port register space  */
113 	NV_ADMA_PORT_SIZE		= 0x100,
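
	/* e.g. port 0's ADMA register block starts at BAR5 + 0x480 and
	   port 1's at BAR5 + 0x480 + 0x100 = 0x580, per
	   __nv_adma_ctl_block() below */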
114 
115 	/* ADMA port registers */
116 	NV_ADMA_CTL			= 0x40,
117 	NV_ADMA_CPB_COUNT		= 0x42,
118 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
119 	NV_ADMA_STAT			= 0x44,
120 	NV_ADMA_CPB_BASE_LOW		= 0x48,
121 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
122 	NV_ADMA_APPEND			= 0x50,
123 	NV_ADMA_NOTIFIER		= 0x68,
124 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
125 
126 	/* NV_ADMA_CTL register bits */
127 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
128 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
129 	NV_ADMA_CTL_GO			= (1 << 7),
130 	NV_ADMA_CTL_AIEN		= (1 << 8),
131 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
132 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
133 
134 	/* CPB response flag bits */
135 	NV_CPB_RESP_DONE		= (1 << 0),
136 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
137 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
138 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
139 
140 	/* CPB control flag bits */
141 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
142 	NV_CPB_CTL_QUEUE		= (1 << 1),
143 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
144 	NV_CPB_CTL_IEN			= (1 << 3),
145 	NV_CPB_CTL_FPDMA		= (1 << 4),
146 
147 	/* APRD flags */
148 	NV_APRD_WRITE			= (1 << 1),
149 	NV_APRD_END			= (1 << 2),
150 	NV_APRD_CONT			= (1 << 3),
151 
152 	/* NV_ADMA_STAT flags */
153 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
154 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
155 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
156 	NV_ADMA_STAT_CPBERR		= (1 << 4),
157 	NV_ADMA_STAT_SERROR		= (1 << 5),
158 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
159 	NV_ADMA_STAT_IDLE		= (1 << 8),
160 	NV_ADMA_STAT_LEGACY		= (1 << 9),
161 	NV_ADMA_STAT_STOPPED		= (1 << 10),
162 	NV_ADMA_STAT_DONE		= (1 << 12),
163 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
164 					  NV_ADMA_STAT_TIMEOUT,
165 
166 	/* port flags */
167 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
168 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
169 
170 };
171 
172 /* ADMA Physical Region Descriptor - one SG segment */
173 struct nv_adma_prd {
174 	__le64			addr;
175 	__le32			len;
176 	u8			flags;
177 	u8			packet_len;
178 	__le16			reserved;
179 };
180 
181 enum nv_adma_regbits {
182 	CMDEND	= (1 << 15),		/* end of command list */
183 	WNB	= (1 << 14),		/* wait-not-BSY */
184 	IGN	= (1 << 13),		/* ignore this entry */
185 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
186 	DA2	= (1 << (2 + 8)),
187 	DA1	= (1 << (1 + 8)),
188 	DA0	= (1 << (0 + 8)),
189 };
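
/* Each 16-bit taskfile entry in a CPB appears to encode one register
   write: bits 15-13 carry the flags above, bits 12-8 the standard PATA
   address signals (CS1n, DA2-DA0) that select the register, and bits
   7-0 the data byte; see nv_adma_tf_to_cpb(), which builds entries as
   (ATA_REG_xxx << 8) | data | flags. */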
190 
191 /* ADMA Command Parameter Block
192    The first 5 SG segments are stored inside the Command Parameter Block itself.
193    If there are more than 5 segments the remainder are stored in a separate
194    memory area indicated by next_aprd. */
195 struct nv_adma_cpb {
196 	u8			resp_flags;    /* 0 */
197 	u8			reserved1;     /* 1 */
198 	u8			ctl_flags;     /* 2 */
199 	/* len is the length of the taskfile area in 64-bit words */
200 	u8			len;           /* 3 */
201 	u8			tag;           /* 4 */
202 	u8			next_cpb_idx;  /* 5 */
203 	__le16			reserved2;     /* 6-7 */
204 	__le16			tf[12];        /* 8-31 */
205 	struct nv_adma_prd	aprd[5];       /* 32-111 */
206 	__le64			next_aprd;     /* 112-119 */
207 	__le64			reserved3;     /* 120-127 */
208 };
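
/* Sanity check on the layout above: 8 bytes of header + 24 bytes of
   taskfile (12 entries) + 5 * 16-byte inline APRDs + 16 bytes of
   next_aprd/reserved3 = 128 bytes = NV_ADMA_CPB_SZ. */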
209 
210 
211 struct nv_adma_port_priv {
212 	struct nv_adma_cpb	*cpb;
213 	dma_addr_t		cpb_dma;
214 	struct nv_adma_prd	*aprd;
215 	dma_addr_t		aprd_dma;
216 	u8			flags;
217 };
218 
219 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
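
/* i.e. the ADMA interrupt bit in the general control register is
   bit 19 for port 0 and bit 19 + 12 = 31 for port 1 */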
220 
221 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
222 static void nv_ck804_host_stop(struct ata_host *host);
223 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
224 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
225 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
226 static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg);
227 static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
228 
229 static void nv_nf2_freeze(struct ata_port *ap);
230 static void nv_nf2_thaw(struct ata_port *ap);
231 static void nv_ck804_freeze(struct ata_port *ap);
232 static void nv_ck804_thaw(struct ata_port *ap);
233 static void nv_error_handler(struct ata_port *ap);
234 static int nv_adma_slave_config(struct scsi_device *sdev);
235 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
236 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
237 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
238 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
239 static void nv_adma_irq_clear(struct ata_port *ap);
240 static int nv_adma_port_start(struct ata_port *ap);
241 static void nv_adma_port_stop(struct ata_port *ap);
242 static void nv_adma_error_handler(struct ata_port *ap);
243 static void nv_adma_host_stop(struct ata_host *host);
244 static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
245 static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
246 static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
247 static u8 nv_adma_bmdma_status(struct ata_port *ap);
248 
249 enum nv_host_type
250 {
251 	GENERIC,
252 	NFORCE2,
253 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
254 	CK804,
255 	ADMA
256 };
257 
258 static const struct pci_device_id nv_pci_tbl[] = {
259 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
260 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
261 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
262 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
263 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
264 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
265 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
266 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
267 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
268 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
269 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
270 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
271 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
272 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
273 	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
274 		PCI_ANY_ID, PCI_ANY_ID,
275 		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
276 	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
277 		PCI_ANY_ID, PCI_ANY_ID,
278 		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
279 
280 	{ } /* terminate list */
281 };
282 
283 static struct pci_driver nv_pci_driver = {
284 	.name			= DRV_NAME,
285 	.id_table		= nv_pci_tbl,
286 	.probe			= nv_init_one,
287 	.remove			= ata_pci_remove_one,
288 };
289 
290 static struct scsi_host_template nv_sht = {
291 	.module			= THIS_MODULE,
292 	.name			= DRV_NAME,
293 	.ioctl			= ata_scsi_ioctl,
294 	.queuecommand		= ata_scsi_queuecmd,
295 	.can_queue		= ATA_DEF_QUEUE,
296 	.this_id		= ATA_SHT_THIS_ID,
297 	.sg_tablesize		= LIBATA_MAX_PRD,
298 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
299 	.emulated		= ATA_SHT_EMULATED,
300 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
301 	.proc_name		= DRV_NAME,
302 	.dma_boundary		= ATA_DMA_BOUNDARY,
303 	.slave_configure	= ata_scsi_slave_config,
304 	.slave_destroy		= ata_scsi_slave_destroy,
305 	.bios_param		= ata_std_bios_param,
306 };
307 
308 static struct scsi_host_template nv_adma_sht = {
309 	.module			= THIS_MODULE,
310 	.name			= DRV_NAME,
311 	.ioctl			= ata_scsi_ioctl,
312 	.queuecommand		= ata_scsi_queuecmd,
313 	.can_queue		= NV_ADMA_MAX_CPBS,
314 	.this_id		= ATA_SHT_THIS_ID,
315 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
316 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
317 	.emulated		= ATA_SHT_EMULATED,
318 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
319 	.proc_name		= DRV_NAME,
320 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
321 	.slave_configure	= nv_adma_slave_config,
322 	.slave_destroy		= ata_scsi_slave_destroy,
323 	.bios_param		= ata_std_bios_param,
324 };
325 
326 static const struct ata_port_operations nv_generic_ops = {
327 	.port_disable		= ata_port_disable,
328 	.tf_load		= ata_tf_load,
329 	.tf_read		= ata_tf_read,
330 	.exec_command		= ata_exec_command,
331 	.check_status		= ata_check_status,
332 	.dev_select		= ata_std_dev_select,
333 	.bmdma_setup		= ata_bmdma_setup,
334 	.bmdma_start		= ata_bmdma_start,
335 	.bmdma_stop		= ata_bmdma_stop,
336 	.bmdma_status		= ata_bmdma_status,
337 	.qc_prep		= ata_qc_prep,
338 	.qc_issue		= ata_qc_issue_prot,
339 	.freeze			= ata_bmdma_freeze,
340 	.thaw			= ata_bmdma_thaw,
341 	.error_handler		= nv_error_handler,
342 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
343 	.data_xfer		= ata_pio_data_xfer,
344 	.irq_handler		= nv_generic_interrupt,
345 	.irq_clear		= ata_bmdma_irq_clear,
346 	.scr_read		= nv_scr_read,
347 	.scr_write		= nv_scr_write,
348 	.port_start		= ata_port_start,
349 	.port_stop		= ata_port_stop,
350 	.host_stop		= ata_pci_host_stop,
351 };
352 
353 static const struct ata_port_operations nv_nf2_ops = {
354 	.port_disable		= ata_port_disable,
355 	.tf_load		= ata_tf_load,
356 	.tf_read		= ata_tf_read,
357 	.exec_command		= ata_exec_command,
358 	.check_status		= ata_check_status,
359 	.dev_select		= ata_std_dev_select,
360 	.bmdma_setup		= ata_bmdma_setup,
361 	.bmdma_start		= ata_bmdma_start,
362 	.bmdma_stop		= ata_bmdma_stop,
363 	.bmdma_status		= ata_bmdma_status,
364 	.qc_prep		= ata_qc_prep,
365 	.qc_issue		= ata_qc_issue_prot,
366 	.freeze			= nv_nf2_freeze,
367 	.thaw			= nv_nf2_thaw,
368 	.error_handler		= nv_error_handler,
369 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
370 	.data_xfer		= ata_pio_data_xfer,
371 	.irq_handler		= nv_nf2_interrupt,
372 	.irq_clear		= ata_bmdma_irq_clear,
373 	.scr_read		= nv_scr_read,
374 	.scr_write		= nv_scr_write,
375 	.port_start		= ata_port_start,
376 	.port_stop		= ata_port_stop,
377 	.host_stop		= ata_pci_host_stop,
378 };
379 
380 static const struct ata_port_operations nv_ck804_ops = {
381 	.port_disable		= ata_port_disable,
382 	.tf_load		= ata_tf_load,
383 	.tf_read		= ata_tf_read,
384 	.exec_command		= ata_exec_command,
385 	.check_status		= ata_check_status,
386 	.dev_select		= ata_std_dev_select,
387 	.bmdma_setup		= ata_bmdma_setup,
388 	.bmdma_start		= ata_bmdma_start,
389 	.bmdma_stop		= ata_bmdma_stop,
390 	.bmdma_status		= ata_bmdma_status,
391 	.qc_prep		= ata_qc_prep,
392 	.qc_issue		= ata_qc_issue_prot,
393 	.freeze			= nv_ck804_freeze,
394 	.thaw			= nv_ck804_thaw,
395 	.error_handler		= nv_error_handler,
396 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
397 	.data_xfer		= ata_pio_data_xfer,
398 	.irq_handler		= nv_ck804_interrupt,
399 	.irq_clear		= ata_bmdma_irq_clear,
400 	.scr_read		= nv_scr_read,
401 	.scr_write		= nv_scr_write,
402 	.port_start		= ata_port_start,
403 	.port_stop		= ata_port_stop,
404 	.host_stop		= nv_ck804_host_stop,
405 };
406 
407 static const struct ata_port_operations nv_adma_ops = {
408 	.port_disable		= ata_port_disable,
409 	.tf_load		= ata_tf_load,
410 	.tf_read		= ata_tf_read,
411 	.check_atapi_dma	= nv_adma_check_atapi_dma,
412 	.exec_command		= ata_exec_command,
413 	.check_status		= ata_check_status,
414 	.dev_select		= ata_std_dev_select,
415 	.bmdma_setup		= nv_adma_bmdma_setup,
416 	.bmdma_start		= nv_adma_bmdma_start,
417 	.bmdma_stop		= nv_adma_bmdma_stop,
418 	.bmdma_status		= nv_adma_bmdma_status,
419 	.qc_prep		= nv_adma_qc_prep,
420 	.qc_issue		= nv_adma_qc_issue,
421 	.freeze			= nv_ck804_freeze,
422 	.thaw			= nv_ck804_thaw,
423 	.error_handler		= nv_adma_error_handler,
424 	.post_internal_cmd	= nv_adma_bmdma_stop,
425 	.data_xfer		= ata_mmio_data_xfer,
426 	.irq_handler		= nv_adma_interrupt,
427 	.irq_clear		= nv_adma_irq_clear,
428 	.scr_read		= nv_scr_read,
429 	.scr_write		= nv_scr_write,
430 	.port_start		= nv_adma_port_start,
431 	.port_stop		= nv_adma_port_stop,
432 	.host_stop		= nv_adma_host_stop,
433 };
434 
435 static struct ata_port_info nv_port_info[] = {
436 	/* generic */
437 	{
438 		.sht		= &nv_sht,
439 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
440 				  ATA_FLAG_HRST_TO_RESUME,
441 		.pio_mask	= NV_PIO_MASK,
442 		.mwdma_mask	= NV_MWDMA_MASK,
443 		.udma_mask	= NV_UDMA_MASK,
444 		.port_ops	= &nv_generic_ops,
445 	},
446 	/* nforce2/3 */
447 	{
448 		.sht		= &nv_sht,
449 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
450 				  ATA_FLAG_HRST_TO_RESUME,
451 		.pio_mask	= NV_PIO_MASK,
452 		.mwdma_mask	= NV_MWDMA_MASK,
453 		.udma_mask	= NV_UDMA_MASK,
454 		.port_ops	= &nv_nf2_ops,
455 	},
456 	/* ck804 */
457 	{
458 		.sht		= &nv_sht,
459 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
460 				  ATA_FLAG_HRST_TO_RESUME,
461 		.pio_mask	= NV_PIO_MASK,
462 		.mwdma_mask	= NV_MWDMA_MASK,
463 		.udma_mask	= NV_UDMA_MASK,
464 		.port_ops	= &nv_ck804_ops,
465 	},
466 	/* ADMA */
467 	{
468 		.sht		= &nv_adma_sht,
469 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
470 				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
471 		.pio_mask	= NV_PIO_MASK,
472 		.mwdma_mask	= NV_MWDMA_MASK,
473 		.udma_mask	= NV_UDMA_MASK,
474 		.port_ops	= &nv_adma_ops,
475 	},
476 };
477 
478 MODULE_AUTHOR("NVIDIA");
479 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
480 MODULE_LICENSE("GPL");
481 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
482 MODULE_VERSION(DRV_VERSION);
483 
484 static int adma_enabled = 1;
485 
486 static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
487 					        unsigned int port_no)
488 {
489 	mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
490 	return mmio;
491 }
492 
493 static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
494 {
495 	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
496 }
497 
498 static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
499 {
500 	return (ap->host->mmio_base + NV_ADMA_GEN);
501 }
502 
503 static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
504 {
505 	return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
506 }
507 
508 static void nv_adma_register_mode(struct ata_port *ap)
509 {
510 	void __iomem *mmio = nv_adma_ctl_block(ap);
511 	struct nv_adma_port_priv *pp = ap->private_data;
512 	u16 tmp;
513 
514 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
515 		return;
516 
517 	tmp = readw(mmio + NV_ADMA_CTL);
518 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
519 
520 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
521 }
522 
523 static void nv_adma_mode(struct ata_port *ap)
524 {
525 	void __iomem *mmio = nv_adma_ctl_block(ap);
526 	struct nv_adma_port_priv *pp = ap->private_data;
527 	u16 tmp;
528 
529 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
530 		return;
531 
532 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
533 
534 	tmp = readw(mmio + NV_ADMA_CTL);
535 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
536 
537 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
538 }
539 
540 static int nv_adma_slave_config(struct scsi_device *sdev)
541 {
542 	struct ata_port *ap = ata_shost_to_port(sdev->host);
543 	struct nv_adma_port_priv *pp = ap->private_data;
544 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
545 	u64 bounce_limit;
546 	unsigned long segment_boundary;
547 	unsigned short sg_tablesize;
548 	int rc;
549 	int adma_enable;
550 	u32 current_reg, new_reg, config_mask;
551 
552 	rc = ata_scsi_slave_config(sdev);
553 
554 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
555 		/* Not a proper libata device, ignore */
556 		return rc;
557 
558 	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
559 		/*
560 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
561 		 * Therefore ATAPI commands are sent through the legacy interface.
562 		 * However, the legacy interface only supports 32-bit DMA.
563 		 * Restrict DMA parameters as required by the legacy interface
564 		 * when an ATAPI device is connected.
565 		 */
566 		bounce_limit = ATA_DMA_MASK;
567 		segment_boundary = ATA_DMA_BOUNDARY;
568 		/* Subtract 1 since an extra entry may be needed for padding, see
569 		   libata-scsi.c */
570 		sg_tablesize = LIBATA_MAX_PRD - 1;
571 
572 		/* Since the legacy DMA engine is in use, we need to disable ADMA
573 		   on the port. */
574 		adma_enable = 0;
575 		nv_adma_register_mode(ap);
576 	}
577 	else {
578 		bounce_limit = *ap->dev->dma_mask;
579 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
580 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
581 		adma_enable = 1;
582 	}
583 
584 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
585 
586 	if (ap->port_no == 1)
587 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
588 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
589 	else
590 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
591 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
592 
593 	if (adma_enable) {
594 		new_reg = current_reg | config_mask;
595 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
596 	}
597 	else {
598 		new_reg = current_reg & ~config_mask;
599 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
600 	}
601 
602 	if (current_reg != new_reg)
603 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
604 
605 	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
606 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
607 	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
608 	ata_port_printk(ap, KERN_INFO,
609 		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
610 		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
611 	return rc;
612 }
613 
614 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
615 {
616 	struct nv_adma_port_priv *pp = qc->ap->private_data;
617 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
618 }
619 
620 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
621 {
622 	unsigned int idx = 0;
623 
624 	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
625 
626 	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
627 		cpb[idx++] = cpu_to_le16(IGN);
628 		cpb[idx++] = cpu_to_le16(IGN);
629 		cpb[idx++] = cpu_to_le16(IGN);
630 		cpb[idx++] = cpu_to_le16(IGN);
631 		cpb[idx++] = cpu_to_le16(IGN);
632 	}
633 	else {
634 		cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature);
635 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
636 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
637 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
638 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
639 	}
640 	cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
641 	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
642 	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
643 	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
644 	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
645 
646 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
647 
648 	return idx;
649 }
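
/* nv_adma_tf_to_cpb() always emits 12 entries (device, five HOB-or-IGN
   slots, five current taskfile registers, command), i.e. 24 bytes or
   3 64-bit words, which is why nv_adma_qc_prep() sets cpb->len = 3. */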
650 
651 static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
652 {
653 	struct nv_adma_port_priv *pp = ap->private_data;
654 	int complete = 0, have_err = 0;
655 	u8 flags = pp->cpb[cpb_num].resp_flags;
656 
657 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
658 
659 	if (flags & NV_CPB_RESP_DONE) {
660 		VPRINTK("CPB flags done, flags=0x%x\n", flags);
661 		complete = 1;
662 	}
663 	if (flags & NV_CPB_RESP_ATA_ERR) {
664 		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
665 		have_err = 1;
666 		complete = 1;
667 	}
668 	if (flags & NV_CPB_RESP_CMD_ERR) {
669 		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
670 		have_err = 1;
671 		complete = 1;
672 	}
673 	if (flags & NV_CPB_RESP_CPB_ERR) {
674 		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
675 		have_err = 1;
676 		complete = 1;
677 	}
678 	if (complete || force_err)
679 	{
680 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
681 		if (likely(qc)) {
682 			u8 ata_status = 0;
683 			/* Only use the ATA port status for non-NCQ commands.
684 			   For NCQ commands the current status may have nothing to do with
685 			   the command just completed. */
686 			if (qc->tf.protocol != ATA_PROT_NCQ)
687 				ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));
688 
689 			if (have_err || force_err)
690 				ata_status |= ATA_ERR;
691 
692 			qc->err_mask |= ac_err_mask(ata_status);
693 			DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
694 				qc->err_mask);
695 			ata_qc_complete(qc);
696 		}
697 	}
698 }
699 
700 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
701 {
702 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
703 
704 	/* freeze if hotplugged */
705 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
706 		ata_port_freeze(ap);
707 		return 1;
708 	}
709 
710 	/* bail out if not our interrupt */
711 	if (!(irq_stat & NV_INT_DEV))
712 		return 0;
713 
714 	/* DEV interrupt w/ no active qc? */
715 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
716 		ata_check_status(ap);
717 		return 1;
718 	}
719 
720 	/* handle interrupt */
721 	return ata_host_intr(ap, qc);
722 }
723 
724 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
725 {
726 	struct ata_host *host = dev_instance;
727 	int i, handled = 0;
728 	u32 notifier_clears[2];
729 
730 	spin_lock(&host->lock);
731 
732 	for (i = 0; i < host->n_ports; i++) {
733 		struct ata_port *ap = host->ports[i];
734 		notifier_clears[i] = 0;
735 
736 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
737 			struct nv_adma_port_priv *pp = ap->private_data;
738 			void __iomem *mmio = nv_adma_ctl_block(ap);
739 			u16 status;
740 			u32 gen_ctl;
741 			int have_global_err = 0;
742 			u32 notifier, notifier_error;
743 
744 			/* if in ATA register mode, use standard ata interrupt handler */
745 			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
746 				u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
747 					>> (NV_INT_PORT_SHIFT * i);
748 				if (ata_tag_valid(ap->active_tag))
749 					/* NV_INT_DEV indication seems unreliable at
750 					   times, at least in ADMA mode. Force it on whenever
751 					   a command is active, to prevent losing interrupts. */
752 					irq_stat |= NV_INT_DEV;
753 				handled += nv_host_intr(ap, irq_stat);
754 				continue;
755 			}
756 
757 			notifier = readl(mmio + NV_ADMA_NOTIFIER);
758 			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
759 			notifier_clears[i] = notifier | notifier_error;
760 
761 			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
762 
763 			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
764 			    !notifier_error)
765 				/* Nothing to do */
766 				continue;
767 
768 			status = readw(mmio + NV_ADMA_STAT);
769 
770 			/* Clear status. Ensure the controller sees the clearing before we start
771 			   looking at any of the CPB statuses, so that any CPB completions after
772 			   this point in the handler will raise another interrupt. */
773 			writew(status, mmio + NV_ADMA_STAT);
774 			readw(mmio + NV_ADMA_STAT); /* flush posted write */
775 			rmb();
776 
777 			/* freeze if hotplugged */
778 			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
779 				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
780 				ata_port_freeze(ap);
781 				handled++;
782 				continue;
783 			}
784 
785 			if (status & NV_ADMA_STAT_TIMEOUT) {
786 				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
787 				have_global_err = 1;
788 			}
789 			if (status & NV_ADMA_STAT_CPBERR) {
790 				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
791 				have_global_err = 1;
792 			}
793 			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
794 				/* Check CPBs for completed commands */
795 
796 				if (ata_tag_valid(ap->active_tag))
797 					/* Non-NCQ command */
798 					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
799 						(notifier_error & (1 << ap->active_tag)));
800 				else {
801 					int pos;
802 					u32 active = ap->sactive;
803 					while ((pos = ffs(active))) {
804 						pos--;
805 						nv_adma_check_cpb(ap, pos, have_global_err ||
806 							(notifier_error & (1 << pos)));
807 						active &= ~(1 << pos);
808 					}
808 					}
809 				}
810 			}
811 
812 			handled++; /* irq handled if we got here */
813 		}
814 	}
815 
816 	if (notifier_clears[0] || notifier_clears[1]) {
817 		/* Note: Both notifier clear registers must be written
818 		   if either is set, even if one is zero, according to NVIDIA. */
819 		writel(notifier_clears[0],
820 			nv_adma_notifier_clear_block(host->ports[0]));
821 		writel(notifier_clears[1],
822 			nv_adma_notifier_clear_block(host->ports[1]));
823 	}
824 
825 	spin_unlock(&host->lock);
826 
827 	return IRQ_RETVAL(handled);
828 }
829 
830 static void nv_adma_irq_clear(struct ata_port *ap)
831 {
832 	void __iomem *mmio = nv_adma_ctl_block(ap);
833 	u16 status = readw(mmio + NV_ADMA_STAT);
834 	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
835 	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
836 	unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
837 
838 	/* clear ADMA status */
839 	writew(status, mmio + NV_ADMA_STAT);
840 	writel(notifier | notifier_error,
841 	       nv_adma_notifier_clear_block(ap));
842 
843 	/* clear legacy (BMDMA) status; the interrupt/error bits are
	   write-1-to-clear, so writing back what we just read clears them */
844 	outb(inb(dma_stat_addr), dma_stat_addr);
845 }
846 
847 static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
848 {
849 	struct ata_port *ap = qc->ap;
850 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
851 	struct nv_adma_port_priv *pp = ap->private_data;
852 	u8 dmactl;
853 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
854 	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
855 		WARN_ON(1);
856 		return;
857 	}
858 
859 	/* load PRD table addr. */
860 	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
861 
862 	/* specify data direction, triple-check start bit is clear */
863 	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
864 	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
865 	if (!rw)
866 		dmactl |= ATA_DMA_WR;
867 
868 	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
869 
870 	/* issue r/w command */
871 	ata_exec_command(ap, &qc->tf);
872 }
873 
874 static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
875 {
876 	struct ata_port *ap = qc->ap;
877 	struct nv_adma_port_priv *pp = ap->private_data;
878 	u8 dmactl;
879 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
880 	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
881 		WARN_ON(1);
882 		return;
883 	}
884 
885 	/* start host DMA transaction */
886 	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
887 	outb(dmactl | ATA_DMA_START,
888 	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
889 }
890 
891 static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
892 {
893 	struct ata_port *ap = qc->ap;
894 	struct nv_adma_port_priv *pp = ap->private_data;
895 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
896 	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
897 		return;
898 
899 	/* clear start/stop bit */
900 	outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
901 		ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
902 
903 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
904 	ata_altstatus(ap);        /* dummy read */
905 }
906 
907 static u8 nv_adma_bmdma_status(struct ata_port *ap)
908 {
909 	struct nv_adma_port_priv *pp = ap->private_data;
910 
911 	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
912 
913 	return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
914 }
915 
916 static int nv_adma_port_start(struct ata_port *ap)
917 {
918 	struct device *dev = ap->host->dev;
919 	struct nv_adma_port_priv *pp;
920 	int rc;
921 	void *mem;
922 	dma_addr_t mem_dma;
923 	void __iomem *mmio = nv_adma_ctl_block(ap);
924 	u16 tmp;
925 
926 	VPRINTK("ENTER\n");
927 
928 	rc = ata_port_start(ap);
929 	if (rc)
930 		return rc;
931 
932 	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
933 	if (!pp) {
934 		rc = -ENOMEM;
935 		goto err_out;
936 	}
937 
938 	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
939 				 &mem_dma, GFP_KERNEL);
940 
941 	if (!mem) {
942 		rc = -ENOMEM;
943 		goto err_out_kfree;
944 	}
945 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
946 
947 	/*
948 	 * First item in chunk of DMA memory:
949 	 * 128-byte command parameter block (CPB)
950 	 * one for each command tag
951 	 */
952 	pp->cpb     = mem;
953 	pp->cpb_dma = mem_dma;
954 
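	/* The double 16-bit shift below yields the high dword of the CPB
	   base without the undefined ">> 32" that would occur when
	   dma_addr_t is only 32 bits wide. */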
955 	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
956 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
957 
958 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
959 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
960 
961 	/*
962 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
963 	 */
964 	pp->aprd = mem;
965 	pp->aprd_dma = mem_dma;
966 
967 	ap->private_data = pp;
968 
969 	/* clear any outstanding interrupt conditions */
970 	writew(0xffff, mmio + NV_ADMA_STAT);
971 
972 	/* initialize port variables */
973 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
974 
975 	/* clear CPB fetch count */
976 	writew(0, mmio + NV_ADMA_CPB_COUNT);
977 
978 	/* clear GO for register mode */
979 	tmp = readw(mmio + NV_ADMA_CTL);
980 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
981 
982 	tmp = readw(mmio + NV_ADMA_CTL);
983 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
984 	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
985 	udelay(1);
986 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
987 	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
988 
989 	return 0;
990 
991 err_out_kfree:
992 	kfree(pp);
993 err_out:
994 	ata_port_stop(ap);
995 	return rc;
996 }
997 
998 static void nv_adma_port_stop(struct ata_port *ap)
999 {
1000 	struct device *dev = ap->host->dev;
1001 	struct nv_adma_port_priv *pp = ap->private_data;
1002 	void __iomem *mmio = nv_adma_ctl_block(ap);
1003 
1004 	VPRINTK("ENTER\n");
1005 
1006 	writew(0, mmio + NV_ADMA_CTL);
1007 
1008 	ap->private_data = NULL;
1009 	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
1010 	kfree(pp);
1011 	ata_port_stop(ap);
1012 }
1013 
1014 
1015 static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1016 {
1017 	void __iomem *mmio = probe_ent->mmio_base;
1018 	struct ata_ioports *ioport = &probe_ent->port[port];
1019 
1020 	VPRINTK("ENTER\n");
1021 
1022 	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1023 
1024 	ioport->cmd_addr	= (unsigned long) mmio;
1025 	ioport->data_addr	= (unsigned long) mmio + (ATA_REG_DATA * 4);
1026 	ioport->error_addr	=
1027 	ioport->feature_addr	= (unsigned long) mmio + (ATA_REG_ERR * 4);
1028 	ioport->nsect_addr	= (unsigned long) mmio + (ATA_REG_NSECT * 4);
1029 	ioport->lbal_addr	= (unsigned long) mmio + (ATA_REG_LBAL * 4);
1030 	ioport->lbam_addr	= (unsigned long) mmio + (ATA_REG_LBAM * 4);
1031 	ioport->lbah_addr	= (unsigned long) mmio + (ATA_REG_LBAH * 4);
1032 	ioport->device_addr	= (unsigned long) mmio + (ATA_REG_DEVICE * 4);
1033 	ioport->status_addr	=
1034 	ioport->command_addr	= (unsigned long) mmio + (ATA_REG_STATUS * 4);
1035 	ioport->altstatus_addr	=
1036 	ioport->ctl_addr	= (unsigned long) mmio + 0x20;
1037 }
1038 
1039 static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1040 {
1041 	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1042 	unsigned int i;
1043 	u32 tmp32;
1044 
1045 	VPRINTK("ENTER\n");
1046 
1047 	/* enable ADMA on the ports */
1048 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1049 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1050 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1051 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1052 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1053 
1054 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1055 
1056 	for (i = 0; i < probe_ent->n_ports; i++)
1057 		nv_adma_setup_port(probe_ent, i);
1058 
1059 	for (i = 0; i < probe_ent->n_ports; i++) {
1060 		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
1061 		u16 tmp;
1062 
1063 		/* enable interrupt, clear reset if not already clear */
1064 		tmp = readw(mmio + NV_ADMA_CTL);
1065 		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1066 	}
1067 
1068 	return 0;
1069 }
1070 
1071 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1072 			      struct scatterlist *sg,
1073 			      int idx,
1074 			      struct nv_adma_prd *aprd)
1075 {
1076 	u8 flags;
1077 
1078 	memset(aprd, 0, sizeof(struct nv_adma_prd));
1079 
1080 	flags = 0;
1081 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1082 		flags |= NV_APRD_WRITE;
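	/* The last element is marked END; earlier ones get CONT ("another
	   APRD follows"), except inline slot 4, whose continuation, if any,
	   is presumably reached via the CPB's next_aprd pointer instead
	   (see nv_adma_fill_sg() below). */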
1083 	if (idx == qc->n_elem - 1)
1084 		flags |= NV_APRD_END;
1085 	else if (idx != 4)
1086 		flags |= NV_APRD_CONT;
1087 
1088 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1089 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1090 	aprd->flags = flags;
1091 }
1092 
1093 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1094 {
1095 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1096 	unsigned int idx;
1097 	struct nv_adma_prd *aprd;
1098 	struct scatterlist *sg;
1099 
1100 	VPRINTK("ENTER\n");
1101 
1102 	idx = 0;
1103 
1104 	ata_for_each_sg(sg, qc) {
1105 		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1106 		nv_adma_fill_aprd(qc, sg, idx, aprd);
1107 		idx++;
1108 	}
1109 	if (idx > 5)
1110 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1111 }
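
/* The two views of the external APRD table stay consistent: entry
   indexing above uses NV_ADMA_SGTBL_LEN * tag while the bus address in
   next_aprd uses NV_ADMA_SGTBL_SZ * tag, and 56 entries * 16 bytes =
   896 = NV_ADMA_SGTBL_SZ per tag. */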
1112 
1113 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1114 {
1115 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1116 	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1117 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1118 		       NV_CPB_CTL_APRD_VALID |
1119 		       NV_CPB_CTL_IEN;
1120 
1121 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1122 
1123 	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1124 	     (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1125 		nv_adma_register_mode(qc->ap);
1126 		ata_qc_prep(qc);
1127 		return;
1128 	}
1129 
1130 	memset(cpb, 0, sizeof(struct nv_adma_cpb));
1131 
1132 	cpb->len		= 3;
1133 	cpb->tag		= qc->tag;
1134 	cpb->next_cpb_idx	= 0;
1135 
1136 	/* turn on NCQ flags for NCQ commands */
1137 	if (qc->tf.protocol == ATA_PROT_NCQ)
1138 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1139 
1140 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1141 
1142 	nv_adma_fill_sg(qc, cpb);
1143 
1144 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1145 	   finished filling in all of the contents */
1146 	wmb();
1147 	cpb->ctl_flags = ctl_flags;
1148 }
1149 
1150 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1151 {
1152 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1153 	void __iomem *mmio = nv_adma_ctl_block(qc->ap);
1154 
1155 	VPRINTK("ENTER\n");
1156 
1157 	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1158 	     (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1159 		/* use ATA register mode */
1160 		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
1161 		nv_adma_register_mode(qc->ap);
1162 		return ata_qc_issue_prot(qc);
1163 	} else
1164 		nv_adma_mode(qc->ap);
1165 
1166 	/* write append register: command tag in lower 8 bits, (number of
1167 	   CPBs to append - 1) in top 8 bits; writing qc->tag appends one CPB */
1168 	wmb();
1169 	writew(qc->tag, mmio + NV_ADMA_APPEND);
1170 
1171 	DPRINTK("Issued tag %u\n", qc->tag);
1172 
1173 	return 0;
1174 }
1175 
1176 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1177 {
1178 	struct ata_host *host = dev_instance;
1179 	unsigned int i;
1180 	unsigned int handled = 0;
1181 	unsigned long flags;
1182 
1183 	spin_lock_irqsave(&host->lock, flags);
1184 
1185 	for (i = 0; i < host->n_ports; i++) {
1186 		struct ata_port *ap;
1187 
1188 		ap = host->ports[i];
1189 		if (ap &&
1190 		    !(ap->flags & ATA_FLAG_DISABLED)) {
1191 			struct ata_queued_cmd *qc;
1192 
1193 			qc = ata_qc_from_tag(ap, ap->active_tag);
1194 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1195 				handled += ata_host_intr(ap, qc);
1196 			else
1197 				/* No request pending?  Clear interrupt status
1198 				   anyway, in case there's one pending. */
1199 				ap->ops->check_status(ap);
1200 		}
1201 
1202 	}
1203 
1204 	spin_unlock_irqrestore(&host->lock, flags);
1205 
1206 	return IRQ_RETVAL(handled);
1207 }
1208 
1209 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1210 {
1211 	int i, handled = 0;
1212 
1213 	for (i = 0; i < host->n_ports; i++) {
1214 		struct ata_port *ap = host->ports[i];
1215 
1216 		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1217 			handled += nv_host_intr(ap, irq_stat);
1218 
1219 		irq_stat >>= NV_INT_PORT_SHIFT;
1220 	}
1221 
1222 	return IRQ_RETVAL(handled);
1223 }
1224 
1225 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1226 {
1227 	struct ata_host *host = dev_instance;
1228 	u8 irq_stat;
1229 	irqreturn_t ret;
1230 
1231 	spin_lock(&host->lock);
1232 	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1233 	ret = nv_do_interrupt(host, irq_stat);
1234 	spin_unlock(&host->lock);
1235 
1236 	return ret;
1237 }
1238 
1239 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1240 {
1241 	struct ata_host *host = dev_instance;
1242 	u8 irq_stat;
1243 	irqreturn_t ret;
1244 
1245 	spin_lock(&host->lock);
1246 	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
1247 	ret = nv_do_interrupt(host, irq_stat);
1248 	spin_unlock(&host->lock);
1249 
1250 	return ret;
1251 }
1252 
1253 static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg)
1254 {
1255 	if (sc_reg > SCR_CONTROL)
1256 		return 0xffffffffU;
1257 
1258 	return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
1259 }
1260 
1261 static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1262 {
1263 	if (sc_reg > SCR_CONTROL)
1264 		return;
1265 
1266 	iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
1267 }
1268 
1269 static void nv_nf2_freeze(struct ata_port *ap)
1270 {
1271 	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1272 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1273 	u8 mask;
1274 
1275 	mask = inb(scr_addr + NV_INT_ENABLE);
1276 	mask &= ~(NV_INT_ALL << shift);
1277 	outb(mask, scr_addr + NV_INT_ENABLE);
1278 }
1279 
1280 static void nv_nf2_thaw(struct ata_port *ap)
1281 {
1282 	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1283 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1284 	u8 mask;
1285 
1286 	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1287 
1288 	mask = inb(scr_addr + NV_INT_ENABLE);
1289 	mask |= (NV_INT_MASK << shift);
1290 	outb(mask, scr_addr + NV_INT_ENABLE);
1291 }
1292 
1293 static void nv_ck804_freeze(struct ata_port *ap)
1294 {
1295 	void __iomem *mmio_base = ap->host->mmio_base;
1296 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1297 	u8 mask;
1298 
1299 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1300 	mask &= ~(NV_INT_ALL << shift);
1301 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1302 }
1303 
1304 static void nv_ck804_thaw(struct ata_port *ap)
1305 {
1306 	void __iomem *mmio_base = ap->host->mmio_base;
1307 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1308 	u8 mask;
1309 
1310 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1311 
1312 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1313 	mask |= (NV_INT_MASK << shift);
1314 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1315 }
1316 
1317 static int nv_hardreset(struct ata_port *ap, unsigned int *class)
1318 {
1319 	unsigned int dummy;
1320 
1321 	/* SATA hardreset fails to retrieve proper device signature on
1322 	 * some controllers.  Don't classify on hardreset.  For more
1323 	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
1324 	 */
1325 	return sata_std_hardreset(ap, &dummy);
1326 }
1327 
1328 static void nv_error_handler(struct ata_port *ap)
1329 {
1330 	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1331 			   nv_hardreset, ata_std_postreset);
1332 }
1333 
1334 static void nv_adma_error_handler(struct ata_port *ap)
1335 {
1336 	struct nv_adma_port_priv *pp = ap->private_data;
1337 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1338 		void __iomem *mmio = nv_adma_ctl_block(ap);
1339 		int i;
1340 		u16 tmp;
1341 
1342 		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1343 		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1344 		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
1345 		u32 status = readw(mmio + NV_ADMA_STAT);
1346 
1347 		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1348 			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
1349 			notifier, notifier_error, gen_ctl, status);
1350 
1351 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1352 			struct nv_adma_cpb *cpb = &pp->cpb[i];
1353 			if (cpb->ctl_flags || cpb->resp_flags)
1354 				ata_port_printk(ap, KERN_ERR,
1355 					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1356 					i, cpb->ctl_flags, cpb->resp_flags);
1357 		}
1358 
1359 		/* Push us back into port register mode for error handling. */
1360 		nv_adma_register_mode(ap);
1361 
1362 		ata_port_printk(ap, KERN_ERR, "Resetting port\n");
1363 
1364 		/* Mark all of the CPBs as invalid to prevent them from being executed */
1365 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1366 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1367 
1368 		/* clear CPB fetch count */
1369 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1370 
1371 		/* Reset channel */
1372 		tmp = readw(mmio + NV_ADMA_CTL);
1373 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1374 		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
1375 		udelay(1);
1376 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1377 		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
1378 	}
1379 
1380 	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1381 			   nv_hardreset, ata_std_postreset);
1382 }
1383 
1384 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1385 {
1386 	static int printed_version = 0;
1387 	struct ata_port_info *ppi[2];
1388 	struct ata_probe_ent *probe_ent;
1389 	int pci_dev_busy = 0;
1390 	int rc;
1391 	u32 bar;
1392 	unsigned long base;
1393 	unsigned long type = ent->driver_data;
1394 	int mask_set = 0;
1395 
1396 	/* Make sure this is a SATA controller by counting the number of BARs
1397 	   (NVIDIA SATA controllers will always have six BARs).  Otherwise,
1398 	   it's an IDE controller and we ignore it. */
1399 	for (bar = 0; bar < 6; bar++)
1400 		if (pci_resource_start(pdev, bar) == 0)
1401 			return -ENODEV;
1402 
1403 	if (!printed_version++)
1404 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1405 
1406 	rc = pci_enable_device(pdev);
1407 	if (rc)
1408 		goto err_out;
1409 
1410 	rc = pci_request_regions(pdev, DRV_NAME);
1411 	if (rc) {
1412 		pci_dev_busy = 1;
1413 		goto err_out_disable;
1414 	}
1415 
1416 	if (type >= CK804 && adma_enabled) {
1417 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
1418 		type = ADMA;
1419 		if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1420 		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1421 			mask_set = 1;
1422 	}
1423 
1424 	if (!mask_set) {
1425 		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1426 		if (rc)
1427 			goto err_out_regions;
1428 		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1429 		if (rc)
1430 			goto err_out_regions;
1431 	}
1432 
1433 	rc = -ENOMEM;
1434 
1435 	ppi[0] = ppi[1] = &nv_port_info[type];
1436 	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1437 	if (!probe_ent)
1438 		goto err_out_regions;
1439 
1440 	probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
1441 	if (!probe_ent->mmio_base) {
1442 		rc = -EIO;
1443 		goto err_out_free_ent;
1444 	}
1445 
1446 	base = (unsigned long)probe_ent->mmio_base;
1447 
1448 	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1449 	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1450 
1451 	/* enable SATA space for CK804 */
1452 	if (type >= CK804) {
1453 		u8 regval;
1454 
1455 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1456 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1457 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1458 	}
1459 
1460 	pci_set_master(pdev);
1461 
1462 	if (type == ADMA) {
1463 		rc = nv_adma_host_init(probe_ent);
1464 		if (rc)
1465 			goto err_out_iounmap;
1466 	}
1467 
1468 	rc = ata_device_add(probe_ent);
1469 	if (rc != NV_PORTS)
1470 		goto err_out_iounmap;
1471 
1472 	kfree(probe_ent);
1473 
1474 	return 0;
1475 
1476 err_out_iounmap:
1477 	pci_iounmap(pdev, probe_ent->mmio_base);
1478 err_out_free_ent:
1479 	kfree(probe_ent);
1480 err_out_regions:
1481 	pci_release_regions(pdev);
1482 err_out_disable:
1483 	if (!pci_dev_busy)
1484 		pci_disable_device(pdev);
1485 err_out:
1486 	return rc;
1487 }
1488 
1489 static void nv_ck804_host_stop(struct ata_host *host)
1490 {
1491 	struct pci_dev *pdev = to_pci_dev(host->dev);
1492 	u8 regval;
1493 
1494 	/* disable SATA space for CK804 */
1495 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1496 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1497 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1498 
1499 	ata_pci_host_stop(host);
1500 }
1501 
1502 static void nv_adma_host_stop(struct ata_host *host)
1503 {
1504 	struct pci_dev *pdev = to_pci_dev(host->dev);
1505 	int i;
1506 	u32 tmp32;
1507 
1508 	for (i = 0; i < host->n_ports; i++) {
1509 		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
1510 		u16 tmp;
1511 
1512 		/* disable interrupt */
1513 		tmp = readw(mmio + NV_ADMA_CTL);
1514 		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1515 	}
1516 
1517 	/* disable ADMA on the ports */
1518 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1519 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1520 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1521 		   NV_MCP_SATA_CFG_20_PORT1_EN |
1522 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1523 
1524 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1525 
1526 	nv_ck804_host_stop(host);
1527 }
1528 
1529 static int __init nv_init(void)
1530 {
1531 	return pci_register_driver(&nv_pci_driver);
1532 }
1533 
1534 static void __exit nv_exit(void)
1535 {
1536 	pci_unregister_driver(&nv_pci_driver);
1537 }
1538 
1539 module_init(nv_init);
1540 module_exit(nv_exit);
1541 module_param_named(adma, adma_enabled, bool, 0444);
1542 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
1543