/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation is available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a fashion
 *  similar to other PCI IDE BMDMA controllers, with a few NV-specific
 *  details such as register offsets, SATA phy location, hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01,	/* 0 = INT, 1 = SMI */

	/* For PCI config register 20 */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
};
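
/*
 * Worked size check, derived from the constants above: each command tag
 * gets one 128-byte CPB plus a (1024 - 128) / 16 = 56-entry APRD table,
 * i.e. 128 + 56 * 16 = 1024 bytes per tag.  With 32 tags per port,
 * NV_ADMA_PORT_PRIV_DMA_SZ works out to 32 * 1024 = 32 KiB of DMA
 * memory per port, allocated as one chunk in nv_adma_port_start().
 */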

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
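
/*
 * An illustrative sketch of the CPB taskfile entry encoding, as used by
 * nv_adma_tf_to_cpb() below: each 16-bit entry carries the register
 * value in bits 0-7, the register select (DA0-DA2/CS1n) in the bits
 * above that and the control bits (IGN/WNB/CMDEND) at the top.  For
 * example, a final entry writing 0x08 to the sector count register
 * would be encoded as
 *
 *	cpu_to_le16((ATA_REG_NSECT << 8) | 0x08 | CMDEND);
 */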

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments, the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};

struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT)	((GCTL) & (1 << (19 + (12 * (PORT)))))
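
/*
 * For reference, with the encoding above port 0's interrupt flag is
 * bit 19 of the ADMA general control/status word and port 1's is
 * bit 31 (19 + 12 * 1).
 */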

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			 status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
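
/*
 * A note on the polling in nv_adma_register_mode() and nv_adma_mode():
 * with ndelay(50) per iteration and a 20-iteration cap, each wait gives
 * the engine roughly a microsecond (20 * 50 ns, plus MMIO read latency)
 * to change state before the driver warns and carries on.  The budget
 * is a driver heuristic, not a documented hardware limit.
 */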

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Since commands where a result TF is requested are not
	   executed in ADMA mode, the only time this function will be called
	   in ADMA mode will be if a command fails. In this case we
	   don't care about going into register mode with ADMA commands
	   pending, as the commands will all shortly be aborted anyway. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely(force_err ||
		     (flags & (NV_CPB_RESP_ATA_ERR |
			       NV_CPB_RESP_CMD_ERR |
			       NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
				cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/* The NV_INT_DEV indication seems unreliable
					   at times, at least in ADMA mode. Force it on
					   whenever a command is active, to prevent
					   losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				u32 check_commands;
				int pos, error = 0;

				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 << ap->link.active_tag;
				else
					check_commands = ap->link.sactive;

				/* check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
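
/*
 * A quick map of the layout set up above, assuming the usual libata
 * shadow register numbering (ATA_REG_DATA = 0 ... ATA_REG_STATUS = 7):
 * the ADMA port space exposes the taskfile on 32-bit strides, so data
 * sits at +0x00, error/feature at +0x04, and so on up to status/command
 * at +0x1c, with altstatus/device control at +0x20.
 */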

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}
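
/*
 * Example of the flag pattern nv_adma_fill_aprd() produces (a sketch
 * derived from the logic above, not from NVIDIA documentation): for an
 * 8-segment transfer, inline APRDs 0-3 get NV_APRD_CONT, inline APRD 4
 * gets no continuation flag (the engine instead follows next_aprd to
 * the external table), external entries 5-6 get NV_APRD_CONT and the
 * final entry 7 gets NV_APRD_END.
 */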

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma +
					NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands, where a result taskfile
	   is not required. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING) ||
	    (qc->flags & ATA_QCFLAG_RESULT_TF))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append - 1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
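
/*
 * For reference, the append register encoding described in the comment
 * in nv_adma_qc_issue(): writew(qc->tag, ...) appends a single CPB, so
 * issuing tag 5 writes 0x0005 - the tag in the low byte and
 * (number of CPBs to append - 1) = 0 in the high byte.
 */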

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				/* No command pending?  Read the status
				   register anyway to clear any interrupt
				   condition that may be pending. */
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(link, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];

				if ((ata_tag_valid(ap->link.active_tag) &&
				     i == ap->link.active_tag) ||
				    (ap->link.sactive & (1 << i)))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	/* Make sure this is a SATA controller by checking that all six
	   BARs are populated (NVIDIA SATA controllers always have six).
	   Otherwise it's an IDE controller and we ignore it. */
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	ppi[0] = &nv_port_info[type];
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* set 64bit dma masks, may fail */
	if (type == ADMA) {
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
			pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;

			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
1699