1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * sata_nv.c - NVIDIA nForce SATA
4 *
5 * Copyright 2004 NVIDIA Corp. All rights reserved.
6 * Copyright 2004 Andrew Chew
7 *
8 * libata documentation is available via 'make {ps|pdf}docs',
9 * as Documentation/driver-api/libata.rst
10 *
11 * No hardware documentation available outside of NVIDIA.
12 * This driver programs the NVIDIA SATA controller in a fashion
13 * similar to other PCI IDE BMDMA controllers, with a few
14 * NV-specific details such as register offsets, SATA phy location,
15 * hotplug info, etc.
16 *
17 * CK804/MCP04 controllers support an alternate programming interface
18 * similar to the ADMA specification (with some modifications).
19 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
20 * sent through the legacy interface.
21 */
22
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/gfp.h>
26 #include <linux/pci.h>
27 #include <linux/blkdev.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <linux/libata.h>
34 #include <trace/events/libata.h>
35
36 #define DRV_NAME "sata_nv"
37 #define DRV_VERSION "3.5"
38
39 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
40
41 enum {
42 NV_MMIO_BAR = 5,
43
44 NV_PORTS = 2,
45 NV_PIO_MASK = ATA_PIO4,
46 NV_MWDMA_MASK = ATA_MWDMA2,
47 NV_UDMA_MASK = ATA_UDMA6,
48 NV_PORT0_SCR_REG_OFFSET = 0x00,
49 NV_PORT1_SCR_REG_OFFSET = 0x40,
50
51 /* INT_STATUS/ENABLE */
52 NV_INT_STATUS = 0x10,
53 NV_INT_ENABLE = 0x11,
54 NV_INT_STATUS_CK804 = 0x440,
55 NV_INT_ENABLE_CK804 = 0x441,
56
57 /* INT_STATUS/ENABLE bits */
58 NV_INT_DEV = 0x01,
59 NV_INT_PM = 0x02,
60 NV_INT_ADDED = 0x04,
61 NV_INT_REMOVED = 0x08,
62
63 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
64
65 NV_INT_ALL = 0x0f,
66 NV_INT_MASK = NV_INT_DEV |
67 NV_INT_ADDED | NV_INT_REMOVED,
68
69 /* INT_CONFIG */
70 NV_INT_CONFIG = 0x12,
71 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
72
73 // For PCI config register 20
74 NV_MCP_SATA_CFG_20 = 0x50,
75 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
76 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
77 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
78 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
79 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
80
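/* Sizing sketch: each command tag gets 1 KB of DMA memory, a 128-byte CPB
followed by (1024 - 128) / 16 = 56 external APRD entries. Together with the
5 APRDs embedded in the CPB that allows up to 61 SG segments per command,
and 32 tags add up to a 32 KB per-port allocation. */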
81 NV_ADMA_MAX_CPBS = 32,
82 NV_ADMA_CPB_SZ = 128,
83 NV_ADMA_APRD_SZ = 16,
84 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
85 NV_ADMA_APRD_SZ,
86 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
87 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
88 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
89 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
90
91 /* BAR5 offset to ADMA general registers */
92 NV_ADMA_GEN = 0x400,
93 NV_ADMA_GEN_CTL = 0x00,
94 NV_ADMA_NOTIFIER_CLEAR = 0x30,
95
96 /* BAR5 offset to ADMA ports */
97 NV_ADMA_PORT = 0x480,
98
99 /* size of ADMA port register space */
100 NV_ADMA_PORT_SIZE = 0x100,
101
102 /* ADMA port registers */
103 NV_ADMA_CTL = 0x40,
104 NV_ADMA_CPB_COUNT = 0x42,
105 NV_ADMA_NEXT_CPB_IDX = 0x43,
106 NV_ADMA_STAT = 0x44,
107 NV_ADMA_CPB_BASE_LOW = 0x48,
108 NV_ADMA_CPB_BASE_HIGH = 0x4C,
109 NV_ADMA_APPEND = 0x50,
110 NV_ADMA_NOTIFIER = 0x68,
111 NV_ADMA_NOTIFIER_ERROR = 0x6C,
112
113 /* NV_ADMA_CTL register bits */
114 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
115 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
116 NV_ADMA_CTL_GO = (1 << 7),
117 NV_ADMA_CTL_AIEN = (1 << 8),
118 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
119 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
120
121 /* CPB response flag bits */
122 NV_CPB_RESP_DONE = (1 << 0),
123 NV_CPB_RESP_ATA_ERR = (1 << 3),
124 NV_CPB_RESP_CMD_ERR = (1 << 4),
125 NV_CPB_RESP_CPB_ERR = (1 << 7),
126
127 /* CPB control flag bits */
128 NV_CPB_CTL_CPB_VALID = (1 << 0),
129 NV_CPB_CTL_QUEUE = (1 << 1),
130 NV_CPB_CTL_APRD_VALID = (1 << 2),
131 NV_CPB_CTL_IEN = (1 << 3),
132 NV_CPB_CTL_FPDMA = (1 << 4),
133
134 /* APRD flags */
135 NV_APRD_WRITE = (1 << 1),
136 NV_APRD_END = (1 << 2),
137 NV_APRD_CONT = (1 << 3),
138
139 /* NV_ADMA_STAT flags */
140 NV_ADMA_STAT_TIMEOUT = (1 << 0),
141 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
142 NV_ADMA_STAT_HOTPLUG = (1 << 2),
143 NV_ADMA_STAT_CPBERR = (1 << 4),
144 NV_ADMA_STAT_SERROR = (1 << 5),
145 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
146 NV_ADMA_STAT_IDLE = (1 << 8),
147 NV_ADMA_STAT_LEGACY = (1 << 9),
148 NV_ADMA_STAT_STOPPED = (1 << 10),
149 NV_ADMA_STAT_DONE = (1 << 12),
150 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
151 NV_ADMA_STAT_TIMEOUT,
152
153 /* port flags */
154 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
155 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
156
157 /* MCP55 reg offset */
158 NV_CTL_MCP55 = 0x400,
159 NV_INT_STATUS_MCP55 = 0x440,
160 NV_INT_ENABLE_MCP55 = 0x444,
161 NV_NCQ_REG_MCP55 = 0x448,
162
163 /* MCP55 */
164 NV_INT_ALL_MCP55 = 0xffff,
165 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
166 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
167
168 /* SWNCQ ENABLE BITS */
169 NV_CTL_PRI_SWNCQ = 0x02,
170 NV_CTL_SEC_SWNCQ = 0x04,
171
172 /* SW NCQ status bits */
173 NV_SWNCQ_IRQ_DEV = (1 << 0),
174 NV_SWNCQ_IRQ_PM = (1 << 1),
175 NV_SWNCQ_IRQ_ADDED = (1 << 2),
176 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
177
178 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
179 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
180 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
181 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
182
183 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
184 NV_SWNCQ_IRQ_REMOVED,
185
186 };
187
188 /* ADMA Physical Region Descriptor - one SG segment */
189 struct nv_adma_prd {
190 __le64 addr;
191 __le32 len;
192 u8 flags;
193 u8 packet_len;
194 __le16 reserved;
195 };
196
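/* Bit layout of the 16-bit taskfile entries built by nv_adma_tf_to_cpb():
bits 0-7 carry the register value, bits 8 and up select the (PATA-style)
register address, and the flags below control how the engine processes
the entry. */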
197 enum nv_adma_regbits {
198 CMDEND = (1 << 15), /* end of command list */
199 WNB = (1 << 14), /* wait-not-BSY */
200 IGN = (1 << 13), /* ignore this entry */
201 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
202 DA2 = (1 << (2 + 8)),
203 DA1 = (1 << (1 + 8)),
204 DA0 = (1 << (0 + 8)),
205 };
206
207 /* ADMA Command Parameter Block
208 The first 5 SG segments are stored inside the Command Parameter Block itself.
209 If there are more than 5 segments the remainder are stored in a separate
210 memory area indicated by next_aprd. */
211 struct nv_adma_cpb {
212 u8 resp_flags; /* 0 */
213 u8 reserved1; /* 1 */
214 u8 ctl_flags; /* 2 */
215 /* len is length of taskfile in 64 bit words */
216 u8 len; /* 3 */
217 u8 tag; /* 4 */
218 u8 next_cpb_idx; /* 5 */
219 __le16 reserved2; /* 6-7 */
220 __le16 tf[12]; /* 8-31 */
221 struct nv_adma_prd aprd[5]; /* 32-111 */
222 __le64 next_aprd; /* 112-119 */
223 __le64 reserved3; /* 120-127 */
224 };
225
226
227 struct nv_adma_port_priv {
228 struct nv_adma_cpb *cpb;
229 dma_addr_t cpb_dma;
230 struct nv_adma_prd *aprd;
231 dma_addr_t aprd_dma;
232 void __iomem *ctl_block;
233 void __iomem *gen_block;
234 void __iomem *notifier_clear_block;
235 u64 adma_dma_mask;
236 u8 flags;
237 int last_issue_ncq;
238 };
239
240 struct nv_host_priv {
241 unsigned long type;
242 };
243
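/* Simple FIFO of deferred NCQ command tags; defer_bits mirrors which tags
are currently sitting in the queue (see nv_swncq_qc_to_dq/_from_dq). */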
244 struct defer_queue {
245 u32 defer_bits;
246 unsigned int head;
247 unsigned int tail;
248 unsigned int tag[ATA_MAX_QUEUE];
249 };
250
251 enum ncq_saw_flag_list {
252 ncq_saw_d2h = (1U << 0),
253 ncq_saw_dmas = (1U << 1),
254 ncq_saw_sdb = (1U << 2),
255 ncq_saw_backout = (1U << 3),
256 };
257
258 struct nv_swncq_port_priv {
259 struct ata_bmdma_prd *prd; /* our SG list */
260 dma_addr_t prd_dma; /* and its DMA mapping */
261 void __iomem *sactive_block;
262 void __iomem *irq_block;
263 void __iomem *tag_block;
264 u32 qc_active;
265
266 unsigned int last_issue_tag;
267
268 /* FIFO circular queue to store deferred commands */
269 struct defer_queue defer_queue;
270
271 /* for NCQ interrupt analysis */
272 u32 dhfis_bits;
273 u32 dmafis_bits;
274 u32 sdbfis_bits;
275
276 unsigned int ncq_flags;
277 };
278
279
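/* Per-port interrupt indication in the ADMA general control register:
bit 19 for port 0, bit 31 (19 + 12) for port 1. */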
280 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
281
282 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
283 #ifdef CONFIG_PM_SLEEP
284 static int nv_pci_device_resume(struct pci_dev *pdev);
285 #endif
286 static void nv_ck804_host_stop(struct ata_host *host);
287 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
288 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
289 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
290 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
291 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
292
293 static int nv_hardreset(struct ata_link *link, unsigned int *class,
294 unsigned long deadline);
295 static void nv_nf2_freeze(struct ata_port *ap);
296 static void nv_nf2_thaw(struct ata_port *ap);
297 static void nv_ck804_freeze(struct ata_port *ap);
298 static void nv_ck804_thaw(struct ata_port *ap);
299 static int nv_adma_device_configure(struct scsi_device *sdev,
300 struct queue_limits *lim);
301 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
302 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
303 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
304 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
305 static void nv_adma_irq_clear(struct ata_port *ap);
306 static int nv_adma_port_start(struct ata_port *ap);
307 static void nv_adma_port_stop(struct ata_port *ap);
308 #ifdef CONFIG_PM
309 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
310 static int nv_adma_port_resume(struct ata_port *ap);
311 #endif
312 static void nv_adma_freeze(struct ata_port *ap);
313 static void nv_adma_thaw(struct ata_port *ap);
314 static void nv_adma_error_handler(struct ata_port *ap);
315 static void nv_adma_host_stop(struct ata_host *host);
316 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
317 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
318
319 static void nv_mcp55_thaw(struct ata_port *ap);
320 static void nv_mcp55_freeze(struct ata_port *ap);
321 static void nv_swncq_error_handler(struct ata_port *ap);
322 static int nv_swncq_device_configure(struct scsi_device *sdev,
323 struct queue_limits *lim);
324 static int nv_swncq_port_start(struct ata_port *ap);
325 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
326 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
327 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
328 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
329 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
330 #ifdef CONFIG_PM
331 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
332 static int nv_swncq_port_resume(struct ata_port *ap);
333 #endif
334
335 enum nv_host_type
336 {
337 GENERIC,
338 NFORCE2,
339 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
340 CK804,
341 ADMA,
342 MCP5x,
343 SWNCQ,
344 };
345
346 static const struct pci_device_id nv_pci_tbl[] = {
347 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
348 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
349 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
350 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
351 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
352 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
353 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
354 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
355 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
356 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
357 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
358 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
359 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
360 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
361
362 { } /* terminate list */
363 };
364
365 static struct pci_driver nv_pci_driver = {
366 .name = DRV_NAME,
367 .id_table = nv_pci_tbl,
368 .probe = nv_init_one,
369 #ifdef CONFIG_PM_SLEEP
370 .suspend = ata_pci_device_suspend,
371 .resume = nv_pci_device_resume,
372 #endif
373 .remove = ata_pci_remove_one,
374 };
375
376 static const struct scsi_host_template nv_sht = {
377 ATA_BMDMA_SHT(DRV_NAME),
378 };
379
380 static const struct scsi_host_template nv_adma_sht = {
381 __ATA_BASE_SHT(DRV_NAME),
382 .can_queue = NV_ADMA_MAX_CPBS,
383 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
384 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
385 .device_configure = nv_adma_device_configure,
386 .sdev_groups = ata_ncq_sdev_groups,
387 .change_queue_depth = ata_scsi_change_queue_depth,
388 .tag_alloc_policy = BLK_TAG_ALLOC_RR,
389 };
390
391 static const struct scsi_host_template nv_swncq_sht = {
392 __ATA_BASE_SHT(DRV_NAME),
393 .can_queue = ATA_MAX_QUEUE - 1,
394 .sg_tablesize = LIBATA_MAX_PRD,
395 .dma_boundary = ATA_DMA_BOUNDARY,
396 .device_configure = nv_swncq_device_configure,
397 .sdev_groups = ata_ncq_sdev_groups,
398 .change_queue_depth = ata_scsi_change_queue_depth,
399 .tag_alloc_policy = BLK_TAG_ALLOC_RR,
400 };
401
402 /*
403 * NV SATA controllers have various problems with the hardreset
404 * protocol depending on the specific controller and device.
405 *
406 * GENERIC:
407 *
408 * bko11195 reports that link doesn't come online after hardreset on
409 * generic nv's and there have been several other similar reports on
410 * linux-ide.
411 *
412 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
413 * softreset.
414 *
415 * NF2/3:
416 *
417 * bko3352 reports nf2/3 controllers can't determine device signature
418 * reliably after hardreset. The following thread reports detection
419 * failure on cold boot with the standard debouncing timing.
420 *
421 * http://thread.gmane.org/gmane.linux.ide/34098
422 *
423 * bko12176 reports that hardreset fails to bring up the link during
424 * boot on nf2.
425 *
426 * CK804:
427 *
428 * For initial probing after boot and hot plugging, hardreset mostly
429 * works fine on CK804 but curiously, reprobing on the initial port
430 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
431 * FIS in a somewhat nondeterministic way.
432 *
433 * SWNCQ:
434 *
435 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
436 * hardreset should be used and hardreset can't report proper
437 * signature, which suggests that mcp5x is closer to nf2 as far as
438 * reset quirkiness is concerned.
439 *
440 * bko12703 reports that boot probing fails for intel SSD with
441 * hardreset. Link fails to come online. Softreset works fine.
442 *
443 * The failures are varied but the following patterns seem true for
444 * all flavors.
445 *
446 * - Softreset during boot always works.
447 *
448 * - Hardreset during boot sometimes fails to bring up the link on
449 * certain combinations and device signature acquisition is
450 * unreliable.
451 *
452 * - Hardreset is often necessary after hotplug.
453 *
454 * So, preferring softreset for boot probing and error handling (as
455 * hardreset might bring down the link) but using hardreset for
456 * post-boot probing should work around the above issues in most
457 * cases. Define nv_hardreset() which only kicks in for post-boot
458 * probing and use it for all variants.
459 */
460 static struct ata_port_operations nv_generic_ops = {
461 .inherits = &ata_bmdma_port_ops,
462 .lost_interrupt = ATA_OP_NULL,
463 .scr_read = nv_scr_read,
464 .scr_write = nv_scr_write,
465 .hardreset = nv_hardreset,
466 };
467
468 static struct ata_port_operations nv_nf2_ops = {
469 .inherits = &nv_generic_ops,
470 .freeze = nv_nf2_freeze,
471 .thaw = nv_nf2_thaw,
472 };
473
474 static struct ata_port_operations nv_ck804_ops = {
475 .inherits = &nv_generic_ops,
476 .freeze = nv_ck804_freeze,
477 .thaw = nv_ck804_thaw,
478 .host_stop = nv_ck804_host_stop,
479 };
480
481 static struct ata_port_operations nv_adma_ops = {
482 .inherits = &nv_ck804_ops,
483
484 .check_atapi_dma = nv_adma_check_atapi_dma,
485 .sff_tf_read = nv_adma_tf_read,
486 .qc_defer = ata_std_qc_defer,
487 .qc_prep = nv_adma_qc_prep,
488 .qc_issue = nv_adma_qc_issue,
489 .sff_irq_clear = nv_adma_irq_clear,
490
491 .freeze = nv_adma_freeze,
492 .thaw = nv_adma_thaw,
493 .error_handler = nv_adma_error_handler,
494 .post_internal_cmd = nv_adma_post_internal_cmd,
495
496 .port_start = nv_adma_port_start,
497 .port_stop = nv_adma_port_stop,
498 #ifdef CONFIG_PM
499 .port_suspend = nv_adma_port_suspend,
500 .port_resume = nv_adma_port_resume,
501 #endif
502 .host_stop = nv_adma_host_stop,
503 };
504
505 static struct ata_port_operations nv_swncq_ops = {
506 .inherits = &nv_generic_ops,
507
508 .qc_defer = ata_std_qc_defer,
509 .qc_prep = nv_swncq_qc_prep,
510 .qc_issue = nv_swncq_qc_issue,
511
512 .freeze = nv_mcp55_freeze,
513 .thaw = nv_mcp55_thaw,
514 .error_handler = nv_swncq_error_handler,
515
516 #ifdef CONFIG_PM
517 .port_suspend = nv_swncq_port_suspend,
518 .port_resume = nv_swncq_port_resume,
519 #endif
520 .port_start = nv_swncq_port_start,
521 };
522
523 struct nv_pi_priv {
524 irq_handler_t irq_handler;
525 const struct scsi_host_template *sht;
526 };
527
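/* Bundle the per-flavor interrupt handler and SCSI host template into an
anonymous compound literal whose address is stored in
ata_port_info.private_data, e.g. NV_PI_PRIV(nv_generic_interrupt, &nv_sht). */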
528 #define NV_PI_PRIV(_irq_handler, _sht) \
529 &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
530
531 static const struct ata_port_info nv_port_info[] = {
532 /* generic */
533 {
534 .flags = ATA_FLAG_SATA,
535 .pio_mask = NV_PIO_MASK,
536 .mwdma_mask = NV_MWDMA_MASK,
537 .udma_mask = NV_UDMA_MASK,
538 .port_ops = &nv_generic_ops,
539 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
540 },
541 /* nforce2/3 */
542 {
543 .flags = ATA_FLAG_SATA,
544 .pio_mask = NV_PIO_MASK,
545 .mwdma_mask = NV_MWDMA_MASK,
546 .udma_mask = NV_UDMA_MASK,
547 .port_ops = &nv_nf2_ops,
548 .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
549 },
550 /* ck804 */
551 {
552 .flags = ATA_FLAG_SATA,
553 .pio_mask = NV_PIO_MASK,
554 .mwdma_mask = NV_MWDMA_MASK,
555 .udma_mask = NV_UDMA_MASK,
556 .port_ops = &nv_ck804_ops,
557 .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
558 },
559 /* ADMA */
560 {
561 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
562 .pio_mask = NV_PIO_MASK,
563 .mwdma_mask = NV_MWDMA_MASK,
564 .udma_mask = NV_UDMA_MASK,
565 .port_ops = &nv_adma_ops,
566 .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
567 },
568 /* MCP5x */
569 {
570 .flags = ATA_FLAG_SATA,
571 .pio_mask = NV_PIO_MASK,
572 .mwdma_mask = NV_MWDMA_MASK,
573 .udma_mask = NV_UDMA_MASK,
574 .port_ops = &nv_generic_ops,
575 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
576 },
577 /* SWNCQ */
578 {
579 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
580 .pio_mask = NV_PIO_MASK,
581 .mwdma_mask = NV_MWDMA_MASK,
582 .udma_mask = NV_UDMA_MASK,
583 .port_ops = &nv_swncq_ops,
584 .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
585 },
586 };
587
588 MODULE_AUTHOR("NVIDIA");
589 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
590 MODULE_LICENSE("GPL");
591 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
592 MODULE_VERSION(DRV_VERSION);
593
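/* Driver-wide tunables: ADMA support is off by default, software NCQ is on
by default, and MSI is off by default. */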
594 static bool adma_enabled;
595 static bool swncq_enabled = true;
596 static bool msi_enabled;
597
598 static void nv_adma_register_mode(struct ata_port *ap)
599 {
600 struct nv_adma_port_priv *pp = ap->private_data;
601 void __iomem *mmio = pp->ctl_block;
602 u16 tmp, status;
603 int count = 0;
604
605 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
606 return;
607
608 status = readw(mmio + NV_ADMA_STAT);
609 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
610 ndelay(50);
611 status = readw(mmio + NV_ADMA_STAT);
612 count++;
613 }
614 if (count == 20)
615 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
616 status);
617
618 tmp = readw(mmio + NV_ADMA_CTL);
619 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
620
621 count = 0;
622 status = readw(mmio + NV_ADMA_STAT);
623 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
624 ndelay(50);
625 status = readw(mmio + NV_ADMA_STAT);
626 count++;
627 }
628 if (count == 20)
629 ata_port_warn(ap,
630 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
631 status);
632
633 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
634 }
635
636 static void nv_adma_mode(struct ata_port *ap)
637 {
638 struct nv_adma_port_priv *pp = ap->private_data;
639 void __iomem *mmio = pp->ctl_block;
640 u16 tmp, status;
641 int count = 0;
642
643 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
644 return;
645
646 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
647
648 tmp = readw(mmio + NV_ADMA_CTL);
649 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
650
651 status = readw(mmio + NV_ADMA_STAT);
652 while (((status & NV_ADMA_STAT_LEGACY) ||
653 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
654 ndelay(50);
655 status = readw(mmio + NV_ADMA_STAT);
656 count++;
657 }
658 if (count == 20)
659 ata_port_warn(ap,
660 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
661 status);
662
663 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
664 }
665
666 static int nv_adma_device_configure(struct scsi_device *sdev,
667 struct queue_limits *lim)
668 {
669 struct ata_port *ap = ata_shost_to_port(sdev->host);
670 struct nv_adma_port_priv *pp = ap->private_data;
671 struct nv_adma_port_priv *port0, *port1;
672 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
673 unsigned long segment_boundary, flags;
674 unsigned short sg_tablesize;
675 int rc;
676 int adma_enable;
677 u32 current_reg, new_reg, config_mask;
678
679 rc = ata_scsi_device_configure(sdev, lim);
680
681 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
682 /* Not a proper libata device, ignore */
683 return rc;
684
685 spin_lock_irqsave(ap->lock, flags);
686
687 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
688 /*
689 * NVIDIA reports that ADMA mode does not support ATAPI commands.
690 * Therefore ATAPI commands are sent through the legacy interface.
691 * However, the legacy interface only supports 32-bit DMA.
692 * Restrict DMA parameters as required by the legacy interface
693 * when an ATAPI device is connected.
694 */
695 segment_boundary = ATA_DMA_BOUNDARY;
696 /* Subtract 1 since an extra entry may be needed for padding, see
697 libata-scsi.c */
698 sg_tablesize = LIBATA_MAX_PRD - 1;
699
700 /* Since the legacy DMA engine is in use, we need to disable ADMA
701 on the port. */
702 adma_enable = 0;
703 nv_adma_register_mode(ap);
704 } else {
705 segment_boundary = NV_ADMA_DMA_BOUNDARY;
706 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
707 adma_enable = 1;
708 }
709
710 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
711
712 if (ap->port_no == 1)
713 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
714 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
715 else
716 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
717 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
718
719 if (adma_enable) {
720 new_reg = current_reg | config_mask;
721 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
722 } else {
723 new_reg = current_reg & ~config_mask;
724 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
725 }
726
727 if (current_reg != new_reg)
728 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
729
730 port0 = ap->host->ports[0]->private_data;
731 port1 = ap->host->ports[1]->private_data;
732 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
733 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
734 /*
735 * We have to set the DMA mask to 32-bit if either port is in
736 * ATAPI mode, since they are on the same PCI device which is
737 * used for DMA mapping. If either SCSI device is not allocated
738 * yet, it's OK since that port will discover its correct
739 * setting when it does get allocated.
740 */
741 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
742 } else {
743 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
744 }
745
746 lim->seg_boundary_mask = segment_boundary;
747 lim->max_segments = sg_tablesize;
748 ata_port_info(ap,
749 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
750 (unsigned long long)*ap->host->dev->dma_mask,
751 segment_boundary, sg_tablesize);
752
753 spin_unlock_irqrestore(ap->lock, flags);
754
755 return rc;
756 }
757
758 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
759 {
760 struct nv_adma_port_priv *pp = qc->ap->private_data;
761 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
762 }
763
764 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
765 {
766 /* Other than when internal or pass-through commands are executed,
767 the only time this function will be called in ADMA mode will be
768 if a command fails. In the failure case we don't care about going
769 into register mode with ADMA commands pending, as the commands will
770 all shortly be aborted anyway. We assume that NCQ commands are not
771 issued via passthrough, which is the only way that switching into
772 ADMA mode could abort outstanding commands. */
773 nv_adma_register_mode(ap);
774
775 ata_sff_tf_read(ap, tf);
776 }
777
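/* Translate an ATA taskfile into the CPB's 16-bit entries, each encoded as
(register << 8) | value. The first address entry carries WNB, the command
entry carries CMDEND, and unused slots are padded with IGN. Returns the
number of entries used. */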
778 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
779 {
780 unsigned int idx = 0;
781
782 if (tf->flags & ATA_TFLAG_ISADDR) {
783 if (tf->flags & ATA_TFLAG_LBA48) {
784 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
785 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
786 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
787 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
788 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
789 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
790 } else
791 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
792
793 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
794 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
795 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
796 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
797 }
798
799 if (tf->flags & ATA_TFLAG_DEVICE)
800 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
801
802 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
803
804 while (idx < 12)
805 cpb[idx++] = cpu_to_le16(IGN);
806
807 return idx;
808 }
809
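/* Inspect the response flags of one CPB. Returns 1 if the command completed,
0 if it is still in flight, and -1 on error, in which case the port has
already been aborted or frozen so that EH can sort out what failed. */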
810 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
811 {
812 struct nv_adma_port_priv *pp = ap->private_data;
813 u8 flags = pp->cpb[cpb_num].resp_flags;
814
815 ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);
816
817 if (unlikely((force_err ||
818 flags & (NV_CPB_RESP_ATA_ERR |
819 NV_CPB_RESP_CMD_ERR |
820 NV_CPB_RESP_CPB_ERR)))) {
821 struct ata_eh_info *ehi = &ap->link.eh_info;
822 int freeze = 0;
823
824 ata_ehi_clear_desc(ehi);
825 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
826 if (flags & NV_CPB_RESP_ATA_ERR) {
827 ata_ehi_push_desc(ehi, "ATA error");
828 ehi->err_mask |= AC_ERR_DEV;
829 } else if (flags & NV_CPB_RESP_CMD_ERR) {
830 ata_ehi_push_desc(ehi, "CMD error");
831 ehi->err_mask |= AC_ERR_DEV;
832 } else if (flags & NV_CPB_RESP_CPB_ERR) {
833 ata_ehi_push_desc(ehi, "CPB error");
834 ehi->err_mask |= AC_ERR_SYSTEM;
835 freeze = 1;
836 } else {
837 /* notifier error, but no error in CPB flags? */
838 ata_ehi_push_desc(ehi, "unknown");
839 ehi->err_mask |= AC_ERR_OTHER;
840 freeze = 1;
841 }
842 /* Kill all commands. EH will determine what actually failed. */
843 if (freeze)
844 ata_port_freeze(ap);
845 else
846 ata_port_abort(ap);
847 return -1;
848 }
849
850 if (likely(flags & NV_CPB_RESP_DONE))
851 return 1;
852 return 0;
853 }
854
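/* Legacy (non-ADMA) per-port interrupt handling: freeze on hotplug events,
otherwise hand a device interrupt to the BMDMA handler. Returns nonzero if
the interrupt was consumed. */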
855 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
856 {
857 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
858
859 /* freeze if hotplugged */
860 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
861 ata_port_freeze(ap);
862 return 1;
863 }
864
865 /* bail out if not our interrupt */
866 if (!(irq_stat & NV_INT_DEV))
867 return 0;
868
869 /* DEV interrupt w/ no active qc? */
870 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
871 ata_sff_check_status(ap);
872 return 1;
873 }
874
875 /* handle interrupt */
876 return ata_bmdma_port_intr(ap, qc);
877 }
878
879 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
880 {
881 struct ata_host *host = dev_instance;
882 int i, handled = 0;
883 u32 notifier_clears[2];
884
885 spin_lock(&host->lock);
886
887 for (i = 0; i < host->n_ports; i++) {
888 struct ata_port *ap = host->ports[i];
889 struct nv_adma_port_priv *pp = ap->private_data;
890 void __iomem *mmio = pp->ctl_block;
891 u16 status;
892 u32 gen_ctl;
893 u32 notifier, notifier_error;
894
895 notifier_clears[i] = 0;
896
897 /* if ADMA is disabled, use standard ata interrupt handler */
898 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
899 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
900 >> (NV_INT_PORT_SHIFT * i);
901 handled += nv_host_intr(ap, irq_stat);
902 continue;
903 }
904
905 /* if in ATA register mode, check for standard interrupts */
906 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
907 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
908 >> (NV_INT_PORT_SHIFT * i);
909 if (ata_tag_valid(ap->link.active_tag))
910 /* NV_INT_DEV indication seems unreliable
911 at times, at least in ADMA mode. Always
912 force it on when a command is active, to
913 prevent losing interrupts. */
914 irq_stat |= NV_INT_DEV;
915 handled += nv_host_intr(ap, irq_stat);
916 }
917
918 notifier = readl(mmio + NV_ADMA_NOTIFIER);
919 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
920 notifier_clears[i] = notifier | notifier_error;
921
922 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
923
924 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
925 !notifier_error)
926 /* Nothing to do */
927 continue;
928
929 status = readw(mmio + NV_ADMA_STAT);
930
931 /*
932 * Clear status. Ensure the controller sees the
933 * clearing before we start looking at any of the CPB
934 * statuses, so that any CPB completions after this
935 * point in the handler will raise another interrupt.
936 */
937 writew(status, mmio + NV_ADMA_STAT);
938 readw(mmio + NV_ADMA_STAT); /* flush posted write */
939 rmb();
940
941 handled++; /* irq handled if we got here */
942
943 /* freeze if hotplugged or controller error */
944 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
945 NV_ADMA_STAT_HOTUNPLUG |
946 NV_ADMA_STAT_TIMEOUT |
947 NV_ADMA_STAT_SERROR))) {
948 struct ata_eh_info *ehi = &ap->link.eh_info;
949
950 ata_ehi_clear_desc(ehi);
951 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
952 if (status & NV_ADMA_STAT_TIMEOUT) {
953 ehi->err_mask |= AC_ERR_SYSTEM;
954 ata_ehi_push_desc(ehi, "timeout");
955 } else if (status & NV_ADMA_STAT_HOTPLUG) {
956 ata_ehi_hotplugged(ehi);
957 ata_ehi_push_desc(ehi, "hotplug");
958 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
959 ata_ehi_hotplugged(ehi);
960 ata_ehi_push_desc(ehi, "hot unplug");
961 } else if (status & NV_ADMA_STAT_SERROR) {
962 /* let EH analyze SError and figure out cause */
963 ata_ehi_push_desc(ehi, "SError");
964 } else
965 ata_ehi_push_desc(ehi, "unknown");
966 ata_port_freeze(ap);
967 continue;
968 }
969
970 if (status & (NV_ADMA_STAT_DONE |
971 NV_ADMA_STAT_CPBERR |
972 NV_ADMA_STAT_CMD_COMPLETE)) {
973 u32 check_commands = notifier_clears[i];
974 u32 done_mask = 0;
975 int pos, rc;
976
977 if (status & NV_ADMA_STAT_CPBERR) {
978 /* check all active commands */
979 if (ata_tag_valid(ap->link.active_tag))
980 check_commands = 1 <<
981 ap->link.active_tag;
982 else
983 check_commands = ap->link.sactive;
984 }
985
986 /* check CPBs for completed commands */
987 while ((pos = ffs(check_commands))) {
988 pos--;
989 rc = nv_adma_check_cpb(ap, pos,
990 notifier_error & (1 << pos));
991 if (rc > 0)
992 done_mask |= 1 << pos;
993 else if (unlikely(rc < 0))
994 check_commands = 0;
995 check_commands &= ~(1 << pos);
996 }
997 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
998 }
999 }
1000
1001 if (notifier_clears[0] || notifier_clears[1]) {
1002 /* Note: Both notifier clear registers must be written
1003 if either is set, even if one is zero, according to NVIDIA. */
1004 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1005 writel(notifier_clears[0], pp->notifier_clear_block);
1006 pp = host->ports[1]->private_data;
1007 writel(notifier_clears[1], pp->notifier_clear_block);
1008 }
1009
1010 spin_unlock(&host->lock);
1011
1012 return IRQ_RETVAL(handled);
1013 }
1014
1015 static void nv_adma_freeze(struct ata_port *ap)
1016 {
1017 struct nv_adma_port_priv *pp = ap->private_data;
1018 void __iomem *mmio = pp->ctl_block;
1019 u16 tmp;
1020
1021 nv_ck804_freeze(ap);
1022
1023 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1024 return;
1025
1026 /* clear any outstanding CK804 notifications */
1027 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1028 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1029
1030 /* Disable interrupt */
1031 tmp = readw(mmio + NV_ADMA_CTL);
1032 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1033 mmio + NV_ADMA_CTL);
1034 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1035 }
1036
1037 static void nv_adma_thaw(struct ata_port *ap)
1038 {
1039 struct nv_adma_port_priv *pp = ap->private_data;
1040 void __iomem *mmio = pp->ctl_block;
1041 u16 tmp;
1042
1043 nv_ck804_thaw(ap);
1044
1045 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1046 return;
1047
1048 /* Enable interrupt */
1049 tmp = readw(mmio + NV_ADMA_CTL);
1050 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1051 mmio + NV_ADMA_CTL);
1052 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1053 }
1054
1055 static void nv_adma_irq_clear(struct ata_port *ap)
1056 {
1057 struct nv_adma_port_priv *pp = ap->private_data;
1058 void __iomem *mmio = pp->ctl_block;
1059 u32 notifier_clears[2];
1060
1061 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1062 ata_bmdma_irq_clear(ap);
1063 return;
1064 }
1065
1066 /* clear any outstanding CK804 notifications */
1067 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1068 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1069
1070 /* clear ADMA status */
1071 writew(0xffff, mmio + NV_ADMA_STAT);
1072
1073 /* clear notifiers - note both ports need to be written with
1074 something even though we are only clearing on one */
1075 if (ap->port_no == 0) {
1076 notifier_clears[0] = 0xFFFFFFFF;
1077 notifier_clears[1] = 0;
1078 } else {
1079 notifier_clears[0] = 0;
1080 notifier_clears[1] = 0xFFFFFFFF;
1081 }
1082 pp = ap->host->ports[0]->private_data;
1083 writel(notifier_clears[0], pp->notifier_clear_block);
1084 pp = ap->host->ports[1]->private_data;
1085 writel(notifier_clears[1], pp->notifier_clear_block);
1086 }
1087
1088 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1089 {
1090 struct nv_adma_port_priv *pp = qc->ap->private_data;
1091
1092 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1093 ata_bmdma_post_internal_cmd(qc);
1094 }
1095
1096 static int nv_adma_port_start(struct ata_port *ap)
1097 {
1098 struct device *dev = ap->host->dev;
1099 struct nv_adma_port_priv *pp;
1100 int rc;
1101 void *mem;
1102 dma_addr_t mem_dma;
1103 void __iomem *mmio;
1104 struct pci_dev *pdev = to_pci_dev(dev);
1105 u16 tmp;
1106
1107 /*
1108 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1109 * pad buffers.
1110 */
1111 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1112 if (rc)
1113 return rc;
1114
1115 /* we might fallback to bmdma, allocate bmdma resources */
1116 rc = ata_bmdma_port_start(ap);
1117 if (rc)
1118 return rc;
1119
1120 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1121 if (!pp)
1122 return -ENOMEM;
1123
1124 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1125 ap->port_no * NV_ADMA_PORT_SIZE;
1126 pp->ctl_block = mmio;
1127 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1128 pp->notifier_clear_block = pp->gen_block +
1129 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1130
1131 /*
1132 * Now that the legacy PRD and padding buffer are allocated we can
1133 * raise the DMA mask to allocate the CPB/APRD table.
1134 */
1135 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1136
1137 pp->adma_dma_mask = *dev->dma_mask;
1138
1139 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1140 &mem_dma, GFP_KERNEL);
1141 if (!mem)
1142 return -ENOMEM;
1143
1144 /*
1145 * First item in chunk of DMA memory:
1146 * 128-byte command parameter block (CPB)
1147 * one for each command tag
1148 */
1149 pp->cpb = mem;
1150 pp->cpb_dma = mem_dma;
1151
1152 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
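/* upper half written as (x >> 16) >> 16, presumably so the shift stays
well-defined when dma_addr_t is only 32 bits wide */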
1153 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1154
1155 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1156 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1157
1158 /*
1159 * Second item: block of ADMA_SGTBL_LEN s/g entries
1160 */
1161 pp->aprd = mem;
1162 pp->aprd_dma = mem_dma;
1163
1164 ap->private_data = pp;
1165
1166 /* clear any outstanding interrupt conditions */
1167 writew(0xffff, mmio + NV_ADMA_STAT);
1168
1169 /* initialize port variables */
1170 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1171
1172 /* clear CPB fetch count */
1173 writew(0, mmio + NV_ADMA_CPB_COUNT);
1174
1175 /* clear GO for register mode, enable interrupt */
1176 tmp = readw(mmio + NV_ADMA_CTL);
1177 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1178 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1179
1180 tmp = readw(mmio + NV_ADMA_CTL);
1181 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1182 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1183 udelay(1);
1184 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1185 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1186
1187 return 0;
1188 }
1189
1190 static void nv_adma_port_stop(struct ata_port *ap)
1191 {
1192 struct nv_adma_port_priv *pp = ap->private_data;
1193 void __iomem *mmio = pp->ctl_block;
1194
1195 writew(0, mmio + NV_ADMA_CTL);
1196 }
1197
1198 #ifdef CONFIG_PM
1199 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1200 {
1201 struct nv_adma_port_priv *pp = ap->private_data;
1202 void __iomem *mmio = pp->ctl_block;
1203
1204 /* Go to register mode - clears GO */
1205 nv_adma_register_mode(ap);
1206
1207 /* clear CPB fetch count */
1208 writew(0, mmio + NV_ADMA_CPB_COUNT);
1209
1210 /* disable interrupt, shut down port */
1211 writew(0, mmio + NV_ADMA_CTL);
1212
1213 return 0;
1214 }
1215
1216 static int nv_adma_port_resume(struct ata_port *ap)
1217 {
1218 struct nv_adma_port_priv *pp = ap->private_data;
1219 void __iomem *mmio = pp->ctl_block;
1220 u16 tmp;
1221
1222 /* set CPB block location */
1223 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1224 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1225
1226 /* clear any outstanding interrupt conditions */
1227 writew(0xffff, mmio + NV_ADMA_STAT);
1228
1229 /* initialize port variables */
1230 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1231
1232 /* clear CPB fetch count */
1233 writew(0, mmio + NV_ADMA_CPB_COUNT);
1234
1235 /* clear GO for register mode, enable interrupt */
1236 tmp = readw(mmio + NV_ADMA_CTL);
1237 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1238 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1239
1240 tmp = readw(mmio + NV_ADMA_CTL);
1241 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1242 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1243 udelay(1);
1244 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1245 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1246
1247 return 0;
1248 }
1249 #endif
1250
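/* The ADMA port register space maps the taskfile registers at 4-byte strides
(control/altstatus at offset 0x20); wire the standard SFF ioaddr table to
them so register mode can reuse the generic SFF helpers. */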
1251 static void nv_adma_setup_port(struct ata_port *ap)
1252 {
1253 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1254 struct ata_ioports *ioport = &ap->ioaddr;
1255
1256 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1257
1258 ioport->cmd_addr = mmio;
1259 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1260 ioport->error_addr =
1261 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1262 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1263 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1264 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1265 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1266 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1267 ioport->status_addr =
1268 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1269 ioport->altstatus_addr =
1270 ioport->ctl_addr = mmio + 0x20;
1271 }
1272
1273 static int nv_adma_host_init(struct ata_host *host)
1274 {
1275 struct pci_dev *pdev = to_pci_dev(host->dev);
1276 unsigned int i;
1277 u32 tmp32;
1278
1279 /* enable ADMA on the ports */
1280 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1281 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1282 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1283 NV_MCP_SATA_CFG_20_PORT1_EN |
1284 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1285
1286 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1287
1288 for (i = 0; i < host->n_ports; i++)
1289 nv_adma_setup_port(host->ports[i]);
1290
1291 return 0;
1292 }
1293
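/* Fill one APRD from an SG segment. The last segment gets NV_APRD_END and
intermediate ones NV_APRD_CONT, except for index 4, the last APRD embedded
in the CPB, which presumably continues via the CPB's next_aprd pointer
instead. */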
1294 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1295 struct scatterlist *sg,
1296 int idx,
1297 struct nv_adma_prd *aprd)
1298 {
1299 u8 flags = 0;
1300 if (qc->tf.flags & ATA_TFLAG_WRITE)
1301 flags |= NV_APRD_WRITE;
1302 if (idx == qc->n_elem - 1)
1303 flags |= NV_APRD_END;
1304 else if (idx != 4)
1305 flags |= NV_APRD_CONT;
1306
1307 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1308 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1309 aprd->flags = flags;
1310 aprd->packet_len = 0;
1311 }
1312
1313 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1314 {
1315 struct nv_adma_port_priv *pp = qc->ap->private_data;
1316 struct nv_adma_prd *aprd;
1317 struct scatterlist *sg;
1318 unsigned int si;
1319
1320 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1321 aprd = (si < 5) ? &cpb->aprd[si] :
1322 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1323 nv_adma_fill_aprd(qc, sg, si, aprd);
1324 }
1325 if (si > 5)
1326 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1327 else
1328 cpb->next_aprd = cpu_to_le64(0);
1329 }
1330
1331 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1332 {
1333 struct nv_adma_port_priv *pp = qc->ap->private_data;
1334
1335 /* ADMA engine can only be used for non-ATAPI DMA commands,
1336 or interrupt-driven no-data commands. */
1337 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1338 (qc->tf.flags & ATA_TFLAG_POLLING))
1339 return 1;
1340
1341 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1342 (qc->tf.protocol == ATA_PROT_NODATA))
1343 return 0;
1344
1345 return 1;
1346 }
1347
1348 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1349 {
1350 struct nv_adma_port_priv *pp = qc->ap->private_data;
1351 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1352 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1353 NV_CPB_CTL_IEN;
1354
1355 if (nv_adma_use_reg_mode(qc)) {
1356 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1357 (qc->flags & ATA_QCFLAG_DMAMAP));
1358 nv_adma_register_mode(qc->ap);
1359 ata_bmdma_qc_prep(qc);
1360 return AC_ERR_OK;
1361 }
1362
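/* Invalidate the CPB (and mark it done) before rewriting it, so that neither
the controller nor the interrupt handler can see a half-built CPB as an
active command. */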
1363 cpb->resp_flags = NV_CPB_RESP_DONE;
1364 wmb();
1365 cpb->ctl_flags = 0;
1366 wmb();
1367
1368 cpb->len = 3;
1369 cpb->tag = qc->hw_tag;
1370 cpb->next_cpb_idx = 0;
1371
1372 /* turn on NCQ flags for NCQ commands */
1373 if (qc->tf.protocol == ATA_PROT_NCQ)
1374 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1375
1376 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1377
1378 if (qc->flags & ATA_QCFLAG_DMAMAP) {
1379 nv_adma_fill_sg(qc, cpb);
1380 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1381 } else
1382 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1383
1384 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1385 until we are finished filling in all of the contents */
1386 wmb();
1387 cpb->ctl_flags = ctl_flags;
1388 wmb();
1389 cpb->resp_flags = 0;
1390
1391 return AC_ERR_OK;
1392 }
1393
1394 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1395 {
1396 struct nv_adma_port_priv *pp = qc->ap->private_data;
1397 void __iomem *mmio = pp->ctl_block;
1398 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1399
1400 /* We can't handle result taskfile with NCQ commands, since
1401 retrieving the taskfile switches us out of ADMA mode and would abort
1402 existing commands. */
1403 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1404 (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1405 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1406 return AC_ERR_SYSTEM;
1407 }
1408
1409 if (nv_adma_use_reg_mode(qc)) {
1410 /* use ATA register mode */
1411 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1412 (qc->flags & ATA_QCFLAG_DMAMAP));
1413 nv_adma_register_mode(qc->ap);
1414 return ata_bmdma_qc_issue(qc);
1415 } else
1416 nv_adma_mode(qc->ap);
1417
1418 /* write append register, command tag in lower 8 bits
1419 and (number of cpbs to append -1) in top 8 bits */
1420 wmb();
1421
1422 if (curr_ncq != pp->last_issue_ncq) {
1423 /* Seems to need some delay before switching between NCQ and
1424 non-NCQ commands, else we get command timeouts and such. */
1425 udelay(20);
1426 pp->last_issue_ncq = curr_ncq;
1427 }
1428
1429 writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1430
1431 return 0;
1432 }
1433
1434 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1435 {
1436 struct ata_host *host = dev_instance;
1437 unsigned int i;
1438 unsigned int handled = 0;
1439 unsigned long flags;
1440
1441 spin_lock_irqsave(&host->lock, flags);
1442
1443 for (i = 0; i < host->n_ports; i++) {
1444 struct ata_port *ap = host->ports[i];
1445 struct ata_queued_cmd *qc;
1446
1447 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1448 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1449 handled += ata_bmdma_port_intr(ap, qc);
1450 } else {
1451 /*
1452 * No request pending? Clear interrupt status
1453 * anyway, in case there's one pending.
1454 */
1455 ap->ops->sff_check_status(ap);
1456 }
1457 }
1458
1459 spin_unlock_irqrestore(&host->lock, flags);
1460
1461 return IRQ_RETVAL(handled);
1462 }
1463
1464 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1465 {
1466 int i, handled = 0;
1467
1468 for (i = 0; i < host->n_ports; i++) {
1469 handled += nv_host_intr(host->ports[i], irq_stat);
1470 irq_stat >>= NV_INT_PORT_SHIFT;
1471 }
1472
1473 return IRQ_RETVAL(handled);
1474 }
1475
1476 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1477 {
1478 struct ata_host *host = dev_instance;
1479 u8 irq_stat;
1480 irqreturn_t ret;
1481
1482 spin_lock(&host->lock);
1483 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1484 ret = nv_do_interrupt(host, irq_stat);
1485 spin_unlock(&host->lock);
1486
1487 return ret;
1488 }
1489
1490 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1491 {
1492 struct ata_host *host = dev_instance;
1493 u8 irq_stat;
1494 irqreturn_t ret;
1495
1496 spin_lock(&host->lock);
1497 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1498 ret = nv_do_interrupt(host, irq_stat);
1499 spin_unlock(&host->lock);
1500
1501 return ret;
1502 }
1503
1504 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1505 {
1506 if (sc_reg > SCR_CONTROL)
1507 return -EINVAL;
1508
1509 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1510 return 0;
1511 }
1512
1513 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1514 {
1515 if (sc_reg > SCR_CONTROL)
1516 return -EINVAL;
1517
1518 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1519 return 0;
1520 }
1521
1522 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1523 unsigned long deadline)
1524 {
1525 struct ata_eh_context *ehc = &link->eh_context;
1526
1527 /* Do hardreset iff it's post-boot probing, please read the
1528 * comment above port ops for details.
1529 */
1530 if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1531 !ata_dev_enabled(link->device))
1532 sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1533 NULL, NULL);
1534 else {
1535 const unsigned int *timing = sata_ehc_deb_timing(ehc);
1536 int rc;
1537
1538 if (!(ehc->i.flags & ATA_EHI_QUIET))
1539 ata_link_info(link,
1540 "nv: skipping hardreset on occupied port\n");
1541
1542 /* make sure the link is online */
1543 rc = sata_link_resume(link, timing, deadline);
1544 /* whine about phy resume failure but proceed */
1545 if (rc && rc != -EOPNOTSUPP)
1546 ata_link_warn(link, "failed to resume link (errno=%d)\n",
1547 rc);
1548 }
1549
1550 /* device signature acquisition is unreliable */
1551 return -EAGAIN;
1552 }
1553
1554 static void nv_nf2_freeze(struct ata_port *ap)
1555 {
1556 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1557 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1558 u8 mask;
1559
1560 mask = ioread8(scr_addr + NV_INT_ENABLE);
1561 mask &= ~(NV_INT_ALL << shift);
1562 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1563 }
1564
1565 static void nv_nf2_thaw(struct ata_port *ap)
1566 {
1567 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1568 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1569 u8 mask;
1570
1571 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1572
1573 mask = ioread8(scr_addr + NV_INT_ENABLE);
1574 mask |= (NV_INT_MASK << shift);
1575 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1576 }
1577
1578 static void nv_ck804_freeze(struct ata_port *ap)
1579 {
1580 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1581 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1582 u8 mask;
1583
1584 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1585 mask &= ~(NV_INT_ALL << shift);
1586 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1587 }
1588
1589 static void nv_ck804_thaw(struct ata_port *ap)
1590 {
1591 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1592 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1593 u8 mask;
1594
1595 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1596
1597 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1598 mask |= (NV_INT_MASK << shift);
1599 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1600 }
1601
1602 static void nv_mcp55_freeze(struct ata_port *ap)
1603 {
1604 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1605 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1606 u32 mask;
1607
1608 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1609
1610 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1611 mask &= ~(NV_INT_ALL_MCP55 << shift);
1612 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1613 }
1614
1615 static void nv_mcp55_thaw(struct ata_port *ap)
1616 {
1617 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1618 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1619 u32 mask;
1620
1621 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1622
1623 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1624 mask |= (NV_INT_MASK_MCP55 << shift);
1625 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1626 }
1627
1628 static void nv_adma_error_handler(struct ata_port *ap)
1629 {
1630 struct nv_adma_port_priv *pp = ap->private_data;
1631 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1632 void __iomem *mmio = pp->ctl_block;
1633 int i;
1634 u16 tmp;
1635
1636 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1637 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1638 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1639 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1640 u32 status = readw(mmio + NV_ADMA_STAT);
1641 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1642 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1643
1644 ata_port_err(ap,
1645 "EH in ADMA mode, notifier 0x%X "
1646 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1647 "next cpb count 0x%X next cpb idx 0x%x\n",
1648 notifier, notifier_error, gen_ctl, status,
1649 cpb_count, next_cpb_idx);
1650
1651 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1652 struct nv_adma_cpb *cpb = &pp->cpb[i];
1653 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1654 ap->link.sactive & (1 << i))
1655 ata_port_err(ap,
1656 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1657 i, cpb->ctl_flags, cpb->resp_flags);
1658 }
1659 }
1660
1661 /* Push us back into port register mode for error handling. */
1662 nv_adma_register_mode(ap);
1663
1664 /* Mark all of the CPBs as invalid to prevent them from
1665 being executed */
1666 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1667 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1668
1669 /* clear CPB fetch count */
1670 writew(0, mmio + NV_ADMA_CPB_COUNT);
1671
1672 /* Reset channel */
1673 tmp = readw(mmio + NV_ADMA_CTL);
1674 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1675 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1676 udelay(1);
1677 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1678 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1679 }
1680
1681 ata_bmdma_error_handler(ap);
1682 }
1683
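/* SWNCQ keeps a software FIFO of commands that cannot be issued yet; these
two helpers push a qc onto the deferral queue and pop the oldest one off. */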
1684 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1685 {
1686 struct nv_swncq_port_priv *pp = ap->private_data;
1687 struct defer_queue *dq = &pp->defer_queue;
1688
1689 /* queue is full */
1690 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1691 dq->defer_bits |= (1 << qc->hw_tag);
1692 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1693 }
1694
1695 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1696 {
1697 struct nv_swncq_port_priv *pp = ap->private_data;
1698 struct defer_queue *dq = &pp->defer_queue;
1699 unsigned int tag;
1700
1701 if (dq->head == dq->tail) /* queue is empty */
1702 return NULL;
1703
1704 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1705 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1706 WARN_ON(!(dq->defer_bits & (1 << tag)));
1707 dq->defer_bits &= ~(1 << tag);
1708
1709 return ata_qc_from_tag(ap, tag);
1710 }
1711
1712 static void nv_swncq_fis_reinit(struct ata_port *ap)
1713 {
1714 struct nv_swncq_port_priv *pp = ap->private_data;
1715
1716 pp->dhfis_bits = 0;
1717 pp->dmafis_bits = 0;
1718 pp->sdbfis_bits = 0;
1719 pp->ncq_flags = 0;
1720 }
1721
1722 static void nv_swncq_pp_reinit(struct ata_port *ap)
1723 {
1724 struct nv_swncq_port_priv *pp = ap->private_data;
1725 struct defer_queue *dq = &pp->defer_queue;
1726
1727 dq->head = 0;
1728 dq->tail = 0;
1729 dq->defer_bits = 0;
1730 pp->qc_active = 0;
1731 pp->last_issue_tag = ATA_TAG_POISON;
1732 nv_swncq_fis_reinit(ap);
1733 }
1734
1735 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1736 {
1737 struct nv_swncq_port_priv *pp = ap->private_data;
1738
1739 writew(fis, pp->irq_block);
1740 }
1741
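/*
 * ata_bmdma_stop() takes a queued command but only uses its ->ap pointer,
 * so a throwaway qc on the stack is enough to stop the BMDMA engine
 * outside the context of any particular command.
 */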
1742 static void __ata_bmdma_stop(struct ata_port *ap)
1743 {
1744 struct ata_queued_cmd qc;
1745
1746 qc.ap = ap;
1747 ata_bmdma_stop(&qc);
1748 }
1749
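/*
 * Called from the error handler while NCQ commands are outstanding: dump
 * the SWNCQ bookkeeping and taskfile status for debugging, then quiesce
 * the port by resetting the per-port state, stopping the BMDMA engine and
 * acking both the SFF and SWNCQ interrupt status.
 */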
1750 static void nv_swncq_ncq_stop(struct ata_port *ap)
1751 {
1752 struct nv_swncq_port_priv *pp = ap->private_data;
1753 unsigned int i;
1754 u32 sactive;
1755 u32 done_mask;
1756
1757 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
1758 ap->qc_active, ap->link.sactive);
1759 ata_port_err(ap,
1760 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1761 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1762 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1763 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1764
1765 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1766 ap->ops->sff_check_status(ap),
1767 ioread8(ap->ioaddr.error_addr));
1768
1769 sactive = readl(pp->sactive_block);
1770 done_mask = pp->qc_active ^ sactive;
1771
1772 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1773 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1774 u8 err = 0;
1775 if (pp->qc_active & (1 << i))
1776 err = 0;
1777 else if (done_mask & (1 << i))
1778 err = 1;
1779 else
1780 continue;
1781
1782 ata_port_err(ap,
1783 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1784 (pp->dhfis_bits >> i) & 0x1,
1785 (pp->dmafis_bits >> i) & 0x1,
1786 (pp->sdbfis_bits >> i) & 0x1,
1787 (sactive >> i) & 0x1,
1788 (err ? "error! tag doesn't exist" : " "));
1789 }
1790
1791 nv_swncq_pp_reinit(ap);
1792 ap->ops->sff_irq_clear(ap);
1793 __ata_bmdma_stop(ap);
1794 nv_swncq_irq_clear(ap, 0xffff);
1795 }
1796
1797 static void nv_swncq_error_handler(struct ata_port *ap)
1798 {
1799 struct ata_eh_context *ehc = &ap->link.eh_context;
1800
1801 if (ap->link.sactive) {
1802 nv_swncq_ncq_stop(ap);
1803 ehc->i.action |= ATA_EH_RESET;
1804 }
1805
1806 ata_bmdma_error_handler(ap);
1807 }
1808
1809 #ifdef CONFIG_PM
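/*
 * Port power-management hooks: suspend masks the MCP55 interrupt sources
 * and turns off the SWNCQ enable bits; resume clears stale status,
 * unmasks the interrupts and switches SWNCQ back on.
 */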
1810 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1811 {
1812 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1813 u32 tmp;
1814
1815 /* clear irq */
1816 writel(~0, mmio + NV_INT_STATUS_MCP55);
1817
1818 /* disable irq */
1819 writel(0, mmio + NV_INT_ENABLE_MCP55);
1820
1821 /* disable swncq */
1822 tmp = readl(mmio + NV_CTL_MCP55);
1823 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1824 writel(tmp, mmio + NV_CTL_MCP55);
1825
1826 return 0;
1827 }
1828
1829 static int nv_swncq_port_resume(struct ata_port *ap)
1830 {
1831 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1832 u32 tmp;
1833
1834 /* clear irq */
1835 writel(~0, mmio + NV_INT_STATUS_MCP55);
1836
1837 /* enable irq */
1838 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1839
1840 /* enable swncq */
1841 tmp = readl(mmio + NV_CTL_MCP55);
1842 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1843
1844 return 0;
1845 }
1846 #endif
1847
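/*
 * One-time SWNCQ setup at probe time: clear the "ECO 398" bit in PCI
 * config register 0x7f (presumably a chip errata workaround), enable
 * SWNCQ on both ports, unmask the MCP55 interrupt sources and clear any
 * stale interrupt status.
 */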
1848 static void nv_swncq_host_init(struct ata_host *host)
1849 {
1850 u32 tmp;
1851 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1852 struct pci_dev *pdev = to_pci_dev(host->dev);
1853 u8 regval;
1854
1855 /* disable ECO 398 */
1856 pci_read_config_byte(pdev, 0x7f, &regval);
1857 regval &= ~(1 << 7);
1858 pci_write_config_byte(pdev, 0x7f, regval);
1859
1860 /* enable swncq */
1861 tmp = readl(mmio + NV_CTL_MCP55);
1862 dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
1863 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1864
1865 /* enable interrupt sources */
1866 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1867 dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
1868 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1869
1870 /* clear port irq */
1871 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1872 }
1873
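/*
 * Per-device configuration hook.  After the generic libata setup, drop
 * the queue depth to 1 (effectively disabling SWNCQ) for Maxtor drives
 * attached to MCP51, or to MCP55 revisions up to A2, which the checks
 * below treat as problematic combinations.
 */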
1874 static int nv_swncq_device_configure(struct scsi_device *sdev,
1875 struct queue_limits *lim)
1876 {
1877 struct ata_port *ap = ata_shost_to_port(sdev->host);
1878 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1879 struct ata_device *dev;
1880 int rc;
1881 u8 rev;
1882 u8 check_maxtor = 0;
1883 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1884
1885 rc = ata_scsi_device_configure(sdev, lim);
1886 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1887 /* Not a proper libata device, ignore */
1888 return rc;
1889
1890 dev = &ap->link.device[sdev->id];
1891 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1892 return rc;
1893
1894 /* if MCP51 and Maxtor, then disable ncq */
1895 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1896 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1897 check_maxtor = 1;
1898
1899 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1900 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1901 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1902 pci_read_config_byte(pdev, 0x8, &rev);
1903 if (rev <= 0xa2)
1904 check_maxtor = 1;
1905 }
1906
1907 if (!check_maxtor)
1908 return rc;
1909
1910 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1911
1912 if (strncmp(model_num, "Maxtor", 6) == 0) {
1913 ata_scsi_change_queue_depth(sdev, 1);
1914 ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1915 sdev->queue_depth);
1916 }
1917
1918 return rc;
1919 }
1920
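/*
 * Port init: keep the standard BMDMA resources since non-NCQ commands
 * still go through the legacy path, allocate the SWNCQ private data with
 * one PRD table per queue tag, and record the per-port locations of the
 * SActive, interrupt-status and tag registers.
 */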
1921 static int nv_swncq_port_start(struct ata_port *ap)
1922 {
1923 struct device *dev = ap->host->dev;
1924 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1925 struct nv_swncq_port_priv *pp;
1926 int rc;
1927
1928 /* we might fall back to bmdma, so allocate bmdma resources */
1929 rc = ata_bmdma_port_start(ap);
1930 if (rc)
1931 return rc;
1932
1933 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1934 if (!pp)
1935 return -ENOMEM;
1936
1937 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1938 &pp->prd_dma, GFP_KERNEL);
1939 if (!pp->prd)
1940 return -ENOMEM;
1941
1942 ap->private_data = pp;
1943 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1944 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1945 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1946
1947 return 0;
1948 }
1949
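/*
 * Command preparation: non-NCQ commands use the stock BMDMA prep; NCQ
 * commands only need their per-tag PRD table built (if DMA-mapped), since
 * the taskfile is sent later by nv_swncq_issue_atacmd().
 */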
1950 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1951 {
1952 if (qc->tf.protocol != ATA_PROT_NCQ) {
1953 ata_bmdma_qc_prep(qc);
1954 return AC_ERR_OK;
1955 }
1956
1957 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1958 return AC_ERR_OK;
1959
1960 nv_swncq_fill_sg(qc);
1961
1962 return AC_ERR_OK;
1963 }
1964
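/*
 * Build the BMDMA PRD table for an NCQ command in this tag's slice of
 * pp->prd.  Scatterlist entries are split so that no PRD entry crosses a
 * 64 KiB boundary (a length field of 0 encodes a full 64 KiB), and the
 * last entry is flagged with ATA_PRD_EOT.  Illustrative example with
 * made-up addresses: a 24 KiB segment at 0x1f000 becomes two entries,
 * 4 KiB at 0x1f000 followed by 20 KiB at 0x20000.
 */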
1965 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1966 {
1967 struct ata_port *ap = qc->ap;
1968 struct scatterlist *sg;
1969 struct nv_swncq_port_priv *pp = ap->private_data;
1970 struct ata_bmdma_prd *prd;
1971 unsigned int si, idx;
1972
1973 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
1974
1975 idx = 0;
1976 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1977 u32 addr, offset;
1978 u32 sg_len, len;
1979
1980 addr = (u32)sg_dma_address(sg);
1981 sg_len = sg_dma_len(sg);
1982
1983 while (sg_len) {
1984 offset = addr & 0xffff;
1985 len = sg_len;
1986 if ((offset + sg_len) > 0x10000)
1987 len = 0x10000 - offset;
1988
1989 prd[idx].addr = cpu_to_le32(addr);
1990 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1991
1992 idx++;
1993 sg_len -= len;
1994 addr += len;
1995 }
1996 }
1997
1998 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1999 }
2000
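/*
 * Send one NCQ command to the drive: set its bit in SActive, update the
 * driver's bookkeeping (qc_active, last_issue_tag, per-tag FIS tracking),
 * then load the taskfile and the command register through the SFF ops.
 */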
2001 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2002 struct ata_queued_cmd *qc)
2003 {
2004 struct nv_swncq_port_priv *pp = ap->private_data;
2005
2006 if (qc == NULL)
2007 return 0;
2008
2009 writel((1 << qc->hw_tag), pp->sactive_block);
2010 pp->last_issue_tag = qc->hw_tag;
2011 pp->dhfis_bits &= ~(1 << qc->hw_tag);
2012 pp->dmafis_bits &= ~(1 << qc->hw_tag);
2013 pp->qc_active |= (0x1 << qc->hw_tag);
2014
2015 trace_ata_tf_load(ap, &qc->tf);
2016 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2017 trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
2018 ap->ops->sff_exec_command(ap, &qc->tf);
2019
2020 return 0;
2021 }
2022
2023 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2024 {
2025 struct ata_port *ap = qc->ap;
2026 struct nv_swncq_port_priv *pp = ap->private_data;
2027
2028 if (qc->tf.protocol != ATA_PROT_NCQ)
2029 return ata_bmdma_qc_issue(qc);
2030
2031 if (!pp->qc_active)
2032 nv_swncq_issue_atacmd(ap, qc);
2033 else
2034 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2035
2036 return 0;
2037 }
2038
2039 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2040 {
2041 u32 serror;
2042 struct ata_eh_info *ehi = &ap->link.eh_info;
2043
2044 ata_ehi_clear_desc(ehi);
2045
2046 /* AHCI needs SError cleared; otherwise, it might lock up */
2047 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2048 sata_scr_write(&ap->link, SCR_ERROR, serror);
2049
2050 /* analyze @fis */
2051 if (fis & NV_SWNCQ_IRQ_ADDED)
2052 ata_ehi_push_desc(ehi, "hot plug");
2053 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2054 ata_ehi_push_desc(ehi, "hot unplug");
2055
2056 ata_ehi_hotplugged(ehi);
2057
2058 /* okay, let's hand over to EH */
2059 ehi->serror |= serror;
2060
2061 ata_port_freeze(ap);
2062 }
2063
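/*
 * Handle a Set Device Bits FIS: check the BMDMA engine for errors,
 * complete every command whose SActive bit the device has cleared, then
 * decide what to send next.  If the last issued command never got its
 * Device-to-Host Register FIS (or a backout was seen) it is issued again;
 * otherwise the next deferred command, if any, is started.
 */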
2064 static int nv_swncq_sdbfis(struct ata_port *ap)
2065 {
2066 struct ata_queued_cmd *qc;
2067 struct nv_swncq_port_priv *pp = ap->private_data;
2068 struct ata_eh_info *ehi = &ap->link.eh_info;
2069 u32 sactive;
2070 u32 done_mask;
2071 u8 host_stat;
2072 u8 lack_dhfis = 0;
2073
2074 host_stat = ap->ops->bmdma_status(ap);
2075 trace_ata_bmdma_status(ap, host_stat);
2076 if (unlikely(host_stat & ATA_DMA_ERR)) {
2077 /* error when transferring data to/from memory */
2078 ata_ehi_clear_desc(ehi);
2079 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2080 ehi->err_mask |= AC_ERR_HOST_BUS;
2081 ehi->action |= ATA_EH_RESET;
2082 return -EINVAL;
2083 }
2084
2085 ap->ops->sff_irq_clear(ap);
2086 __ata_bmdma_stop(ap);
2087
2088 sactive = readl(pp->sactive_block);
2089 done_mask = pp->qc_active ^ sactive;
2090
2091 pp->qc_active &= ~done_mask;
2092 pp->dhfis_bits &= ~done_mask;
2093 pp->dmafis_bits &= ~done_mask;
2094 pp->sdbfis_bits |= done_mask;
2095 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2096
2097 if (!ap->qc_active) {
2098 ata_port_dbg(ap, "over\n");
2099 nv_swncq_pp_reinit(ap);
2100 return 0;
2101 }
2102
2103 if (pp->qc_active & pp->dhfis_bits)
2104 return 0;
2105
2106 if ((pp->ncq_flags & ncq_saw_backout) ||
2107 (pp->qc_active ^ pp->dhfis_bits))
2108 /* if the controller did not get a Device-to-Host Register FIS,
2109 * the driver needs to reissue the command.
2110 */
2111 lack_dhfis = 1;
2112
2113 ata_port_dbg(ap, "QC: qc_active 0x%llx,"
2114 "SWNCQ:qc_active 0x%X defer_bits %X "
2115 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2116 ap->qc_active, pp->qc_active,
2117 pp->defer_queue.defer_bits, pp->dhfis_bits,
2118 pp->dmafis_bits, pp->last_issue_tag);
2119
2120 nv_swncq_fis_reinit(ap);
2121
2122 if (lack_dhfis) {
2123 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2124 nv_swncq_issue_atacmd(ap, qc);
2125 return 0;
2126 }
2127
2128 if (pp->defer_queue.defer_bits) {
2129 /* send deferral queue command */
2130 qc = nv_swncq_qc_from_dq(ap);
2131 WARN_ON(qc == NULL);
2132 nv_swncq_issue_atacmd(ap, qc);
2133 }
2134
2135 return 0;
2136 }
2137
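/*
 * Read which tag the controller is currently setting up a DMA transfer
 * for; the tag apparently sits in bits 6:2 of the per-port tag register.
 */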
2138 static inline u32 nv_swncq_tag(struct ata_port *ap)
2139 {
2140 struct nv_swncq_port_priv *pp = ap->private_data;
2141 u32 tag;
2142
2143 tag = readb(pp->tag_block) >> 2;
2144 return (tag & 0x1f);
2145 }
2146
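/*
 * Handle a DMA Setup FIS: stop any previous BMDMA transfer, ask the
 * controller which tag it wants to transfer, point the BMDMA engine at
 * that tag's PRD table, set the transfer direction and start the engine.
 */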
2147 static void nv_swncq_dmafis(struct ata_port *ap)
2148 {
2149 struct ata_queued_cmd *qc;
2150 unsigned int rw;
2151 u8 dmactl;
2152 u32 tag;
2153 struct nv_swncq_port_priv *pp = ap->private_data;
2154
2155 __ata_bmdma_stop(ap);
2156 tag = nv_swncq_tag(ap);
2157
2158 ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
2159 qc = ata_qc_from_tag(ap, tag);
2160
2161 if (unlikely(!qc))
2162 return;
2163
2164 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2165
2166 /* load PRD table addr. */
2167 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2168 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2169
2170 /* specify data direction, triple-check start bit is clear */
2171 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2172 dmactl &= ~ATA_DMA_WR;
2173 if (!rw)
2174 dmactl |= ATA_DMA_WR;
2175
2176 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2177 }
2178
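/*
 * Per-port SWNCQ interrupt processing.  The port's slice of the MCP55
 * status word is decoded in stages: hotplug events freeze the port for
 * EH, device errors freeze it with AC_ERR_DEV, an SDB FIS completes
 * finished commands, a D2H Register FIS marks the last issued command as
 * accepted (and may let the next deferred command go out), and a DMA
 * Setup FIS starts the data transfer for the tag the device selected.
 */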
2179 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2180 {
2181 struct nv_swncq_port_priv *pp = ap->private_data;
2182 struct ata_queued_cmd *qc;
2183 struct ata_eh_info *ehi = &ap->link.eh_info;
2184 u32 serror;
2185 u8 ata_stat;
2186
2187 ata_stat = ap->ops->sff_check_status(ap);
2188 nv_swncq_irq_clear(ap, fis);
2189 if (!fis)
2190 return;
2191
2192 if (ata_port_is_frozen(ap))
2193 return;
2194
2195 if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2196 nv_swncq_hotplug(ap, fis);
2197 return;
2198 }
2199
2200 if (!pp->qc_active)
2201 return;
2202
2203 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2204 return;
2205 ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2206
2207 if (ata_stat & ATA_ERR) {
2208 ata_ehi_clear_desc(ehi);
2209 ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2210 ehi->err_mask |= AC_ERR_DEV;
2211 ehi->serror |= serror;
2212 ehi->action |= ATA_EH_RESET;
2213 ata_port_freeze(ap);
2214 return;
2215 }
2216
2217 if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2218 /* If the IRQ indicates a backout condition, the driver
2219 * must issue the command again later.
2220 */
2221 pp->ncq_flags |= ncq_saw_backout;
2222 }
2223
2224 if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2225 pp->ncq_flags |= ncq_saw_sdb;
2226 ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
2227 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2228 pp->qc_active, pp->dhfis_bits,
2229 pp->dmafis_bits, readl(pp->sactive_block));
2230 if (nv_swncq_sdbfis(ap) < 0)
2231 goto irq_error;
2232 }
2233
2234 if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2235 /* The interrupt indicates the new command
2236 * was transmitted correctly to the drive.
2237 */
2238 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2239 pp->ncq_flags |= ncq_saw_d2h;
2240 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2241 ata_ehi_push_desc(ehi, "illegal fis transaction");
2242 ehi->err_mask |= AC_ERR_HSM;
2243 ehi->action |= ATA_EH_RESET;
2244 goto irq_error;
2245 }
2246
2247 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2248 !(pp->ncq_flags & ncq_saw_dmas)) {
2249 ata_stat = ap->ops->sff_check_status(ap);
2250 if (ata_stat & ATA_BUSY)
2251 goto irq_exit;
2252
2253 if (pp->defer_queue.defer_bits) {
2254 ata_port_dbg(ap, "send next command\n");
2255 qc = nv_swncq_qc_from_dq(ap);
2256 nv_swncq_issue_atacmd(ap, qc);
2257 }
2258 }
2259 }
2260
2261 if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2262 /* program the DMA controller with the appropriate PRD table
2263 * and start the DMA transfer for the requested command.
2264 */
2265 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2266 pp->ncq_flags |= ncq_saw_dmas;
2267 nv_swncq_dmafis(ap);
2268 }
2269
2270 irq_exit:
2271 return;
2272 irq_error:
2273 ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2274 ata_port_freeze(ap);
2275 return;
2276 }
2277
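/*
 * Top-level SWNCQ interrupt handler: read the shared MCP55 status
 * register once and hand each port its 16-bit slice, using the SWNCQ
 * path when NCQ commands are outstanding and the legacy nv_host_intr()
 * path otherwise.
 */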
2278 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2279 {
2280 struct ata_host *host = dev_instance;
2281 unsigned int i;
2282 unsigned int handled = 0;
2283 unsigned long flags;
2284 u32 irq_stat;
2285
2286 spin_lock_irqsave(&host->lock, flags);
2287
2288 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2289
2290 for (i = 0; i < host->n_ports; i++) {
2291 struct ata_port *ap = host->ports[i];
2292
2293 if (ap->link.sactive) {
2294 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2295 handled = 1;
2296 } else {
2297 if (irq_stat) /* preserve the hotplug bits */
2298 nv_swncq_irq_clear(ap, 0xfff0);
2299
2300 handled += nv_host_intr(ap, (u8)irq_stat);
2301 }
2302 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2303 }
2304
2305 spin_unlock_irqrestore(&host->lock, flags);
2306
2307 return IRQ_RETVAL(handled);
2308 }
2309
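/*
 * PCI probe: make sure the function exposes the BARs expected of the
 * SATA (not IDE) controller, pick the operating mode (ADMA on CK804 or
 * SWNCQ on MCP5x when the corresponding module parameter allows it,
 * plain BMDMA otherwise), map BAR5, set up SCR access and the
 * chip-specific extras, then activate the host.
 */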
2310 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2311 {
2312 const struct ata_port_info *ppi[] = { NULL, NULL };
2313 struct nv_pi_priv *ipriv;
2314 struct ata_host *host;
2315 struct nv_host_priv *hpriv;
2316 int rc;
2317 u32 bar;
2318 void __iomem *base;
2319 unsigned long type = ent->driver_data;
2320
2321 // Make sure this is a SATA controller by checking that every BAR is
2322 // populated (NVIDIA SATA controllers always expose six BARs). Otherwise,
2323 // it's an IDE controller and we ignore it.
2324 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
2325 if (pci_resource_start(pdev, bar) == 0)
2326 return -ENODEV;
2327
2328 ata_print_version_once(&pdev->dev, DRV_VERSION);
2329
2330 rc = pcim_enable_device(pdev);
2331 if (rc)
2332 return rc;
2333
2334 /* determine type and allocate host */
2335 if (type == CK804 && adma_enabled) {
2336 dev_notice(&pdev->dev, "Using ADMA mode\n");
2337 type = ADMA;
2338 } else if (type == MCP5x && swncq_enabled) {
2339 dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2340 type = SWNCQ;
2341 }
2342
2343 ppi[0] = &nv_port_info[type];
2344 ipriv = ppi[0]->private_data;
2345 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2346 if (rc)
2347 return rc;
2348
2349 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2350 if (!hpriv)
2351 return -ENOMEM;
2352 hpriv->type = type;
2353 host->private_data = hpriv;
2354
2355 /* request and iomap NV_MMIO_BAR */
2356 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2357 if (rc)
2358 return rc;
2359
2360 /* configure SCR access */
2361 base = host->iomap[NV_MMIO_BAR];
2362 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2363 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2364
2365 /* enable SATA space for CK804 */
2366 if (type >= CK804) {
2367 u8 regval;
2368
2369 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2370 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2371 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2372 }
2373
2374 /* init ADMA */
2375 if (type == ADMA) {
2376 rc = nv_adma_host_init(host);
2377 if (rc)
2378 return rc;
2379 } else if (type == SWNCQ)
2380 nv_swncq_host_init(host);
2381
2382 if (msi_enabled) {
2383 dev_notice(&pdev->dev, "Using MSI\n");
2384 pci_enable_msi(pdev);
2385 }
2386
2387 pci_set_master(pdev);
2388 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2389 }
2390
2391 #ifdef CONFIG_PM_SLEEP
2392 static int nv_pci_device_resume(struct pci_dev *pdev)
2393 {
2394 struct ata_host *host = pci_get_drvdata(pdev);
2395 struct nv_host_priv *hpriv = host->private_data;
2396 int rc;
2397
2398 rc = ata_pci_device_do_resume(pdev);
2399 if (rc)
2400 return rc;
2401
2402 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2403 if (hpriv->type >= CK804) {
2404 u8 regval;
2405
2406 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2407 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2408 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2409 }
2410 if (hpriv->type == ADMA) {
2411 u32 tmp32;
2412 struct nv_adma_port_priv *pp;
2413 /* enable/disable ADMA on the ports appropriately */
2414 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2415
2416 pp = host->ports[0]->private_data;
2417 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2418 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2419 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2420 else
2421 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2422 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2423 pp = host->ports[1]->private_data;
2424 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2425 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2426 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2427 else
2428 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2429 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2430
2431 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2432 }
2433 }
2434
2435 ata_host_resume(host);
2436
2437 return 0;
2438 }
2439 #endif
2440
2441 static void nv_ck804_host_stop(struct ata_host *host)
2442 {
2443 struct pci_dev *pdev = to_pci_dev(host->dev);
2444 u8 regval;
2445
2446 /* disable SATA space for CK804 */
2447 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2448 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2449 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2450 }
2451
2452 static void nv_adma_host_stop(struct ata_host *host)
2453 {
2454 struct pci_dev *pdev = to_pci_dev(host->dev);
2455 u32 tmp32;
2456
2457 /* disable ADMA on the ports */
2458 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2459 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2460 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2461 NV_MCP_SATA_CFG_20_PORT1_EN |
2462 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2463
2464 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2465
2466 nv_ck804_host_stop(host);
2467 }
2468
2469 module_pci_driver(nv_pci_driver);
2470
2471 module_param_named(adma, adma_enabled, bool, 0444);
2472 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2473 module_param_named(swncq, swncq_enabled, bool, 0444);
2474 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2475 module_param_named(msi, msi_enabled, bool, 0444);
2476 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2477