1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
4 *
5 * Copyright 2005 Tejun Heo
6 *
7 * Based on preview driver from Silicon Image.
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/gfp.h>
13 #include <linux/pci.h>
14 #include <linux/blkdev.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/device.h>
19 #include <scsi/scsi_host.h>
20 #include <scsi/scsi_cmnd.h>
21 #include <linux/libata.h>
22
23 #define DRV_NAME "sata_sil24"
24 #define DRV_VERSION "1.1"
25
26 /*
27 * Port request block (PRB) 32 bytes
28 */
29 struct sil24_prb {
30 __le16 ctrl;
31 __le16 prot;
32 __le32 rx_cnt;
33 u8 fis[6 * 4];
34 };
35
36 /*
37 * Scatter gather entry (SGE) 16 bytes
38 */
39 struct sil24_sge {
40 __le64 addr;
41 __le32 cnt;
42 __le32 flags;
43 };
44
45
46 enum {
47 SIL24_HOST_BAR = 0,
48 SIL24_PORT_BAR = 2,
49
/* sil24 fetches in chunks of 64 bytes.  The first block
 * contains the PRB and two SGEs.  Each subsequent block
 * consists of four SGEs and is called an SGT.  Calculate the
 * number of SGTs that fit into one page.
 */
55 SIL24_PRB_SZ = sizeof(struct sil24_prb)
56 + 2 * sizeof(struct sil24_sge),
57 SIL24_MAX_SGT = (PAGE_SIZE - SIL24_PRB_SZ)
58 / (4 * sizeof(struct sil24_sge)),
59
/* This gives us one unused SGE for ATA commands.  This extra SGE
 * is used to store the CDB for ATAPI devices.
 */
63 SIL24_MAX_SGE = 4 * SIL24_MAX_SGT + 1,
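/* e.g. with 4 KiB pages: SIL24_PRB_SZ = 64, SIL24_MAX_SGT = 63, SIL24_MAX_SGE = 253 */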
64
65 /*
66 * Global controller registers (128 bytes @ BAR0)
67 */
68 /* 32 bit regs */
69 HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
70 HOST_CTRL = 0x40,
71 HOST_IRQ_STAT = 0x44,
72 HOST_PHY_CFG = 0x48,
73 HOST_BIST_CTRL = 0x50,
74 HOST_BIST_PTRN = 0x54,
75 HOST_BIST_STAT = 0x58,
76 HOST_MEM_BIST_STAT = 0x5c,
77 HOST_FLASH_CMD = 0x70,
78 /* 8 bit regs */
79 HOST_FLASH_DATA = 0x74,
80 HOST_TRANSITION_DETECT = 0x75,
81 HOST_GPIO_CTRL = 0x76,
82 HOST_I2C_ADDR = 0x78, /* 32 bit */
83 HOST_I2C_DATA = 0x7c,
84 HOST_I2C_XFER_CNT = 0x7e,
85 HOST_I2C_CTRL = 0x7f,
86
87 /* HOST_SLOT_STAT bits */
88 HOST_SSTAT_ATTN = (1 << 31),
89
90 /* HOST_CTRL bits */
91 HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
92 HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
93 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
94 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
95 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
96 HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */
97
98 /*
99 * Port registers
100 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
101 */
102 PORT_REGS_SIZE = 0x2000,
103
104 PORT_LRAM = 0x0000, /* 31 LRAM slots and PMP regs */
105 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
106
107 PORT_PMP = 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
108 PORT_PMP_STATUS = 0x0000, /* port device status offset */
109 PORT_PMP_QACTIVE = 0x0004, /* port device QActive offset */
110 PORT_PMP_SIZE = 0x0008, /* 8 bytes per PMP */
111
112 /* 32 bit regs */
113 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
114 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
115 PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
116 PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
117 PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
118 PORT_ACTIVATE_UPPER_ADDR= 0x101c,
119 PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
120 PORT_CMD_ERR = 0x1024, /* command error number */
121 PORT_FIS_CFG = 0x1028,
122 PORT_FIFO_THRES = 0x102c,
123 /* 16 bit regs */
124 PORT_DECODE_ERR_CNT = 0x1040,
125 PORT_DECODE_ERR_THRESH = 0x1042,
126 PORT_CRC_ERR_CNT = 0x1044,
127 PORT_CRC_ERR_THRESH = 0x1046,
128 PORT_HSHK_ERR_CNT = 0x1048,
129 PORT_HSHK_ERR_THRESH = 0x104a,
130 /* 32 bit regs */
131 PORT_PHY_CFG = 0x1050,
132 PORT_SLOT_STAT = 0x1800,
133 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
134 PORT_CONTEXT = 0x1e04,
135 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
136 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
137 PORT_SCONTROL = 0x1f00,
138 PORT_SSTATUS = 0x1f04,
139 PORT_SERROR = 0x1f08,
140 PORT_SACTIVE = 0x1f0c,
141
142 /* PORT_CTRL_STAT bits */
143 PORT_CS_PORT_RST = (1 << 0), /* port reset */
144 PORT_CS_DEV_RST = (1 << 1), /* device reset */
145 PORT_CS_INIT = (1 << 2), /* port initialize */
146 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
147 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
148 PORT_CS_PMP_RESUME = (1 << 6), /* PMP resume */
149 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
150 PORT_CS_PMP_EN = (1 << 13), /* port multiplier enable */
151 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
152
153 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
154 /* bits[11:0] are masked */
155 PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
156 PORT_IRQ_ERROR = (1 << 1), /* command execution error */
157 PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
158 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
159 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
160 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
161 PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
162 PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
163 PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
164 PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
165 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
166 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
167
168 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
169 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
170 PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,
171
172 /* bits[27:16] are unmasked (raw) */
173 PORT_IRQ_RAW_SHIFT = 16,
174 PORT_IRQ_MASKED_MASK = 0x7ff,
175 PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
176
177 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
178 PORT_IRQ_STEER_SHIFT = 30,
179 PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
180
181 /* PORT_CMD_ERR constants */
182 PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
183 PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
184 PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
185 PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
186 PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
187 PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
188 PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
189 PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
190 PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
191 PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
192 PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
193 PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
194 PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
195 PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
196 PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
197 PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
198 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
199 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
200 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
201 PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
203 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
204
205 /* bits of PRB control field */
206 PRB_CTRL_PROTOCOL = (1 << 0), /* override def. ATA protocol */
207 PRB_CTRL_PACKET_READ = (1 << 4), /* PACKET cmd read */
208 PRB_CTRL_PACKET_WRITE = (1 << 5), /* PACKET cmd write */
209 PRB_CTRL_NIEN = (1 << 6), /* Mask completion irq */
210 PRB_CTRL_SRST = (1 << 7), /* Soft reset request (ign BSY?) */
211
212 /* PRB protocol field */
213 PRB_PROT_PACKET = (1 << 0),
214 PRB_PROT_TCQ = (1 << 1),
215 PRB_PROT_NCQ = (1 << 2),
216 PRB_PROT_READ = (1 << 3),
217 PRB_PROT_WRITE = (1 << 4),
218 PRB_PROT_TRANSPARENT = (1 << 5),
219
220 /*
221 * Other constants
222 */
223 SGE_TRM = (1 << 31), /* Last SGE in chain */
224 SGE_LNK = (1 << 30), /* linked list
225 Points to SGT, not SGE */
226 SGE_DRD = (1 << 29), /* discard data read (/dev/null)
227 data address ignored */
228
229 SIL24_MAX_CMDS = 31,
230
231 /* board id */
232 BID_SIL3124 = 0,
233 BID_SIL3132 = 1,
234 BID_SIL3131 = 2,
235
236 /* host flags */
237 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
238 ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
239 ATA_FLAG_AN | ATA_FLAG_PMP,
240 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
241
242 IRQ_STAT_4PORTS = 0xf,
243 };
244
245 struct sil24_ata_block {
246 struct sil24_prb prb;
247 struct sil24_sge sge[SIL24_MAX_SGE];
248 };
249
250 struct sil24_atapi_block {
251 struct sil24_prb prb;
252 u8 cdb[16];
253 struct sil24_sge sge[SIL24_MAX_SGE];
254 };
255
256 union sil24_cmd_block {
257 struct sil24_ata_block ata;
258 struct sil24_atapi_block atapi;
259 };
260
261 static const struct sil24_cerr_info {
262 unsigned int err_mask, action;
263 const char *desc;
264 } sil24_cerr_db[] = {
265 [0] = { AC_ERR_DEV, 0,
266 "device error" },
267 [PORT_CERR_DEV] = { AC_ERR_DEV, 0,
268 "device error via D2H FIS" },
269 [PORT_CERR_SDB] = { AC_ERR_DEV, 0,
270 "device error via SDB FIS" },
271 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_RESET,
272 "error in data FIS" },
273 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_RESET,
274 "failed to transmit command FIS" },
275 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
276 "protocol mismatch" },
277 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET,
278 "data direction mismatch" },
279 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
280 "ran out of SGEs while writing" },
281 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
282 "ran out of SGEs while reading" },
283 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET,
284 "invalid data direction for ATAPI CDB" },
285 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
286 "SGT not on qword boundary" },
287 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
288 "PCI target abort while fetching SGT" },
289 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
290 "PCI master abort while fetching SGT" },
291 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
292 "PCI parity error while fetching SGT" },
293 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
294 "PRB not on qword boundary" },
295 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
296 "PCI target abort while fetching PRB" },
297 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
298 "PCI master abort while fetching PRB" },
299 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
300 "PCI parity error while fetching PRB" },
301 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
302 "undefined error while transferring data" },
303 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
304 "PCI target abort while transferring data" },
305 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
306 "PCI master abort while transferring data" },
307 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
308 "PCI parity error while transferring data" },
309 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_RESET,
310 "FIS received while sending service FIS" },
311 };
312
313 /*
314 * ap->private_data
315 *
316 * The preview driver always returned 0 for status. We emulate it
317 * here from the previous interrupt.
318 */
319 struct sil24_port_priv {
union sil24_cmd_block *cmd_block; /* SIL24_MAX_CMDS (31) cmd blocks */
321 dma_addr_t cmd_block_dma; /* DMA base addr for them */
322 int do_port_rst;
323 };
324
325 static void sil24_dev_config(struct ata_device *dev);
326 static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
327 static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
328 static int sil24_qc_defer(struct ata_queued_cmd *qc);
329 static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
330 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
331 static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
332 static void sil24_pmp_attach(struct ata_port *ap);
333 static void sil24_pmp_detach(struct ata_port *ap);
334 static void sil24_freeze(struct ata_port *ap);
335 static void sil24_thaw(struct ata_port *ap);
336 static int sil24_softreset(struct ata_link *link, unsigned int *class,
337 unsigned long deadline);
338 static int sil24_hardreset(struct ata_link *link, unsigned int *class,
339 unsigned long deadline);
340 static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
341 unsigned long deadline);
342 static void sil24_error_handler(struct ata_port *ap);
343 static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
344 static int sil24_port_start(struct ata_port *ap);
345 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
346 #ifdef CONFIG_PM_SLEEP
347 static int sil24_pci_device_resume(struct pci_dev *pdev);
348 #endif
349 #ifdef CONFIG_PM
350 static int sil24_port_resume(struct ata_port *ap);
351 #endif
352
353 static const struct pci_device_id sil24_pci_tbl[] = {
354 { PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
355 { PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
356 { PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
357 { PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
358 { PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 },
359 { PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
360 { PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },
361
362 { } /* terminate list */
363 };
364
365 static struct pci_driver sil24_pci_driver = {
366 .name = DRV_NAME,
367 .id_table = sil24_pci_tbl,
368 .probe = sil24_init_one,
369 .remove = ata_pci_remove_one,
370 #ifdef CONFIG_PM_SLEEP
371 .suspend = ata_pci_device_suspend,
372 .resume = sil24_pci_device_resume,
373 #endif
374 };
375
376 static const struct scsi_host_template sil24_sht = {
377 __ATA_BASE_SHT(DRV_NAME),
378 .can_queue = SIL24_MAX_CMDS,
379 .sg_tablesize = SIL24_MAX_SGE,
380 .dma_boundary = ATA_DMA_BOUNDARY,
381 .tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
382 .sdev_groups = ata_ncq_sdev_groups,
383 .change_queue_depth = ata_scsi_change_queue_depth,
384 .device_configure = ata_scsi_device_configure
385 };
386
387 static struct ata_port_operations sil24_ops = {
388 .inherits = &sata_pmp_port_ops,
389
390 .qc_defer = sil24_qc_defer,
391 .qc_prep = sil24_qc_prep,
392 .qc_issue = sil24_qc_issue,
393 .qc_fill_rtf = sil24_qc_fill_rtf,
394
395 .freeze = sil24_freeze,
396 .thaw = sil24_thaw,
397 .softreset = sil24_softreset,
398 .hardreset = sil24_hardreset,
399 .pmp_softreset = sil24_softreset,
400 .pmp_hardreset = sil24_pmp_hardreset,
401 .error_handler = sil24_error_handler,
402 .post_internal_cmd = sil24_post_internal_cmd,
403 .dev_config = sil24_dev_config,
404
405 .scr_read = sil24_scr_read,
406 .scr_write = sil24_scr_write,
407 .pmp_attach = sil24_pmp_attach,
408 .pmp_detach = sil24_pmp_detach,
409
410 .port_start = sil24_port_start,
411 #ifdef CONFIG_PM
412 .port_resume = sil24_port_resume,
413 #endif
414 };
415
static bool sata_sil24_msi; /* Enable MSI (default: off) */
417 module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
418 MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");
419
420 /*
421 * Use bits 30-31 of port_flags to encode available port numbers.
* Current maximum is 4.
423 */
424 #define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
425 #define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
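/* e.g. SIL24_NPORTS2FLAG(4) stores 3 in bits 30-31 and SIL24_FLAG2NPORTS() recovers 4 */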
426
427 static const struct ata_port_info sil24_port_info[] = {
428 /* sil_3124 */
429 {
430 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
431 SIL24_FLAG_PCIX_IRQ_WOC,
432 .pio_mask = ATA_PIO4,
433 .mwdma_mask = ATA_MWDMA2,
434 .udma_mask = ATA_UDMA5,
435 .port_ops = &sil24_ops,
436 },
437 /* sil_3132 */
438 {
439 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
440 .pio_mask = ATA_PIO4,
441 .mwdma_mask = ATA_MWDMA2,
442 .udma_mask = ATA_UDMA5,
443 .port_ops = &sil24_ops,
444 },
445 /* sil_3131/sil_3531 */
446 {
447 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
448 .pio_mask = ATA_PIO4,
449 .mwdma_mask = ATA_MWDMA2,
450 .udma_mask = ATA_UDMA5,
451 .port_ops = &sil24_ops,
452 },
453 };
454
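/*
 * libata's internal (EH) commands carry the out-of-range ATA_TAG_INTERNAL;
 * map them to slot 0.  This is safe because libata issues internal commands
 * only while the port is otherwise idle.
 */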
static int sil24_tag(int tag)
456 {
457 if (unlikely(ata_tag_internal(tag)))
458 return 0;
459 return tag;
460 }
461
static unsigned long sil24_port_offset(struct ata_port *ap)
463 {
464 return ap->port_no * PORT_REGS_SIZE;
465 }
466
static void __iomem *sil24_port_base(struct ata_port *ap)
468 {
469 return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
470 }
471
static void sil24_dev_config(struct ata_device *dev)
473 {
474 void __iomem *port = sil24_port_base(dev->link->ap);
475
476 if (dev->cdb_len == 16)
477 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
478 else
479 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
480 }
481
static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
483 {
484 void __iomem *port = sil24_port_base(ap);
485 struct sil24_prb __iomem *prb;
486 u8 fis[6 * 4];
487
488 prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
489 memcpy_fromio(fis, prb->fis, sizeof(fis));
490 ata_tf_from_fis(fis, tf);
491 }
492
493 static int sil24_scr_map[] = {
494 [SCR_CONTROL] = 0,
495 [SCR_STATUS] = 1,
496 [SCR_ERROR] = 2,
497 [SCR_ACTIVE] = 3,
498 };
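/*
 * The four SCRs sit back to back starting at PORT_SCONTROL (0x1f00), so each
 * map entry above is simply the 32-bit word index from that base.
 */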
499
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
501 {
502 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
503
504 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
505 *val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
506 return 0;
507 }
508 return -EINVAL;
509 }
510
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
512 {
513 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
514
515 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
516 writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
517 return 0;
518 }
519 return -EINVAL;
520 }
521
static void sil24_config_port(struct ata_port *ap)
523 {
524 void __iomem *port = sil24_port_base(ap);
525
526 /* configure IRQ WoC */
527 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
528 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
529 else
530 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
531
532 /* zero error counters. */
533 writew(0x8000, port + PORT_DECODE_ERR_THRESH);
534 writew(0x8000, port + PORT_CRC_ERR_THRESH);
535 writew(0x8000, port + PORT_HSHK_ERR_THRESH);
536 writew(0x0000, port + PORT_DECODE_ERR_CNT);
537 writew(0x0000, port + PORT_CRC_ERR_CNT);
538 writew(0x0000, port + PORT_HSHK_ERR_CNT);
539
540 /* always use 64bit activation */
541 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
542
543 /* clear port multiplier enable and resume bits */
544 writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
545 }
546
static void sil24_config_pmp(struct ata_port *ap, int attached)
548 {
549 void __iomem *port = sil24_port_base(ap);
550
551 if (attached)
552 writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
553 else
554 writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
555 }
556
static void sil24_clear_pmp(struct ata_port *ap)
558 {
559 void __iomem *port = sil24_port_base(ap);
560 int i;
561
562 writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
563
564 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
565 void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;
566
567 writel(0, pmp_base + PORT_PMP_STATUS);
568 writel(0, pmp_base + PORT_PMP_QACTIVE);
569 }
570 }
571
static int sil24_init_port(struct ata_port *ap)
573 {
574 void __iomem *port = sil24_port_base(ap);
575 struct sil24_port_priv *pp = ap->private_data;
576 u32 tmp;
577
578 /* clear PMP error status */
579 if (sata_pmp_attached(ap))
580 sil24_clear_pmp(ap);
581
582 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
583 ata_wait_register(ap, port + PORT_CTRL_STAT,
584 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
585 tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
586 PORT_CS_RDY, 0, 10, 100);
587
588 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
589 pp->do_port_rst = 1;
590 ap->link.eh_context.i.action |= ATA_EH_RESET;
591 return -EIO;
592 }
593
594 return 0;
595 }
596
static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
598 const struct ata_taskfile *tf,
599 int is_cmd, u32 ctrl,
600 unsigned int timeout_msec)
601 {
602 void __iomem *port = sil24_port_base(ap);
603 struct sil24_port_priv *pp = ap->private_data;
604 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
605 dma_addr_t paddr = pp->cmd_block_dma;
606 u32 irq_enabled, irq_mask, irq_stat;
607 int rc;
608
609 prb->ctrl = cpu_to_le16(ctrl);
610 ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);
611
612 /* temporarily plug completion and error interrupts */
613 irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
614 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
615
616 /*
617 * The barrier is required to ensure that writes to cmd_block reach
618 * the memory before the write to PORT_CMD_ACTIVATE.
619 */
620 wmb();
621 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
622 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
623
624 irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
625 irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0,
626 10, timeout_msec);
627
628 writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
629 irq_stat >>= PORT_IRQ_RAW_SHIFT;
630
631 if (irq_stat & PORT_IRQ_COMPLETE)
632 rc = 0;
633 else {
634 /* force port into known state */
635 sil24_init_port(ap);
636
637 if (irq_stat & PORT_IRQ_ERROR)
638 rc = -EIO;
639 else
640 rc = -EBUSY;
641 }
642
643 /* restore IRQ enabled */
644 writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);
645
646 return rc;
647 }
648
static int sil24_softreset(struct ata_link *link, unsigned int *class,
650 unsigned long deadline)
651 {
652 struct ata_port *ap = link->ap;
653 int pmp = sata_srst_pmp(link);
654 unsigned int timeout_msec = 0;
655 struct ata_taskfile tf;
656 const char *reason;
657 int rc;
658
659 /* put the port into known state */
660 if (sil24_init_port(ap)) {
661 reason = "port not ready";
662 goto err;
663 }
664
665 /* do SRST */
666 if (time_after(deadline, jiffies))
667 timeout_msec = jiffies_to_msecs(deadline - jiffies);
668
669 ata_tf_init(link->device, &tf); /* doesn't really matter */
670 rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
671 timeout_msec);
672 if (rc == -EBUSY) {
673 reason = "timeout";
674 goto err;
675 } else if (rc) {
676 reason = "SRST command error";
677 goto err;
678 }
679
680 sil24_read_tf(ap, 0, &tf);
681 *class = ata_port_classify(ap, &tf);
682
683 return 0;
684
685 err:
686 ata_link_err(link, "softreset failed (%s)\n", reason);
687 return -EIO;
688 }
689
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
691 unsigned long deadline)
692 {
693 struct ata_port *ap = link->ap;
694 void __iomem *port = sil24_port_base(ap);
695 struct sil24_port_priv *pp = ap->private_data;
696 int did_port_rst = 0;
697 const char *reason;
698 int tout_msec, rc;
699 u32 tmp;
700
701 retry:
702 /* Sometimes, DEV_RST is not enough to recover the controller.
* This happens often after the PMP DMA CS errata.
704 */
705 if (pp->do_port_rst) {
706 ata_port_warn(ap,
707 "controller in dubious state, performing PORT_RST\n");
708
709 writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
710 ata_msleep(ap, 10);
711 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
712 ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
713 10, 5000);
714
715 /* restore port configuration */
716 sil24_config_port(ap);
717 sil24_config_pmp(ap, ap->nr_pmp_links);
718
719 pp->do_port_rst = 0;
720 did_port_rst = 1;
721 }
722
723 /* sil24 does the right thing(tm) without any protection */
724 sata_set_spd(link);
725
726 tout_msec = 100;
727 if (ata_link_online(link))
728 tout_msec = 5000;
729
730 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
731 tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
732 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
733 tout_msec);
734
735 /* SStatus oscillates between zero and valid status after
736 * DEV_RST, debounce it.
737 */
738 rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
739 if (rc) {
740 reason = "PHY debouncing failed";
741 goto err;
742 }
743
744 if (tmp & PORT_CS_DEV_RST) {
745 if (ata_link_offline(link))
746 return 0;
747 reason = "link not ready";
748 goto err;
749 }
750
751 /* Sil24 doesn't store signature FIS after hardreset, so we
752 * can't wait for BSY to clear. Some devices take a long time
753 * to get ready and those devices will choke if we don't wait
754 * for BSY clearance here. Tell libata to perform follow-up
755 * softreset.
756 */
757 return -EAGAIN;
758
759 err:
760 if (!did_port_rst) {
761 pp->do_port_rst = 1;
762 goto retry;
763 }
764
765 ata_link_err(link, "hardreset failed (%s)\n", reason);
766 return -EIO;
767 }
768
static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
770 struct sil24_sge *sge)
771 {
772 struct scatterlist *sg;
773 struct sil24_sge *last_sge = NULL;
774 unsigned int si;
775
776 for_each_sg(qc->sg, sg, qc->n_elem, si) {
777 sge->addr = cpu_to_le64(sg_dma_address(sg));
778 sge->cnt = cpu_to_le32(sg_dma_len(sg));
779 sge->flags = 0;
780
781 last_sge = sge;
782 sge++;
783 }
784
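/* terminate the chain so the controller stops fetching SGEs */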
785 last_sge->flags = cpu_to_le32(SGE_TRM);
786 }
787
static int sil24_qc_defer(struct ata_queued_cmd *qc)
789 {
790 struct ata_link *link = qc->dev->link;
791 struct ata_port *ap = link->ap;
792 u8 prot = qc->tf.protocol;
793
794 /*
795 * There is a bug in the chip:
796 * Port LRAM Causes the PRB/SGT Data to be Corrupted
797 * If the host issues a read request for LRAM and SActive registers
798 * while active commands are available in the port, PRB/SGT data in
799 * the LRAM can become corrupted. This issue applies only when
800 * reading from, but not writing to, the LRAM.
801 *
802 * Therefore, reading LRAM when there is no particular error [and
803 * other commands may be outstanding] is prohibited.
804 *
805 * To avoid this bug there are two situations where a command must run
806 * exclusive of any other commands on the port:
807 *
808 * - ATAPI commands which check the sense data
809 * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
810 * set.
811 *
812 */
813 int is_excl = (ata_is_atapi(prot) ||
814 (qc->flags & ATA_QCFLAG_RESULT_TF));
815
816 if (unlikely(ap->excl_link)) {
817 if (link == ap->excl_link) {
818 if (ap->nr_active_links)
819 return ATA_DEFER_PORT;
820 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
821 } else
822 return ATA_DEFER_PORT;
823 } else if (unlikely(is_excl)) {
824 ap->excl_link = link;
825 if (ap->nr_active_links)
826 return ATA_DEFER_PORT;
827 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
828 }
829
830 return ata_std_qc_defer(qc);
831 }
832
static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
834 {
835 struct ata_port *ap = qc->ap;
836 struct sil24_port_priv *pp = ap->private_data;
837 union sil24_cmd_block *cb;
838 struct sil24_prb *prb;
839 struct sil24_sge *sge;
840 u16 ctrl = 0;
841
842 cb = &pp->cmd_block[sil24_tag(qc->hw_tag)];
843
844 if (!ata_is_atapi(qc->tf.protocol)) {
845 prb = &cb->ata.prb;
846 sge = cb->ata.sge;
847 if (ata_is_data(qc->tf.protocol)) {
848 u16 prot = 0;
849 ctrl = PRB_CTRL_PROTOCOL;
850 if (ata_is_ncq(qc->tf.protocol))
851 prot |= PRB_PROT_NCQ;
852 if (qc->tf.flags & ATA_TFLAG_WRITE)
853 prot |= PRB_PROT_WRITE;
854 else
855 prot |= PRB_PROT_READ;
856 prb->prot = cpu_to_le16(prot);
857 }
858 } else {
859 prb = &cb->atapi.prb;
860 sge = cb->atapi.sge;
861 memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
862 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
863
864 if (ata_is_data(qc->tf.protocol)) {
865 if (qc->tf.flags & ATA_TFLAG_WRITE)
866 ctrl = PRB_CTRL_PACKET_WRITE;
867 else
868 ctrl = PRB_CTRL_PACKET_READ;
869 }
870 }
871
872 prb->ctrl = cpu_to_le16(ctrl);
873 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);
874
875 if (qc->flags & ATA_QCFLAG_DMAMAP)
876 sil24_fill_sg(qc, sge);
877
878 return AC_ERR_OK;
879 }
880
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
882 {
883 struct ata_port *ap = qc->ap;
884 struct sil24_port_priv *pp = ap->private_data;
885 void __iomem *port = sil24_port_base(ap);
886 unsigned int tag = sil24_tag(qc->hw_tag);
887 dma_addr_t paddr;
888 void __iomem *activate;
889
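/* each of the 31 command slots has its own 64-bit activation register */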
890 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
891 activate = port + PORT_CMD_ACTIVATE + tag * 8;
892
893 /*
894 * The barrier is required to ensure that writes to cmd_block reach
895 * the memory before the write to PORT_CMD_ACTIVATE.
896 */
897 wmb();
898 writel((u32)paddr, activate);
899 writel((u64)paddr >> 32, activate + 4);
900
901 return 0;
902 }
903
static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
905 {
906 sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
907 }
908
static void sil24_pmp_attach(struct ata_port *ap)
910 {
911 u32 *gscr = ap->link.device->gscr;
912
913 sil24_config_pmp(ap, 1);
914 sil24_init_port(ap);
915
916 if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
917 sata_pmp_gscr_devid(gscr) == 0x4140) {
918 ata_port_info(ap,
919 "disabling NCQ support due to sil24-mv4140 quirk\n");
920 ap->flags &= ~ATA_FLAG_NCQ;
921 }
922 }
923
static void sil24_pmp_detach(struct ata_port *ap)
925 {
926 sil24_init_port(ap);
927 sil24_config_pmp(ap, 0);
928
929 ap->flags |= ATA_FLAG_NCQ;
930 }
931
static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
933 unsigned long deadline)
934 {
935 int rc;
936
937 rc = sil24_init_port(link->ap);
938 if (rc) {
939 ata_link_err(link, "hardreset failed (port not ready)\n");
940 return rc;
941 }
942
943 return sata_std_hardreset(link, class, deadline);
944 }
945
static void sil24_freeze(struct ata_port *ap)
947 {
948 void __iomem *port = sil24_port_base(ap);
949
950 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
951 * PORT_IRQ_ENABLE instead.
952 */
953 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
954 }
955
static void sil24_thaw(struct ata_port *ap)
957 {
958 void __iomem *port = sil24_port_base(ap);
959 u32 tmp;
960
961 /* clear IRQ */
962 tmp = readl(port + PORT_IRQ_STAT);
963 writel(tmp, port + PORT_IRQ_STAT);
964
965 /* turn IRQ back on */
966 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
967 }
968
static void sil24_error_intr(struct ata_port *ap)
970 {
971 void __iomem *port = sil24_port_base(ap);
972 struct sil24_port_priv *pp = ap->private_data;
973 struct ata_queued_cmd *qc = NULL;
974 struct ata_link *link;
975 struct ata_eh_info *ehi;
976 int abort = 0, freeze = 0;
977 u32 irq_stat;
978
979 /* on error, we need to clear IRQ explicitly */
980 irq_stat = readl(port + PORT_IRQ_STAT);
981 writel(irq_stat, port + PORT_IRQ_STAT);
982
983 /* first, analyze and record host port events */
984 link = &ap->link;
985 ehi = &link->eh_info;
986 ata_ehi_clear_desc(ehi);
987
988 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
989
990 if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
991 ata_ehi_push_desc(ehi, "SDB notify");
992 sata_async_notification(ap);
993 }
994
995 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
996 ata_ehi_hotplugged(ehi);
997 ata_ehi_push_desc(ehi, "%s",
998 irq_stat & PORT_IRQ_PHYRDY_CHG ?
999 "PHY RDY changed" : "device exchanged");
1000 freeze = 1;
1001 }
1002
1003 if (irq_stat & PORT_IRQ_UNK_FIS) {
1004 ehi->err_mask |= AC_ERR_HSM;
1005 ehi->action |= ATA_EH_RESET;
1006 ata_ehi_push_desc(ehi, "unknown FIS");
1007 freeze = 1;
1008 }
1009
1010 /* deal with command error */
1011 if (irq_stat & PORT_IRQ_ERROR) {
1012 const struct sil24_cerr_info *ci = NULL;
1013 unsigned int err_mask = 0, action = 0;
1014 u32 context, cerr;
1015 int pmp;
1016
1017 abort = 1;
1018
1019 /* DMA Context Switch Failure in Port Multiplier Mode
1020 * errata. If we have active commands to 3 or more
1021 * devices, any error condition on active devices can
1022 * corrupt DMA context switching.
1023 */
1024 if (ap->nr_active_links >= 3) {
1025 ehi->err_mask |= AC_ERR_OTHER;
1026 ehi->action |= ATA_EH_RESET;
1027 ata_ehi_push_desc(ehi, "PMP DMA CS errata");
1028 pp->do_port_rst = 1;
1029 freeze = 1;
1030 }
1031
1032 /* find out the offending link and qc */
1033 if (sata_pmp_attached(ap)) {
1034 context = readl(port + PORT_CONTEXT);
1035 pmp = (context >> 5) & 0xf;
1036
1037 if (pmp < ap->nr_pmp_links) {
1038 link = &ap->pmp_link[pmp];
1039 ehi = &link->eh_info;
1040 qc = ata_qc_from_tag(ap, link->active_tag);
1041
1042 ata_ehi_clear_desc(ehi);
1043 ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
1044 irq_stat);
1045 } else {
1046 err_mask |= AC_ERR_HSM;
1047 action |= ATA_EH_RESET;
1048 freeze = 1;
1049 }
1050 } else
1051 qc = ata_qc_from_tag(ap, link->active_tag);
1052
1053 /* analyze CMD_ERR */
1054 cerr = readl(port + PORT_CMD_ERR);
1055 if (cerr < ARRAY_SIZE(sil24_cerr_db))
1056 ci = &sil24_cerr_db[cerr];
1057
1058 if (ci && ci->desc) {
1059 err_mask |= ci->err_mask;
1060 action |= ci->action;
1061 if (action & ATA_EH_RESET)
1062 freeze = 1;
1063 ata_ehi_push_desc(ehi, "%s", ci->desc);
1064 } else {
1065 err_mask |= AC_ERR_OTHER;
1066 action |= ATA_EH_RESET;
1067 freeze = 1;
1068 ata_ehi_push_desc(ehi, "unknown command error %d",
1069 cerr);
1070 }
1071
1072 /* record error info */
1073 if (qc)
1074 qc->err_mask |= err_mask;
1075 else
1076 ehi->err_mask |= err_mask;
1077
1078 ehi->action |= action;
1079
1080 /* if PMP, resume */
1081 if (sata_pmp_attached(ap))
1082 writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
1083 }
1084
1085 /* freeze or abort */
1086 if (freeze)
1087 ata_port_freeze(ap);
1088 else if (abort) {
1089 if (qc)
1090 ata_link_abort(qc->dev->link);
1091 else
1092 ata_port_abort(ap);
1093 }
1094 }
1095
static inline void sil24_host_intr(struct ata_port *ap)
1097 {
1098 void __iomem *port = sil24_port_base(ap);
1099 u32 slot_stat, qc_active;
1100 int rc;
1101
1102 /* If PCIX_IRQ_WOC, there's an inherent race window between
1103 * clearing IRQ pending status and reading PORT_SLOT_STAT
1104 * which may cause spurious interrupts afterwards. This is
1105 * unavoidable and much better than losing interrupts which
1106 * happens if IRQ pending is cleared after reading
1107 * PORT_SLOT_STAT.
1108 */
1109 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
1110 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
1111
1112 slot_stat = readl(port + PORT_SLOT_STAT);
1113
1114 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
1115 sil24_error_intr(ap);
1116 return;
1117 }
1118
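/* bits 0-30 of PORT_SLOT_STAT are the slots still active; commands that have
 * dropped out of this mask are the ones to complete */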
1119 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
1120 rc = ata_qc_complete_multiple(ap, qc_active);
1121 if (rc > 0)
1122 return;
1123 if (rc < 0) {
1124 struct ata_eh_info *ehi = &ap->link.eh_info;
1125 ehi->err_mask |= AC_ERR_HSM;
1126 ehi->action |= ATA_EH_RESET;
1127 ata_port_freeze(ap);
1128 return;
1129 }
1130
1131 /* spurious interrupts are expected if PCIX_IRQ_WOC */
1132 if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
1133 ata_port_info(ap,
1134 "spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
1135 slot_stat, ap->link.active_tag, ap->link.sactive);
1136 }
1137
static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
1139 {
1140 struct ata_host *host = dev_instance;
1141 void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
1142 unsigned handled = 0;
1143 u32 status;
1144 int i;
1145
1146 status = readl(host_base + HOST_IRQ_STAT);
1147
1148 if (status == 0xffffffff) {
1149 dev_err(host->dev, "IRQ status == 0xffffffff, "
1150 "PCI fault or device removal?\n");
1151 goto out;
1152 }
1153
1154 if (!(status & IRQ_STAT_4PORTS))
1155 goto out;
1156
1157 spin_lock(&host->lock);
1158
1159 for (i = 0; i < host->n_ports; i++)
1160 if (status & (1 << i)) {
1161 sil24_host_intr(host->ports[i]);
1162 handled++;
1163 }
1164
1165 spin_unlock(&host->lock);
1166 out:
1167 return IRQ_RETVAL(handled);
1168 }
1169
static void sil24_error_handler(struct ata_port *ap)
1171 {
1172 struct sil24_port_priv *pp = ap->private_data;
1173
1174 if (sil24_init_port(ap))
1175 ata_eh_freeze_port(ap);
1176
1177 sata_pmp_error_handler(ap);
1178
1179 pp->do_port_rst = 0;
1180 }
1181
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
1183 {
1184 struct ata_port *ap = qc->ap;
1185
1186 /* make DMA engine forget about the failed command */
1187 if ((qc->flags & ATA_QCFLAG_EH) && sil24_init_port(ap))
1188 ata_eh_freeze_port(ap);
1189 }
1190
static int sil24_port_start(struct ata_port *ap)
1192 {
1193 struct device *dev = ap->host->dev;
1194 struct sil24_port_priv *pp;
1195 union sil24_cmd_block *cb;
1196 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
1197 dma_addr_t cb_dma;
1198
1199 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1200 if (!pp)
1201 return -ENOMEM;
1202
1203 cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
1204 if (!cb)
1205 return -ENOMEM;
1206
1207 pp->cmd_block = cb;
1208 pp->cmd_block_dma = cb_dma;
1209
1210 ap->private_data = pp;
1211
1212 ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
1213 ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");
1214
1215 return 0;
1216 }
1217
static void sil24_init_controller(struct ata_host *host)
1219 {
1220 void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
1221 u32 tmp;
1222 int i;
1223
1224 /* GPIO off */
1225 writel(0, host_base + HOST_FLASH_CMD);
1226
1227 /* clear global reset & mask interrupts during initialization */
1228 writel(0, host_base + HOST_CTRL);
1229
1230 /* init ports */
1231 for (i = 0; i < host->n_ports; i++) {
1232 struct ata_port *ap = host->ports[i];
1233 void __iomem *port = sil24_port_base(ap);
1234
1235
1236 /* Initial PHY setting */
1237 writel(0x20c, port + PORT_PHY_CFG);
1238
1239 /* Clear port RST */
1240 tmp = readl(port + PORT_CTRL_STAT);
1241 if (tmp & PORT_CS_PORT_RST) {
1242 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
1243 tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT,
1244 PORT_CS_PORT_RST,
1245 PORT_CS_PORT_RST, 10, 100);
1246 if (tmp & PORT_CS_PORT_RST)
1247 dev_err(host->dev,
1248 "failed to clear port RST\n");
1249 }
1250
1251 /* configure port */
1252 sil24_config_port(ap);
1253 }
1254
1255 /* Turn on interrupts */
1256 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
1257 }
1258
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1260 {
1262 struct ata_port_info pi = sil24_port_info[ent->driver_data];
1263 const struct ata_port_info *ppi[] = { &pi, NULL };
1264 void __iomem * const *iomap;
1265 struct ata_host *host;
1266 int rc;
1267 u32 tmp;
1268
/* fail the build if sil24_cmd_block is sized wrongly */
BUILD_BUG_ON(sizeof(union sil24_cmd_block) != PAGE_SIZE);
1272
1273 ata_print_version_once(&pdev->dev, DRV_VERSION);
1274
1275 /* acquire resources */
1276 rc = pcim_enable_device(pdev);
1277 if (rc)
1278 return rc;
1279
1280 rc = pcim_iomap_regions(pdev,
1281 (1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
1282 DRV_NAME);
1283 if (rc)
1284 return rc;
1285 iomap = pcim_iomap_table(pdev);
1286
1287 /* apply workaround for completion IRQ loss on PCI-X errata */
1288 if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1289 tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
1290 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
1291 dev_info(&pdev->dev,
1292 "Applying completion IRQ loss on PCI-X errata fix\n");
1293 else
1294 pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1295 }
1296
1297 /* allocate and fill host */
1298 host = ata_host_alloc_pinfo(&pdev->dev, ppi,
1299 SIL24_FLAG2NPORTS(ppi[0]->flags));
1300 if (!host)
1301 return -ENOMEM;
1302 host->iomap = iomap;
1303
1304 /* configure and activate the device */
1305 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1306 if (rc) {
1307 dev_err(&pdev->dev, "DMA enable failed\n");
1308 return rc;
1309 }
1310
1311 /* Set max read request size to 4096. This slightly increases
1312 * write throughput for pci-e variants.
1313 */
1314 pcie_set_readrq(pdev, 4096);
1315
1316 sil24_init_controller(host);
1317
1318 if (sata_sil24_msi && !pci_enable_msi(pdev)) {
1319 dev_info(&pdev->dev, "Using MSI\n");
1320 pci_intx(pdev, 0);
1321 }
1322
1323 pci_set_master(pdev);
1324 return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
1325 &sil24_sht);
1326 }
1327
1328 #ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev)
1330 {
1331 struct ata_host *host = pci_get_drvdata(pdev);
1332 void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
1333 int rc;
1334
1335 rc = ata_pci_device_do_resume(pdev);
1336 if (rc)
1337 return rc;
1338
1339 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
1340 writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);
1341
1342 sil24_init_controller(host);
1343
1344 ata_host_resume(host);
1345
1346 return 0;
1347 }
1348 #endif
1349
1350 #ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap)
1352 {
1353 sil24_config_pmp(ap, ap->nr_pmp_links);
1354 return 0;
1355 }
1356 #endif
1357
1358 module_pci_driver(sil24_pci_driver);
1359
1360 MODULE_AUTHOR("Tejun Heo");
1361 MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
1362 MODULE_LICENSE("GPL");
1363 MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
1364