1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * sata_via.c - VIA Serial ATA controllers
4 *
5 * Maintained by: Tejun Heo <tj@kernel.org>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
10 * Copyright 2003-2004 Jeff Garzik
11 *
12 * libata documentation is available via 'make {ps|pdf}docs',
13 * as Documentation/driver-api/libata.rst
14 *
15 * Hardware documentation available under NDA.
16 */
17
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/blkdev.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <scsi/scsi.h>
25 #include <scsi/scsi_cmnd.h>
26 #include <scsi/scsi_host.h>
27 #include <linux/libata.h>
28 #include <linux/string_choices.h>
29
30 #define DRV_NAME "sata_via"
31 #define DRV_VERSION "2.6"
32
33 /*
34 * vt8251 is different from other sata controllers of VIA. It has two
35 * channels, each channel has both Master and Slave slot.
36 */
/* Board index stored in svia_pci_tbl driver_data */
enum board_ids_enum {
	vt6420,		/* 2 SATA channels, Master slot only */
	vt6421,		/* 2 SATA channels plus 1 PATA channel */
	vt8251,		/* 2 channels, each with Master and Slave slots */
};
42
/* PCI configuration-space registers and bit masks used by this driver */
enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
	SVIA_MISC_3		= 0x46, /* Miscellaneous Control III */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/ cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	/* note: channel 0 lives in bit 1 and channel 1 in bit 0 */
	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	/* bits 4-7 of SATA_NATIVE_MODE select native mode per channel */
	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */

	SATA_HOTPLUG		= (1 << 5), /* enable IRQ on hotplug */
};
61
/* Per-host private data, hung off ata_host->private_data */
struct svia_priv {
	/* true once the Rx52[2] FIFO-watermark workaround has been applied
	 * (see the long comment in svia_configure()); consulted on resume */
	bool			wd_workaround;
};
65
/* Opt-in hotplug support for vt6420; off by default because SCR access
 * on vt6420 is known to be fragile (see vt6420_prereset()). */
static int vt6420_hotplug;
module_param_named(vt6420_hotplug, vt6420_hotplug, int, 0644);
MODULE_PARM_DESC(vt6420_hotplug, "Enable hot-plug support for VT6420 (0=Don't support, 1=support)");

/* forward declarations */
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_error_handler(struct ata_port *ap);
86
/* Supported devices; driver_data is a board_ids_enum index */
static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },	/* 2 sata chnls, 1 pata chnl */
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5287), vt8251 },	/* 2 sata chnls (Master/Slave) */
	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },

	{ }	/* terminate list */
};
99
static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM_SLEEP
	/* generic libata suspend; custom resume re-applies the WD workaround */
	.suspend		= ata_pci_device_suspend,
	.resume			= svia_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
110
/* Stock BMDMA SCSI host template; no driver-specific overrides needed */
static const struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
114
/* Common base: standard BMDMA ops plus the taskfile-load workaround */
static struct ata_port_operations svia_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_tf_load		= svia_tf_load,
};

/* vt6420: no generic SCR ops (unsafe on this chip), custom prereset and
 * ATAPI write-DMA workaround */
static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &svia_base_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
	.bmdma_start		= vt6420_bmdma_start,
};

/* vt6421 PATA channel: cable detect and PIO/DMA timing programming */
static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &svia_base_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};

/* vt6421 SATA channels: MMIO SCR access, EH hook for the WD workaround */
static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &svia_base_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
	.error_handler		= vt6421_error_handler,
};

/* vt8251: SCRs live in PCI config space, standard SATA hardreset works */
static struct ata_port_operations vt8251_ops = {
	.inherits		= &svia_base_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= vt8251_scr_read,
	.scr_write		= vt8251_scr_write,
};
147
static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

/* vt6421 SATA port */
static const struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

/* vt6421 PATA port */
static const struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	/* No MWDMA */
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};

static const struct ata_port_info vt8251_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt8251_ops,
};
179
/* module metadata */
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);
185
/* Read an SCR register via MMIO; registers are 32 bits wide, 4 bytes apart */
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *reg;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	reg = link->ap->ioaddr.scr_addr + 4 * sc_reg;
	*val = ioread32(reg);
	return 0;
}
193
/* Write an SCR register via MMIO; registers are 32 bits wide, 4 bytes apart */
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *reg;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	reg = link->ap->ioaddr.scr_addr + 4 * sc_reg;
	iowrite32(val, reg);
	return 0;
}
201
/*
 * Read an SCR register on vt8251.  The SCRs are not memory-mapped; their
 * fields are scattered over PCI configuration space and must be reassembled
 * into the standard SStatus/SError/SControl layout.  Each of the four
 * possible devices (2 channels x M/S) has its own config byte/dword slot.
 */
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	/* maps the 2-bit hardware IPM code to the SStatus IPM field value */
	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	/* slot index: channel number * 2 + Master(0)/Slave(1) */
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;
	u8 raw;

	switch (scr) {
	case SCR_STATUS:
		pci_read_config_byte(pdev, 0xA0 + slot, &raw);

		/* read the DET field, bit0 and 1 of the config byte */
		v |= raw & 0x03;

		/* read the SPD field, bit4 of the configure byte */
		if (raw & (1 << 4))
			v |= 0x02 << 4;
		else
			v |= 0x01 << 4;

		/* read the IPM field, bit2 and 3 of the config byte */
		v |= ipm_tbl[(raw >> 2) & 0x3];
		break;

	case SCR_ERROR:
		/* devices other than 5287 uses 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
		break;

	case SCR_CONTROL:
		pci_read_config_byte(pdev, 0xA4 + slot, &raw);

		/* read the DET field, bit0 and bit1 */
		v |= ((raw & 0x02) << 1) | (raw & 0x01);

		/* read the IPM field, bit2 and bit3 */
		v |= ((raw >> 2) & 0x03) << 8;
		break;

	default:
		return -EINVAL;
	}

	*val = v;
	return 0;
}
250
/*
 * Write an SCR register on vt8251.  Only SError and SControl are writable;
 * the standard SControl bit layout is repacked into the chip's config-space
 * encoding (cf. vt8251_scr_read() for the inverse mapping).
 */
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	/* slot index: channel number * 2 + Master(0)/Slave(1) */
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;

	switch (scr) {
	case SCR_ERROR:
		/* devices other than 5287 uses 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
		return 0;

	case SCR_CONTROL:
		/* set the DET field */
		v |= ((val & 0x4) >> 1) | (val & 0x1);

		/* set the IPM field */
		v |= ((val >> 8) & 0x3) << 2;

		pci_write_config_byte(pdev, 0xA4 + slot, v);
		return 0;

	default:
		return -EINVAL;
	}
}
278
279 /**
280 * svia_tf_load - send taskfile registers to host controller
281 * @ap: Port to which output is sent
282 * @tf: ATA taskfile register set
283 *
284 * Outputs ATA taskfile to standard ATA host controller.
285 *
286 * This is to fix the internal bug of via chipsets, which will
287 * reset the device register after changing the IEN bit on ctl
288 * register.
289 */
svia_tf_load(struct ata_port * ap,const struct ata_taskfile * tf)290 static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
291 {
292 struct ata_taskfile ttf;
293
294 if (tf->ctl != ap->last_ctl) {
295 ttf = *tf;
296 ttf.flags |= ATA_TFLAG_DEVICE;
297 tf = &ttf;
298 }
299 ata_sff_tf_load(ap, tf);
300 }
301
/*
 * Freeze hook that deliberately leaves ATA_NIEN alone.
 *
 * Some VIA controllers choke if ATA_NIEN is manipulated in
 * certain way.  Leave it alone and just clear pending IRQ.
 */
static void svia_noop_freeze(struct ata_port *ap)
{
	/* reading status acks the device-side interrupt condition */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);
}
310
/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on vt6420 are pieces of shit and may hang the
 *	whole machine completely if accessed with the wrong timing.
 *	To avoid such catastrophe, vt6420 doesn't provide generic SCR
 *	access operations, but uses SStatus and SControl only during
 *	boot probing in controlled way.
 *
 *	As the old (pre EH update) probing code is proven to work, we
 *	strictly follow the access pattern.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(link, SCR_CONTROL, 0x300);
	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		ata_msleep(link->ap, 200);
		svia_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(link, SCR_STATUS, &sstatus);
	svia_scr_read(link, SCR_CONTROL, &scontrol);

	/* DET == 3 means device present and phy communication established */
	online = (sstatus & 0xf) == 0x3;

	ata_port_info(ap,
		      "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
		      str_up_down(online), sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(link, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}
380
vt6420_bmdma_start(struct ata_queued_cmd * qc)381 static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
382 {
383 struct ata_port *ap = qc->ap;
384 if ((qc->tf.command == ATA_CMD_PACKET) &&
385 (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
386 /* Prevents corruption on some ATAPI burners */
387 ata_sff_pause(ap);
388 }
389 ata_bmdma_start(qc);
390 }
391
vt6421_pata_cable_detect(struct ata_port * ap)392 static int vt6421_pata_cable_detect(struct ata_port *ap)
393 {
394 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
395 u8 tmp;
396
397 pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
398 if (tmp & 0x10)
399 return ATA_CBL_PATA40;
400 return ATA_CBL_PATA80;
401 }
402
vt6421_set_pio_mode(struct ata_port * ap,struct ata_device * adev)403 static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
404 {
405 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
406 static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
407 pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
408 pio_bits[adev->pio_mode - XFER_PIO_0]);
409 }
410
vt6421_set_dma_mode(struct ata_port * ap,struct ata_device * adev)411 static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
412 {
413 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
414 static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
415 pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
416 udma_bits[adev->dma_mode - XFER_UDMA_0]);
417 }
418
/* Minimum acceptable PCI BAR sizes, validated in svia_init_one() */
static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

/* vt6421 exposes larger BARs (per-port register blocks) */
static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};
426
svia_scr_addr(void __iomem * addr,unsigned int port)427 static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
428 {
429 return addr + (port * 128);
430 }
431
vt6421_scr_addr(void __iomem * addr,unsigned int port)432 static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
433 {
434 return addr + (port * 64);
435 }
436
vt6421_init_addrs(struct ata_port * ap)437 static void vt6421_init_addrs(struct ata_port *ap)
438 {
439 void __iomem * const * iomap = ap->host->iomap;
440 void __iomem *reg_addr = iomap[ap->port_no];
441 void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
442 struct ata_ioports *ioaddr = &ap->ioaddr;
443
444 ioaddr->cmd_addr = reg_addr;
445 ioaddr->altstatus_addr =
446 ioaddr->ctl_addr = (void __iomem *)
447 ((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
448 ioaddr->bmdma_addr = bmdma_addr;
449 ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);
450
451 ata_sff_std_ports(ioaddr);
452
453 ata_port_pbar_desc(ap, ap->port_no, -1, "port");
454 ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
455 }
456
/*
 * Allocate and prepare an ata_host for vt6420.  BMDMA setup is done by
 * the generic helper; only BAR 5 (SCR registers) needs extra mapping.
 * Returns 0 on success, -errno otherwise; *r_host is set on success.
 */
static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	if (vt6420_hotplug) {
		/* NOTE(review): this writes through the const port_info into
		 * the shared static vt6420_sata_ops, affecting all vt6420
		 * instances — apparently intentional for the module param */
		ppi[0]->port_ops->scr_read = svia_scr_read;
		ppi[0]->port_ops->scr_write = svia_scr_write;
	}

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* SCR blocks for both ports live in BAR 5, 128 bytes apart */
	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}
484
/*
 * Allocate and prepare an ata_host for vt6421 (2 SATA ports + 1 PATA
 * port).  All six BARs are mapped and per-port addresses initialized by
 * hand since the layout differs from the generic SFF arrangement.
 * Returns 0 on success, -errno otherwise; *r_host is set on success.
 */
static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	/* map BARs 0-5 */
	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
			rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	return dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
}
511
/*
 * Allocate and prepare an ata_host for vt8251.  In addition to the
 * generic BMDMA setup, slave links are initialized since the vt8251
 * hosts four SATA devices as Master/Slave of its two channels.
 * Returns 0 on success, -errno otherwise; *r_host is set on success.
 */
static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* 8251 hosts four sata ports as M/S of the two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}
535
svia_wd_fix(struct pci_dev * pdev)536 static void svia_wd_fix(struct pci_dev *pdev)
537 {
538 u8 tmp8;
539
540 pci_read_config_byte(pdev, 0x52, &tmp8);
541 pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
542 }
543
vt642x_interrupt(int irq,void * dev_instance)544 static irqreturn_t vt642x_interrupt(int irq, void *dev_instance)
545 {
546 struct ata_host *host = dev_instance;
547 irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);
548
549 /* if the IRQ was not handled, it might be a hotplug IRQ */
550 if (rc != IRQ_HANDLED) {
551 u32 serror;
552 unsigned long flags;
553
554 spin_lock_irqsave(&host->lock, flags);
555 /* check for hotplug on port 0 */
556 svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
557 if (serror & SERR_PHYRDY_CHG) {
558 ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
559 ata_port_freeze(host->ports[0]);
560 rc = IRQ_HANDLED;
561 }
562 /* check for hotplug on port 1 */
563 svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
564 if (serror & SERR_PHYRDY_CHG) {
565 ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
566 ata_port_freeze(host->ports[1]);
567 rc = IRQ_HANDLED;
568 }
569 spin_unlock_irqrestore(&host->lock, flags);
570 }
571
572 return rc;
573 }
574
/*
 * vt6421 error handler.  On the first occurrence of the characteristic
 * SError signature of the WD-drive FIFO overflow (0x1000500), apply the
 * Rx52[2] watermark workaround once, then fall through to the standard
 * SFF error handling.  See the comment block in svia_configure().
 */
static void vt6421_error_handler(struct ata_port *ap)
{
	struct svia_priv *hpriv = ap->host->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 serror;

	/* see svia_configure() for description */
	if (!hpriv->wd_workaround) {
		svia_scr_read(&ap->link, SCR_ERROR, &serror);
		if (serror == 0x1000500) {
			ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
			svia_wd_fix(pdev);
			hpriv->wd_workaround = true;
			/* suppress the noisy EH report for this recovery */
			ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
		}
	}

	ata_sff_error_handler(ap);
}
594
/*
 * One-time controller configuration at probe: enable both SATA channels,
 * route their interrupts to us, switch to native mode, optionally enable
 * hotplug IRQs, and (on vt6420) unconditionally apply the WD FIFO
 * workaround described below.
 */
static void svia_configure(struct pci_dev *pdev, int board_id,
			   struct svia_priv *hpriv)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	/* 0xf* in PCI_INTERRUPT_LINE is reported as "no line" (0) */
	dev_info(&pdev->dev, "routed to hard irq line %d\n",
		 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
			(int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_dbg(&pdev->dev,
			"enabling SATA channel native mode (0x%x)\n",
			(int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421) {
		/* enable IRQ on hotplug */
		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
			dev_dbg(&pdev->dev,
				"enabling SATA hotplug (0x%x)\n",
				(int) tmp8);
			tmp8 |= SATA_HOTPLUG;
			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
		}
	}

	/*
	 * vt6420/1 has problems talking to some drives.  The following
	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
	 *
	 * When host issues HOLD, device may send up to 20DW of data
	 * before acknowledging it with HOLDA and the host should be
	 * able to buffer them in FIFO.  Unfortunately, some WD drives
	 * send up to 40DW before acknowledging HOLD and, in the
	 * default configuration, this ends up overflowing vt6421's
	 * FIFO, making the controller abort the transaction with
	 * R_ERR.
	 *
	 * Rx52[2] is the internal 128DW FIFO Flow control watermark
	 * adjusting mechanism enable bit and the default value 0
	 * means host will issue HOLD to device when the left FIFO
	 * size goes below 32DW.  Setting it to 1 makes the watermark
	 * 64DW.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
	 * http://article.gmane.org/gmane.linux.ide/46352
	 * http://thread.gmane.org/gmane.linux.kernel/1062139
	 *
	 * As the fix slows down data transfer, apply it only if the error
	 * actually appears - see vt6421_error_handler()
	 * Apply the fix always on vt6420 as we don't know if SCR_ERROR can be
	 * read safely.
	 */
	if (board_id == vt6420) {
		svia_wd_fix(pdev);
		hpriv->wd_workaround = true;
	}
}
676
/*
 * PCI probe entry point.  Validates BAR sizes, prepares the board-specific
 * ata_host, configures the controller and activates the host with the
 * appropriate interrupt handler (hotplug-aware for vt6420 w/ module param
 * and for vt6421).  Returns 0 on success, -errno otherwise.
 */
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int i;
	int rc;
	struct ata_host *host = NULL;
	/* board_ids_enum index stashed in the match entry */
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;
	struct svia_priv *hpriv;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6421)
		bar_sizes = &vt6421_bar_sizes[0];
	else
		bar_sizes = &svia_bar_sizes[0];

	/* sanity-check that every BAR exists and is at least the minimum size */
	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_err(&pdev->dev,
				"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
				i,
				(unsigned long long)pci_resource_start(pdev, i),
				(unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	switch (board_id) {
	case vt6420:
		rc = vt6420_prepare_host(pdev, &host);
		break;
	case vt6421:
		rc = vt6421_prepare_host(pdev, &host);
		break;
	case vt8251:
		rc = vt8251_prepare_host(pdev, &host);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	svia_configure(pdev, board_id, hpriv);

	pci_set_master(pdev);
	/* hotplug-capable configurations need the SError-checking handler */
	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421)
		return ata_host_activate(host, pdev->irq, vt642x_interrupt,
					 IRQF_SHARED, &svia_sht);
	else
		return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
					 IRQF_SHARED, &svia_sht);
}
739
#ifdef CONFIG_PM_SLEEP
/*
 * System resume: do the generic PCI/ATA resume, then re-apply the WD
 * FIFO-watermark workaround (PCI config space was reset across suspend)
 * before resuming the host.  Returns 0 on success, -errno otherwise.
 */
static int svia_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct svia_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (hpriv->wd_workaround)
		svia_wd_fix(pdev);
	ata_host_resume(host);

	return 0;
}
#endif
758
759 module_pci_driver(svia_pci_driver);
760