1 /* 2 * sata_via.c - VIA Serial ATA controllers 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 on emails. 7 * 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2004 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available under NDA. 31 * 32 * 33 * To-do list: 34 * - VT6421 PATA support 35 * 36 */ 37 38 #include <linux/kernel.h> 39 #include <linux/module.h> 40 #include <linux/pci.h> 41 #include <linux/init.h> 42 #include <linux/blkdev.h> 43 #include <linux/delay.h> 44 #include <linux/device.h> 45 #include <scsi/scsi_host.h> 46 #include <linux/libata.h> 47 48 #define DRV_NAME "sata_via" 49 #define DRV_VERSION "2.1" 50 51 enum board_ids_enum { 52 vt6420, 53 vt6421, 54 }; 55 56 enum { 57 SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ 58 SATA_INT_GATE = 0x41, /* SATA interrupt gating */ 59 SATA_NATIVE_MODE = 0x42, /* Native mode enable */ 60 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */ 61 PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ 62 PATA_PIO_TIMING = 0xAB, /* PATA timing register */ 63 64 PORT0 = (1 << 1), 65 PORT1 = (1 << 0), 66 ALL_PORTS = PORT0 | PORT1, 67 68 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), 69 70 SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ 71 SATA_2DEV = (1 << 5), /* SATA is master/slave */ 72 }; 73 74 static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 75 static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); 76 static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 77 static void svia_noop_freeze(struct ata_port *ap); 78 static void vt6420_error_handler(struct ata_port *ap); 79 static int vt6421_pata_cable_detect(struct ata_port *ap); 80 static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev); 81 static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev); 82 83 static const struct pci_device_id svia_pci_tbl[] = { 84 { PCI_VDEVICE(VIA, 0x5337), vt6420 }, 85 { PCI_VDEVICE(VIA, 0x0591), vt6420 }, 86 { PCI_VDEVICE(VIA, 0x3149), vt6420 }, 87 { PCI_VDEVICE(VIA, 0x3249), vt6421 }, 88 89 { } /* terminate list */ 90 }; 91 92 static struct pci_driver svia_pci_driver = { 93 .name = DRV_NAME, 94 .id_table = svia_pci_tbl, 95 .probe = svia_init_one, 96 .remove = ata_pci_remove_one, 97 }; 98 99 static struct scsi_host_template svia_sht = { 100 .module = THIS_MODULE, 101 .name = DRV_NAME, 102 .ioctl = ata_scsi_ioctl, 103 .queuecommand = ata_scsi_queuecmd, 104 .can_queue = ATA_DEF_QUEUE, 105 .this_id = ATA_SHT_THIS_ID, 106 .sg_tablesize = LIBATA_MAX_PRD, 107 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 108 .emulated = ATA_SHT_EMULATED, 109 .use_clustering = ATA_SHT_USE_CLUSTERING, 110 .proc_name = DRV_NAME, 111 .dma_boundary = ATA_DMA_BOUNDARY, 112 .slave_configure = ata_scsi_slave_config, 113 .slave_destroy = ata_scsi_slave_destroy, 114 .bios_param = ata_std_bios_param, 115 }; 116 117 static const struct ata_port_operations vt6420_sata_ops = { 118 .port_disable = ata_port_disable, 119 120 .tf_load = ata_tf_load, 121 .tf_read = ata_tf_read, 122 .check_status = ata_check_status, 123 .exec_command = ata_exec_command, 124 .dev_select = ata_std_dev_select, 125 126 .bmdma_setup = ata_bmdma_setup, 127 .bmdma_start = ata_bmdma_start, 128 .bmdma_stop = ata_bmdma_stop, 129 .bmdma_status = ata_bmdma_status, 130 131 .qc_prep = ata_qc_prep, 132 .qc_issue = ata_qc_issue_prot, 133 .data_xfer = ata_data_xfer, 134 135 .freeze = svia_noop_freeze, 136 .thaw = ata_bmdma_thaw, 137 .error_handler = vt6420_error_handler, 138 .post_internal_cmd = ata_bmdma_post_internal_cmd, 139 140 .irq_clear = ata_bmdma_irq_clear, 141 .irq_on = ata_irq_on, 142 .irq_ack = ata_irq_ack, 143 144 .port_start = ata_port_start, 145 }; 146 147 static const struct ata_port_operations vt6421_pata_ops = { 148 .port_disable = ata_port_disable, 149 150 .set_piomode = vt6421_set_pio_mode, 151 .set_dmamode = vt6421_set_dma_mode, 152 153 .tf_load = ata_tf_load, 154 .tf_read = ata_tf_read, 155 .check_status = ata_check_status, 156 .exec_command = ata_exec_command, 157 .dev_select = ata_std_dev_select, 158 159 .bmdma_setup = ata_bmdma_setup, 160 .bmdma_start = ata_bmdma_start, 161 .bmdma_stop = ata_bmdma_stop, 162 .bmdma_status = ata_bmdma_status, 163 164 .qc_prep = ata_qc_prep, 165 .qc_issue = ata_qc_issue_prot, 166 .data_xfer = ata_data_xfer, 167 168 .freeze = ata_bmdma_freeze, 169 .thaw = ata_bmdma_thaw, 170 .error_handler = ata_bmdma_error_handler, 171 .post_internal_cmd = ata_bmdma_post_internal_cmd, 172 .cable_detect = vt6421_pata_cable_detect, 173 174 .irq_clear = ata_bmdma_irq_clear, 175 .irq_on = ata_irq_on, 176 .irq_ack = ata_irq_ack, 177 178 .port_start = ata_port_start, 179 }; 180 181 static const struct ata_port_operations vt6421_sata_ops = { 182 .port_disable = ata_port_disable, 183 184 .tf_load = ata_tf_load, 185 .tf_read = ata_tf_read, 186 .check_status = ata_check_status, 187 .exec_command = ata_exec_command, 188 .dev_select = ata_std_dev_select, 189 190 .bmdma_setup = ata_bmdma_setup, 191 .bmdma_start = ata_bmdma_start, 192 .bmdma_stop = ata_bmdma_stop, 193 .bmdma_status = ata_bmdma_status, 194 195 .qc_prep = ata_qc_prep, 196 .qc_issue = ata_qc_issue_prot, 197 .data_xfer = ata_data_xfer, 198 199 .freeze = ata_bmdma_freeze, 200 .thaw = ata_bmdma_thaw, 201 .error_handler = ata_bmdma_error_handler, 202 .post_internal_cmd = ata_bmdma_post_internal_cmd, 203 .cable_detect = ata_cable_sata, 204 205 .irq_clear = ata_bmdma_irq_clear, 206 .irq_on = ata_irq_on, 207 .irq_ack = ata_irq_ack, 208 209 .scr_read = svia_scr_read, 210 .scr_write = svia_scr_write, 211 212 .port_start = ata_port_start, 213 }; 214 215 static const struct ata_port_info vt6420_port_info = { 216 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 217 .pio_mask = 0x1f, 218 .mwdma_mask = 0x07, 219 .udma_mask = 0x7f, 220 .port_ops = &vt6420_sata_ops, 221 }; 222 223 static struct ata_port_info vt6421_sport_info = { 224 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 225 .pio_mask = 0x1f, 226 .mwdma_mask = 0x07, 227 .udma_mask = 0x7f, 228 .port_ops = &vt6421_sata_ops, 229 }; 230 231 static struct ata_port_info vt6421_pport_info = { 232 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY, 233 .pio_mask = 0x1f, 234 .mwdma_mask = 0, 235 .udma_mask = 0x7f, 236 .port_ops = &vt6421_pata_ops, 237 }; 238 239 MODULE_AUTHOR("Jeff Garzik"); 240 MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers"); 241 MODULE_LICENSE("GPL"); 242 MODULE_DEVICE_TABLE(pci, svia_pci_tbl); 243 MODULE_VERSION(DRV_VERSION); 244 245 static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg) 246 { 247 if (sc_reg > SCR_CONTROL) 248 return 0xffffffffU; 249 return ioread32(ap->ioaddr.scr_addr + (4 * sc_reg)); 250 } 251 252 static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 253 { 254 if (sc_reg > SCR_CONTROL) 255 return; 256 iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg)); 257 } 258 259 static void svia_noop_freeze(struct ata_port *ap) 260 { 261 /* Some VIA controllers choke if ATA_NIEN is manipulated in 262 * certain way. Leave it alone and just clear pending IRQ. 263 */ 264 ata_chk_status(ap); 265 ata_bmdma_irq_clear(ap); 266 } 267 268 /** 269 * vt6420_prereset - prereset for vt6420 270 * @ap: target ATA port 271 * 272 * SCR registers on vt6420 are pieces of shit and may hang the 273 * whole machine completely if accessed with the wrong timing. 274 * To avoid such catastrophe, vt6420 doesn't provide generic SCR 275 * access operations, but uses SStatus and SControl only during 276 * boot probing in controlled way. 277 * 278 * As the old (pre EH update) probing code is proven to work, we 279 * strictly follow the access pattern. 280 * 281 * LOCKING: 282 * Kernel thread context (may sleep) 283 * 284 * RETURNS: 285 * 0 on success, -errno otherwise. 286 */ 287 static int vt6420_prereset(struct ata_port *ap) 288 { 289 struct ata_eh_context *ehc = &ap->eh_context; 290 unsigned long timeout = jiffies + (HZ * 5); 291 u32 sstatus, scontrol; 292 int online; 293 294 /* don't do any SCR stuff if we're not loading */ 295 if (!(ap->pflags & ATA_PFLAG_LOADING)) 296 goto skip_scr; 297 298 /* Resume phy. This is the old resume sequence from 299 * __sata_phy_reset(). 300 */ 301 svia_scr_write(ap, SCR_CONTROL, 0x300); 302 svia_scr_read(ap, SCR_CONTROL); /* flush */ 303 304 /* wait for phy to become ready, if necessary */ 305 do { 306 msleep(200); 307 if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1) 308 break; 309 } while (time_before(jiffies, timeout)); 310 311 /* open code sata_print_link_status() */ 312 sstatus = svia_scr_read(ap, SCR_STATUS); 313 scontrol = svia_scr_read(ap, SCR_CONTROL); 314 315 online = (sstatus & 0xf) == 0x3; 316 317 ata_port_printk(ap, KERN_INFO, 318 "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n", 319 online ? "up" : "down", sstatus, scontrol); 320 321 /* SStatus is read one more time */ 322 svia_scr_read(ap, SCR_STATUS); 323 324 if (!online) { 325 /* tell EH to bail */ 326 ehc->i.action &= ~ATA_EH_RESET_MASK; 327 return 0; 328 } 329 330 skip_scr: 331 /* wait for !BSY */ 332 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 333 334 return 0; 335 } 336 337 static void vt6420_error_handler(struct ata_port *ap) 338 { 339 return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, 340 NULL, ata_std_postreset); 341 } 342 343 static int vt6421_pata_cable_detect(struct ata_port *ap) 344 { 345 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 346 u8 tmp; 347 348 pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp); 349 if (tmp & 0x10) 350 return ATA_CBL_PATA40; 351 return ATA_CBL_PATA80; 352 } 353 354 static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev) 355 { 356 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 357 static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 }; 358 pci_write_config_byte(pdev, PATA_PIO_TIMING, pio_bits[adev->pio_mode - XFER_PIO_0]); 359 } 360 361 static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev) 362 { 363 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 364 static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 }; 365 pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->pio_mode - XFER_UDMA_0]); 366 } 367 368 static const unsigned int svia_bar_sizes[] = { 369 8, 4, 8, 4, 16, 256 370 }; 371 372 static const unsigned int vt6421_bar_sizes[] = { 373 16, 16, 16, 16, 32, 128 374 }; 375 376 static void __iomem * svia_scr_addr(void __iomem *addr, unsigned int port) 377 { 378 return addr + (port * 128); 379 } 380 381 static void __iomem * vt6421_scr_addr(void __iomem *addr, unsigned int port) 382 { 383 return addr + (port * 64); 384 } 385 386 static void vt6421_init_addrs(struct ata_port *ap) 387 { 388 void __iomem * const * iomap = ap->host->iomap; 389 void __iomem *reg_addr = iomap[ap->port_no]; 390 void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8); 391 struct ata_ioports *ioaddr = &ap->ioaddr; 392 393 ioaddr->cmd_addr = reg_addr; 394 ioaddr->altstatus_addr = 395 ioaddr->ctl_addr = (void __iomem *) 396 ((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS); 397 ioaddr->bmdma_addr = bmdma_addr; 398 ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no); 399 400 ata_std_ports(ioaddr); 401 } 402 403 static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) 404 { 405 const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL }; 406 struct ata_host *host; 407 int rc; 408 409 rc = ata_pci_prepare_native_host(pdev, ppi, 2, &host); 410 if (rc) 411 return rc; 412 *r_host = host; 413 414 rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME); 415 if (rc) { 416 dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n"); 417 return rc; 418 } 419 420 host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0); 421 host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1); 422 423 return 0; 424 } 425 426 static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) 427 { 428 const struct ata_port_info *ppi[] = 429 { &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info }; 430 struct ata_host *host; 431 int i, rc; 432 433 *r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi)); 434 if (!host) { 435 dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n"); 436 return -ENOMEM; 437 } 438 439 rc = pcim_iomap_regions(pdev, 0x1f, DRV_NAME); 440 if (rc) { 441 dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap " 442 "PCI BARs (errno=%d)\n", rc); 443 return rc; 444 } 445 host->iomap = pcim_iomap_table(pdev); 446 447 for (i = 0; i < host->n_ports; i++) 448 vt6421_init_addrs(host->ports[i]); 449 450 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 451 if (rc) 452 return rc; 453 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 454 if (rc) 455 return rc; 456 457 return 0; 458 } 459 460 static void svia_configure(struct pci_dev *pdev) 461 { 462 u8 tmp8; 463 464 pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); 465 dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n", 466 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); 467 468 /* make sure SATA channels are enabled */ 469 pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); 470 if ((tmp8 & ALL_PORTS) != ALL_PORTS) { 471 dev_printk(KERN_DEBUG, &pdev->dev, 472 "enabling SATA channels (0x%x)\n", 473 (int) tmp8); 474 tmp8 |= ALL_PORTS; 475 pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); 476 } 477 478 /* make sure interrupts for each channel sent to us */ 479 pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); 480 if ((tmp8 & ALL_PORTS) != ALL_PORTS) { 481 dev_printk(KERN_DEBUG, &pdev->dev, 482 "enabling SATA channel interrupts (0x%x)\n", 483 (int) tmp8); 484 tmp8 |= ALL_PORTS; 485 pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); 486 } 487 488 /* make sure native mode is enabled */ 489 pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); 490 if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { 491 dev_printk(KERN_DEBUG, &pdev->dev, 492 "enabling SATA channel native mode (0x%x)\n", 493 (int) tmp8); 494 tmp8 |= NATIVE_MODE_ALL; 495 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); 496 } 497 } 498 499 static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 500 { 501 static int printed_version; 502 unsigned int i; 503 int rc; 504 struct ata_host *host; 505 int board_id = (int) ent->driver_data; 506 const int *bar_sizes; 507 u8 tmp8; 508 509 if (!printed_version++) 510 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 511 512 rc = pcim_enable_device(pdev); 513 if (rc) 514 return rc; 515 516 if (board_id == vt6420) { 517 pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8); 518 if (tmp8 & SATA_2DEV) { 519 dev_printk(KERN_ERR, &pdev->dev, 520 "SATA master/slave not supported (0x%x)\n", 521 (int) tmp8); 522 return -EIO; 523 } 524 525 bar_sizes = &svia_bar_sizes[0]; 526 } else { 527 bar_sizes = &vt6421_bar_sizes[0]; 528 } 529 530 for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++) 531 if ((pci_resource_start(pdev, i) == 0) || 532 (pci_resource_len(pdev, i) < bar_sizes[i])) { 533 dev_printk(KERN_ERR, &pdev->dev, 534 "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n", 535 i, 536 (unsigned long long)pci_resource_start(pdev, i), 537 (unsigned long long)pci_resource_len(pdev, i)); 538 return -ENODEV; 539 } 540 541 if (board_id == vt6420) 542 rc = vt6420_prepare_host(pdev, &host); 543 else 544 rc = vt6421_prepare_host(pdev, &host); 545 if (rc) 546 return rc; 547 548 svia_configure(pdev); 549 550 pci_set_master(pdev); 551 return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED, 552 &svia_sht); 553 } 554 555 static int __init svia_init(void) 556 { 557 return pci_register_driver(&svia_pci_driver); 558 } 559 560 static void __exit svia_exit(void) 561 { 562 pci_unregister_driver(&svia_pci_driver); 563 } 564 565 module_init(svia_init); 566 module_exit(svia_exit); 567