/*
 *  sata_sil.c - Silicon Image SATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2005 Red Hat, Inc.
 *  Copyright 2003 Benjamin Herrenschmidt
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Documentation for SiI 3112:
 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
 *
 *  Other errata and documentation available under NDA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_sil"
#define DRV_VERSION	"2.2"

enum {
	SIL_MMIO_BAR = 5,

	/*
	 * host flags
	 */
	SIL_FLAG_NO_SATA_IRQ = (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE = (1 << 30),

	SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			     ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,

	/*
	 * Controller IDs
	 */
	sil_3112 = 0,
	sil_3112_no_sata_irq = 1,
	sil_3512 = 2,
	sil_3114 = 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG = 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT = (1 << 22),
	SIL_MASK_IDE1_INT = (1 << 23),
	SIL_MASK_IDE2_INT = (1 << 24),
	SIL_MASK_IDE3_INT = (1 << 25),
	SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT = SIL_MASK_2PORT |
			 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING = (1 << 1),

	SIL_DMA_ENABLE = (1 << 0),		/* DMA run switch */
	SIL_DMA_RDWR = (1 << 3),		/* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ = (1 << 4),		/* OR of all SATA IRQs */
	SIL_DMA_ACTIVE = (1 << 16),		/* DMA running */
	SIL_DMA_ERROR = (1 << 17),		/* PCI bus error */
	SIL_DMA_COMPLETE = (1 << 18),		/* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ = (1 << 6),		/* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE = (1 << 24),		/* ACTIVE for the next channel */
	SIL_DMA_N_ERROR = (1 << 25),		/* ERROR for the next channel */
	SIL_DMA_N_COMPLETE = (1 << 26),		/* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N = (1 << 16),			/* triggered by SError.N */

	/*
	 * Others
	 */
	SIL_QUIRK_MOD15WRITE = (1 << 0),
	SIL_QUIRK_UDMA5MAX = (1 << 1),
};
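
/* Note on the flag values above: the SIL_FLAG_* bits are driver-private
 * port flags that get ORed into the same flags word as the generic
 * ATA_FLAG_* values (see SIL_DFL_PORT_FLAGS and sil_port_info), which is
 * why they sit in the high bits.  The SIL_QUIRK_* bits are separate
 * per-drive quirk flags used only by sil_blacklist and sil_dev_config.
 */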

static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);


static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};


/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
	const char * product;
	unsigned int quirk;
} sil_blacklist [] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};

static struct pci_driver sil_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sil_pci_tbl,
	.probe		= sil_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= sil_pci_device_resume,
#endif
};

static struct scsi_host_template sil_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};

static const struct ata_port_operations sil_ops = {
	.port_disable		= ata_port_disable,
	.dev_config		= sil_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.set_mode		= sil_set_mode,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_data_xfer,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= 0x3f,		/* udma0-5 */
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= 0x3f,		/* udma0-5 */
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= 0x3f,		/* udma0-5 */
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= 0x3f,		/* udma0-5 */
		.port_ops	= &sil_ops,
	},
};

/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	{ 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
	{ 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
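
/* Note: ports 2 and 3 (present only on the four-port 3114) repeat the
 * port 0/1 register layout at a +0x200 offset within BAR5, which is why
 * this table could in principle be calculated instead (see TODO above).
 */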

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int slow_down = 0;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");


static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
	u8 cache_line = 0;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
	return cache_line;
}

/**
 *	sil_set_mode - wrap set_mode functions
 *	@ap: port to set up
 *	@r_failed: returned device when we fail
 *
 *	Wrap the libata method for device setup as after the setup we need
 *	to inspect the results and do some configuration work
 */

static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed)
{
	struct ata_host *host = ap->host;
	struct ata_device *dev;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
	u32 tmp, dev_mode[2];
	unsigned int i;
	int rc;

	rc = ata_do_set_mode(ap, r_failed);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			dev_mode[i] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[i] = 1;	/* PIO3/4 */
		else
			dev_mode[i] = 3;	/* UDMA */
		/* value 2 indicates MDMA */
	}

	tmp = readl(addr);
	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
	return 0;
}

static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
{
	void __iomem *offset = ap->ioaddr.scr_addr;

	switch (sc_reg) {
	case SCR_STATUS:
		return offset + 4;
	case SCR_ERROR:
		return offset + 8;
	case SCR_CONTROL:
		return offset;
	default:
		/* do nothing */
		break;
	}

	return NULL;
}

static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	void __iomem *mmio = sil_scr_addr(ap, sc_reg);
	if (mmio)
		return readl(mmio);
	return 0xffffffffU;
}

static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	void __iomem *mmio = sil_scr_addr(ap, sc_reg);
	if (mmio)
		writel(val, mmio);
}

static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror;

		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending.  Clear SError immediately.
		 */
		serror = sil_scr_read(ap, SCR_ERROR);
		sil_scr_write(ap, SCR_ERROR, serror);

		/* Trigger hotplug and accumulate SError only if the
		 * port isn't already frozen.  Otherwise, PHY events
		 * during hardreset make controllers with broken SIEN
		 * repeat probing needlessly.
		 */
		if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
			ata_ehi_hotplugged(&ap->eh_info);
			ap->eh_info.serror |= serror;
		}

		goto freeze;
	}

	if (unlikely(!qc))
		goto freeze;

	if (unlikely(qc->tf.flags & ATA_TFLAG_POLLING)) {
		/* this sometimes happens, just clear IRQ */
		ata_chk_status(ap);
		return;
	}

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

err_hsm:
	qc->err_mask |= AC_ERR_HSM;
freeze:
	ata_port_freeze(ap);
}

static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	int handled = 0;
	int i;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
			continue;

		/* turn off SATA_IRQ if not supported */
		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
			bmdma2 &= ~SIL_DMA_SATA_IRQ;

		if (bmdma2 == 0xffffffff ||
		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
			continue;

		sil_host_intr(ap, bmdma2);
		handled = 1;
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */
}

static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* clear IRQ */
	ata_chk_status(ap);
	ata_bmdma_irq_clear(ap);

	/* turn on SATA IRQ if supported */
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base +
	       SIL_SYSCFG);
}

/**
 *	sil_dev_config - Apply device/host-specific errata fixups
 *	@dev: Device to be examined
 *
 *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
 *	device is known to be present, this function is called.
 *	We apply two errata fixups which are specific to Silicon Image,
 *	a Seagate and a Maxtor fixup.
 *
 *	For certain Seagate devices, we must limit the maximum sectors
 *	to under 8K.
 *
 *	For certain Maxtor devices, we must not program the drive
 *	beyond udma5.
 *
 *	Both fixups are unfairly pessimistic.  As soon as I get more
 *	information on these errata, I will create a more exhaustive
 *	list, and apply the fixups to only the specific
 *	devices/hosts/firmwares that need it.
 *
 *	20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
 *	The Maxtor quirk is in the blacklist, but I'm keeping the original
 *	pessimistic fix for the following reasons...
 *	- There seems to be less info on it, only one device gleaned off the
 *	  Windows driver, maybe only one is affected.  More info would be
 *	  greatly appreciated.
 *	- But then again UDMA5 is hardly anything to complain about
 */
static void sil_dev_config(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	unsigned int n, quirks = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (n = 0; sil_blacklist[n].product; n++)
		if (!strcmp(sil_blacklist[n].product, model_num)) {
			quirks = sil_blacklist[n].quirk;
			break;
		}

	/* limit requests to 15 sectors */
	if (slow_down ||
	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
	     (quirks & SIL_QUIRK_MOD15WRITE))) {
		if (print_info)
			ata_dev_printk(dev, KERN_INFO, "applying Seagate "
				       "errata fix (mod15write workaround)\n");
		dev->max_sectors = 15;
		return;
	}

	/* limit to udma5 */
	if (quirks & SIL_QUIRK_UDMA5MAX) {
		if (print_info)
			ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
				       "errata fix %s\n", model_num);
		dev->udma_mask &= ATA_UDMA5;
		return;
	}
}

static void sil_init_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;	/* cls = (line_size/8)+1 */
		for (i = 0; i < host->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_printk(KERN_WARNING, &pdev->dev,
			   "cache line size not set. "
			   "Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < host->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_printk(KERN_INFO, &pdev->dev,
					   "Applying R_ERR on DMA activate "
					   "FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (host->n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}

static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	int board_id = ent->driver_data;
	const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	for (i = 0; i < host->n_ports; i++) {
		struct ata_ioports *ioaddr = &host->ports[i]->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_std_ports(ioaddr);
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}

#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	sil_init_controller(host);
	ata_host_resume(host);

	return 0;
}
#endif

static int __init sil_init(void)
{
	return pci_register_driver(&sil_pci_driver);
}

static void __exit sil_exit(void)
{
	pci_unregister_driver(&sil_pci_driver);
}


module_init(sil_init);
module_exit(sil_exit);