/*
 * pata_amd.c - AMD PATA for new ATA layer
 * (C) 2005-2006 Red Hat Inc
 * Alan Cox <alan@redhat.com>
 *
 * Based on pata-sil680. Errata information is taken from data sheets
 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 * claimed by sata-nv.c.
 *
 * TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 * Documentation publicly available.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.3.10"

/**
 * timing_setup - shared timing computation and load
 * @ap: ATA port being set up
 * @adev: drive being configured
 * @offset: port offset
 * @speed: target speed
 * @clock: clock multiplier (number of times 33MHz for this part)
 *
 * Perform the actual timing set up for Nvidia or AMD PATA devices.
 * The actual devices vary so they all call into this helper function
 * providing the clock multiplier and offset (because AMD and Nvidia put
 * the ports at different locations).
 */

static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* KHz. */
	u8 t;

	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}

	if (peer) {
		/* This may be over conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}

	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;

	/*
	 * Now do the setup work
	 */

	/* Configure the address set up timing */
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));

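	/*
	 * Convert the computed UDMA cycle count into the register value
	 * for this drive: 0xc0 appears to be the UDMA enable bits, the
	 * low bits select the mode (translated through amd_cyc2udma[]
	 * on the faster clocks), and 0x03 is written when UDMA is not
	 * in use.
	 */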
	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;

	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;

	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;

	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;

	default:
		return;
	}

	/* UDMA timing */
	if (at.udma)
		pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}

/**
 * amd_pre_reset - perform reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.
 */

static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int amd_cable_detect(struct ata_port *ap)
{
	static const u32 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 * amd33_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the AMD registers for PIO mode.
 */

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

/**
 * amd33_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the AMD and Nvidia
 * chipset.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}

/* Both host-side and drive-side detection results are worthless on NV
 * PATAs.  Ignore them and just follow what BIOS configured.  Both the
 * current configuration in PCI config reg and ACPI GTM result are
 * cached during driver attach and are consulted to select transfer
 * mode.
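 *
 * The cached UDMA value is one byte per drive: the primary port sits
 * in the high 16 bits of the dword and the master drive in the high
 * byte of each half; bits 7:6 of a byte mean the BIOS enabled UDMA
 * and bits 2:0 select the mode (decoded through udma_mask_map below).
 * This layout is inferred from the shifts in nv_mode_filter() rather
 * than from chipset documentation.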
 */
static unsigned long nv_mode_filter(struct ata_device *dev,
				    unsigned long xfer_mask)
{
	static const unsigned int udma_mask_map[] =
		{ ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
		  ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
	struct ata_port *ap = dev->link->ap;
	char acpi_str[32] = "";
	u32 saved_udma, udma;
	const struct ata_acpi_gtm *gtm;
	unsigned long bios_limit = 0, acpi_limit = 0, limit;

	/* find out what BIOS configured */
	udma = saved_udma = (unsigned long)ap->host->private_data;

	if (ap->port_no == 0)
		udma >>= 16;
	if (dev->devno == 0)
		udma >>= 8;

	if ((udma & 0xc0) == 0xc0)
		bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);

	/* consult ACPI GTM too */
	gtm = ata_acpi_init_gtm(ap);
	if (gtm) {
		acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);

		snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
			 gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
	}

	/* be optimistic, EH can take care of things if something goes wrong */
	limit = bios_limit | acpi_limit;

	/* If PIO or DMA isn't configured at all, don't limit.  Let EH
	 * handle it.
	 */
	if (!(limit & ATA_MASK_PIO))
		limit |= ATA_MASK_PIO;
	if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
		limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;

	ata_port_printk(ap, KERN_DEBUG, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
			"BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
			xfer_mask, limit, xfer_mask & limit, bios_limit,
			saved_udma, acpi_limit, acpi_str);

	return xfer_mask & limit;
}

/**
 * nv_pre_reset - reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.  The BIOS stores the enable bits in PCI config space
 * for us.
 */

static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

/**
 * nv100_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the Nvidia registers for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}

/**
 * nv100_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the AMD and Nvidia
 * chipset.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}

static void nv_host_stop(struct ata_host *host)
{
	u32 udma = (unsigned long)host->private_data;

	/* restore PCI config register 0x60 */
	pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
}

static struct scsi_host_template amd_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static struct ata_port_operations amd33_port_ops = {
	.set_piomode = amd33_set_piomode,
	.set_dmamode = amd33_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_40wire,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd66_port_ops = {
	.set_piomode = amd66_set_piomode,
	.set_dmamode = amd66_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

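/*
 * The remaining op tables differ from the two above only in the timing
 * callbacks and cable detection (PCI config register 0x42 on the
 * UDMA133 AMD parts, ignored entirely on Nvidia); the Nvidia tables
 * additionally use their own pre-reset, mode filter and host_stop
 * hooks.
 */
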
static struct ata_port_operations amd100_port_ops = {
	.set_piomode = amd100_set_piomode,
	.set_dmamode = amd100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd133_port_ops = {
	.set_piomode = amd133_set_piomode,
	.set_dmamode = amd133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = amd_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations nv100_port_ops = {
	.set_piomode = nv100_set_piomode,
	.set_dmamode = nv100_set_dmamode,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_ignore,
	.mode_filter = nv_mode_filter,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
	.host_stop = nv_host_stop,
};

static struct ata_port_operations nv133_port_ops = {
	.set_piomode = nv133_set_piomode,
	.set_dmamode = nv133_set_dmamode,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_ignore,
	.mode_filter = nv_mode_filter,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
	.host_stop = nv_host_stop,
};

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
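	/*
	 * The table below is indexed by the driver_data value from the
	 * amd[] PCI ID table; entries 1/2 (AMD7409 swdma errata) and
	 * 5/6 (AMD8111 vs. the Serenade variant) are selected at probe
	 * time from the revision and subsystem IDs.
	 */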
	static const struct ata_port_info info[10] = {
		{ /* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{ /* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{ /* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{ /* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		}
	};
	struct ata_port_info pi;
	const struct ata_port_info *ppi[] = { &pi, NULL };
	static int printed_version;
	int type = id->driver_data;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && pdev->revision > 0x7)
		type = 2;

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

	/*
	 * Okay, type is determined now.  Apply type-specific workarounds.
	 */
	pi = info[type];

	if (type < 3)
		ata_pci_clear_simplex(pdev);

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

	/* Cable detection on Nvidia chips doesn't work too well,
	 * cache BIOS programmed UDMA mode.
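	 * The saved dword is decoded per drive by nv_mode_filter() and
	 * written back by nv_host_stop() when the host is torn down.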
	 */
	if (type == 7 || type == 8) {
		u32 udma;

		pci_read_config_dword(pdev, 0x60, &udma);
		pi.private_data = (void *)(unsigned long)udma;
	}

	/* And fire it up */
	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
static int amd_reinit_one(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
#endif

static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },

	{ },
};

static struct pci_driver amd_pci_driver = {
	.name = DRV_NAME,
	.id_table = amd,
	.probe = amd_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend = ata_pci_device_suspend,
	.resume = amd_reinit_one,
#endif
};

static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);