1 /* 2 * libata-bmdma.c - helper library for PCI IDE BMDMA 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2006 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from http://www.t13.org/ and 31 * http://www.sata-io.org/ 32 * 33 */ 34 35 #include <linux/kernel.h> 36 #include <linux/pci.h> 37 #include <linux/libata.h> 38 39 #include "libata.h" 40 41 /** 42 * ata_irq_on - Enable interrupts on a port. 43 * @ap: Port on which interrupts are enabled. 44 * 45 * Enable interrupts on a legacy IDE device using MMIO or PIO, 46 * wait for idle, clear any pending interrupts. 47 * 48 * LOCKING: 49 * Inherited from caller. 
 */
u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	/* Clear nIEN in the cached device-control value so the device may
	 * assert INTRQ, and remember the value we are about to write. */
	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	else
		outb(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	/* Discard any interrupt that may have been pending while nIEN
	 * was set; the caller gets the post-idle status back. */
	ap->ops->irq_clear(ap);

	return tmp;
}

/**
 * ata_tf_load_pio - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using
 * port I/O.  If the control register value changed, it is written
 * first, followed by a wait for idle, so the device sees a stable
 * control value before the remaining registers arrive.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* LBA48: the high-order bytes must go out first; the device
	 * latches them into its HOB (high-order byte) shadow registers. */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	/* wait for !BUSY && !DRQ before returning to the caller */
	ata_wait_idle(ap);
}

/**
 * ata_tf_load_mmio - send taskfile registers to host
 * controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* A changed control value is written first, then we wait for
	 * idle so the device is quiescent before the taskfile proper. */
	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* LBA48: high-order bytes go out first into the device's HOB
	 * shadow registers. */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	/* wait for !BUSY && !DRQ before returning to the caller */
	ata_wait_idle(ap);
}


/**
 * ata_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using MMIO
 * or
PIO as indicated by the ATA_FLAG_MMIO flag. 194 * Writes the control, feature, nsect, lbal, lbam, and lbah registers. 195 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect, 196 * hob_lbal, hob_lbam, and hob_lbah. 197 * 198 * This function waits for idle (!BUSY and !DRQ) after writing 199 * registers. If the control register has a new value, this 200 * function also waits for idle after writing control and before 201 * writing the remaining registers. 202 * 203 * May be used as the tf_load() entry in ata_port_operations. 204 * 205 * LOCKING: 206 * Inherited from caller. 207 */ 208 void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) 209 { 210 if (ap->flags & ATA_FLAG_MMIO) 211 ata_tf_load_mmio(ap, tf); 212 else 213 ata_tf_load_pio(ap, tf); 214 } 215 216 /** 217 * ata_exec_command_pio - issue ATA command to host controller 218 * @ap: port to which command is being issued 219 * @tf: ATA taskfile register set 220 * 221 * Issues PIO write to ATA command register, with proper 222 * synchronization with interrupt handler / other threads. 223 * 224 * LOCKING: 225 * spin_lock_irqsave(host lock) 226 */ 227 228 static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf) 229 { 230 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); 231 232 outb(tf->command, ap->ioaddr.command_addr); 233 ata_pause(ap); 234 } 235 236 237 /** 238 * ata_exec_command_mmio - issue ATA command to host controller 239 * @ap: port to which command is being issued 240 * @tf: ATA taskfile register set 241 * 242 * Issues MMIO write to ATA command register, with proper 243 * synchronization with interrupt handler / other threads. 
244 * 245 * FIXME: missing write posting for 400nS delay enforcement 246 * 247 * LOCKING: 248 * spin_lock_irqsave(host lock) 249 */ 250 251 static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) 252 { 253 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); 254 255 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr); 256 ata_pause(ap); 257 } 258 259 260 /** 261 * ata_exec_command - issue ATA command to host controller 262 * @ap: port to which command is being issued 263 * @tf: ATA taskfile register set 264 * 265 * Issues PIO/MMIO write to ATA command register, with proper 266 * synchronization with interrupt handler / other threads. 267 * 268 * LOCKING: 269 * spin_lock_irqsave(host lock) 270 */ 271 void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) 272 { 273 if (ap->flags & ATA_FLAG_MMIO) 274 ata_exec_command_mmio(ap, tf); 275 else 276 ata_exec_command_pio(ap, tf); 277 } 278 279 /** 280 * ata_tf_read_pio - input device's ATA taskfile shadow registers 281 * @ap: Port from which input is read 282 * @tf: ATA taskfile register set for storing input 283 * 284 * Reads ATA taskfile registers for currently-selected device 285 * into @tf. 286 * 287 * LOCKING: 288 * Inherited from caller. 
 */

static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	/* Reading the status register (via ata_check_status) also clears
	 * the device's pending-interrupt condition. */
	tf->command = ata_check_status(ap);
	tf->feature = inb(ioaddr->error_addr);
	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* Select the HOB (high-order byte) shadow registers, then
		 * re-read the address registers to fetch the high bytes.
		 * NOTE(review): ATA_HOB is left set in the control register
		 * on exit — presumably callers rewrite ctl via tf_load
		 * before the next command; confirm. */
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}

/**
 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf via MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	/* Reading status (via ata_check_status) also clears the device's
	 * pending-interrupt condition. */
	tf->command = ata_check_status(ap);
	tf->feature = readb((void __iomem *)ioaddr->error_addr);
	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* Select the HOB shadow registers and re-read the address
		 * registers for the high-order bytes.
		 * NOTE(review): ATA_HOB is left set on exit, mirroring the
		 * PIO variant — confirm callers restore ctl via tf_load. */
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}


/**
 * ata_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf.
 *
 * Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
 * is set, also reads the hob registers.
 *
 * May be used as the tf_read() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Dispatch on the port's access method. */
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_read_mmio(ap, tf);
	else
		ata_tf_read_pio(ap, tf);
}

/**
 * ata_check_status_pio - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and return its value.  This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
382 */ 383 static u8 ata_check_status_pio(struct ata_port *ap) 384 { 385 return inb(ap->ioaddr.status_addr); 386 } 387 388 /** 389 * ata_check_status_mmio - Read device status reg & clear interrupt 390 * @ap: port where the device is 391 * 392 * Reads ATA taskfile status register for currently-selected device 393 * via MMIO and return its value. This also clears pending interrupts 394 * from this device 395 * 396 * LOCKING: 397 * Inherited from caller. 398 */ 399 static u8 ata_check_status_mmio(struct ata_port *ap) 400 { 401 return readb((void __iomem *) ap->ioaddr.status_addr); 402 } 403 404 405 /** 406 * ata_check_status - Read device status reg & clear interrupt 407 * @ap: port where the device is 408 * 409 * Reads ATA taskfile status register for currently-selected device 410 * and return its value. This also clears pending interrupts 411 * from this device 412 * 413 * May be used as the check_status() entry in ata_port_operations. 414 * 415 * LOCKING: 416 * Inherited from caller. 417 */ 418 u8 ata_check_status(struct ata_port *ap) 419 { 420 if (ap->flags & ATA_FLAG_MMIO) 421 return ata_check_status_mmio(ap); 422 return ata_check_status_pio(ap); 423 } 424 425 426 /** 427 * ata_altstatus - Read device alternate status reg 428 * @ap: port where the device is 429 * 430 * Reads ATA taskfile alternate status register for 431 * currently-selected device and return its value. 432 * 433 * Note: may NOT be used as the check_altstatus() entry in 434 * ata_port_operations. 435 * 436 * LOCKING: 437 * Inherited from caller. 438 */ 439 u8 ata_altstatus(struct ata_port *ap) 440 { 441 if (ap->ops->check_altstatus) 442 return ap->ops->check_altstatus(ap); 443 444 if (ap->flags & ATA_FLAG_MMIO) 445 return readb((void __iomem *)ap->ioaddr.altstatus_addr); 446 return inb(ap->ioaddr.altstatus_addr); 447 } 448 449 /** 450 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction 451 * @qc: Info associated with this ATA transaction. 
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expecting, so I think it is best to not add a readb()
	 * without first auditing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}

/**
 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction: read-modify-write of the DMA
	 * command register, setting only the START bit */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}


/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Writes the ATA_DMA_START flag to the DMA command register.
 *
 * May be used as the bmdma_start() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_start_mmio(qc);
	else
		ata_bmdma_start_pio(qc);
}


/**
 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
581 * 582 * Writes address of PRD table to device's PRD Table Address 583 * register, sets the DMA control register, and calls 584 * ops->exec_command() to start the transfer. 585 * 586 * May be used as the bmdma_setup() entry in ata_port_operations. 587 * 588 * LOCKING: 589 * spin_lock_irqsave(host lock) 590 */ 591 void ata_bmdma_setup(struct ata_queued_cmd *qc) 592 { 593 if (qc->ap->flags & ATA_FLAG_MMIO) 594 ata_bmdma_setup_mmio(qc); 595 else 596 ata_bmdma_setup_pio(qc); 597 } 598 599 600 /** 601 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. 602 * @ap: Port associated with this ATA transaction. 603 * 604 * Clear interrupt and error flags in DMA status register. 605 * 606 * May be used as the irq_clear() entry in ata_port_operations. 607 * 608 * LOCKING: 609 * spin_lock_irqsave(host lock) 610 */ 611 612 void ata_bmdma_irq_clear(struct ata_port *ap) 613 { 614 if (!ap->ioaddr.bmdma_addr) 615 return; 616 617 if (ap->flags & ATA_FLAG_MMIO) { 618 void __iomem *mmio = 619 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS; 620 writeb(readb(mmio), mmio); 621 } else { 622 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; 623 outb(inb(addr), addr); 624 } 625 } 626 627 628 /** 629 * ata_bmdma_status - Read PCI IDE BMDMA status 630 * @ap: Port associated with this ATA transaction. 631 * 632 * Read and return BMDMA status register. 633 * 634 * May be used as the bmdma_status() entry in ata_port_operations. 
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

u8 ata_bmdma_status(struct ata_port *ap)
{
	u8 host_stat;
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
	} else
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	return host_stat;
}


/**
 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 * @qc: Command we are ending DMA for
 *
 * Clears the ATA_DMA_START flag in the dma control register.
 *
 * May be used as the bmdma_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

		/* clear start/stop bit */
		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		       mmio + ATA_DMA_CMD);
	} else {
		/* clear start/stop bit */
		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	}

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}

/**
 * ata_bmdma_freeze - Freeze BMDMA controller port
 * @ap: port to freeze
 *
 * Freeze BMDMA controller port: mask device interrupts via nIEN
 * and clear anything already pending.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	/* Set nIEN to mask the device's interrupt line. */
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
	else
		outb(ap->ctl, ioaddr->ctl_addr);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ata_chk_status(ap);

	ap->ops->irq_clear(ap);
}

/**
 * ata_bmdma_thaw - Thaw BMDMA controller port
 * @ap: port to thaw
 *
 * Thaw BMDMA controller port: clear any stale interrupt state,
 * then re-enable device interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);
}

/**
 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 * @ap: port to handle error for
 * @prereset: prereset method (can be NULL)
 * @softreset: softreset method (can be NULL)
 * @hardreset: hardreset method (can be NULL)
 * @postreset: postreset method (can be NULL)
 *
 * Handle error for ATA BMDMA controller.  It can handle both
 * PATA and SATA controllers.  Many controllers should be able to
 * use this EH as-is or with some added handling before and
 * after.
 *
 * This function is intended to be used for constructing
 * ->error_handler callback by low level drivers.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	/* Only inspect the active command if it actually failed. */
	qc = __ata_qc_from_tag(ap, ap->active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	/* Flush pending state: dummy altstatus read, a status read to
	 * ack the device interrupt, and a controller-level irq clear. */
	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}

/**
 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
 * @ap: port to handle error for
 *
 * Stock error handler for BMDMA controller.  Uses the standard
 * prereset/softreset/postreset methods; hardreset only when SCR
 * registers are accessible (i.e. on SATA).
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t hardreset;

	hardreset = NULL;
	if (sata_scr_valid(ap))
		hardreset = sata_std_hardreset;

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
			   ata_std_postreset);
}

/**
 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *	BMDMA controller
 * @qc: internal command to clean up
 *
 * Stops the DMA engine if the port has a BMDMA block.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	if (qc->ap->ioaddr.bmdma_addr)
		ata_bmdma_stop(qc);
}

#ifdef CONFIG_PCI
/**
 * ata_pci_init_native_mode - Initialize native-mode driver
 * @pdev: pci device to be initialized
 * @port: array[2] of pointers to port info structures.
 * @ports: bitmap of ports present
 *
 * Utility function which allocates and initializes an
 * ata_probe_ent structure for a standard dual-port
 * PIO-based IDE controller.  The returned ata_probe_ent
 * structure can be passed to ata_device_add().  The returned
 * ata_probe_ent structure should then be freed with kfree().
 *
 * The caller need only pass the address of the primary port, the
 * secondary will be deduced automatically.  If the device has non
 * standard secondary port mappings this function can be called twice,
 * once for each interface.
 */

struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
{
	struct ata_probe_ent *probe_ent =
		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	int p = 0;
	unsigned long bmdma;

	if (!probe_ent)
		return NULL;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;

	if (ports & ATA_PORT_PRIMARY) {
		/* Primary channel: command block in BAR0, control in BAR1. */
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
		bmdma = pci_resource_start(pdev, 4);
		if (bmdma) {
			/* Bit 7 of the BMDMA status register reports
			 * simplex-only operation (one channel at a time). */
			if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
			    (inb(bmdma + 2) & 0x80))
				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
			probe_ent->port[p].bmdma_addr = bmdma;
		}
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	if (ports & ATA_PORT_SECONDARY) {
		/* Secondary channel: command block in BAR2, control in BAR3;
		 * its BMDMA registers sit 8 bytes past the primary's. */
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
		bmdma = pci_resource_start(pdev, 4);
		if (bmdma) {
			bmdma += 8;
			if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
			    (inb(bmdma + 2) & 0x80))
				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
			probe_ent->port[p].bmdma_addr = bmdma;
		}
		ata_std_ports(&probe_ent->port[p]);
		probe_ent->pinfo2 = port[1];
		p++;
	}

	probe_ent->n_ports = p;
	return probe_ent;
}


/* Build a probe_ent for a controller whose channels are in legacy
 * (compatibility) mode, using the fixed ISA-era port addresses and
 * IRQs rather than PCI BARs 0-3.  BAR4 (BMDMA) is still used if
 * present.  Ports missing from @port_mask become dummy ports. */
static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
				struct ata_port_info **port, int port_mask)
{
	struct ata_probe_ent *probe_ent;
	unsigned long bmdma = pci_resource_start(pdev, 4);

	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	if (!probe_ent)
		return NULL;

	probe_ent->n_ports = 2;
	probe_ent->irq_flags = IRQF_SHARED;

	if (port_mask & ATA_PORT_PRIMARY) {
		probe_ent->irq = ATA_PRIMARY_IRQ(pdev);
		probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD;
		probe_ent->port[0].altstatus_addr =
		probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL;
		if (bmdma) {
			probe_ent->port[0].bmdma_addr = bmdma;
			/* Bit 7 of BMDMA status = simplex-only hardware. */
			if ((!(port[0]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
			    (inb(bmdma + 2) & 0x80))
				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
		}
		ata_std_ports(&probe_ent->port[0]);
	} else
		probe_ent->dummy_port_mask |= ATA_PORT_PRIMARY;

	if (port_mask & ATA_PORT_SECONDARY) {
		/* Use irq2 only if the primary already claimed irq. */
		if (probe_ent->irq)
			probe_ent->irq2 = ATA_SECONDARY_IRQ(pdev);
		else
			probe_ent->irq = ATA_SECONDARY_IRQ(pdev);
		probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD;
		probe_ent->port[1].altstatus_addr =
		probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL;
		if (bmdma) {
			probe_ent->port[1].bmdma_addr = bmdma + 8;
			if ((!(port[1]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
			    (inb(bmdma + 10) & 0x80))
				probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
		}
		ata_std_ports(&probe_ent->port[1]);

		/* FIXME: could be pointing to stack area; must copy */
		probe_ent->pinfo2 = port[1];
	} else
		probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY;

	return probe_ent;
}


/**
 * ata_pci_init_one - Initialize/register PCI IDE host controller
 * @pdev: Controller to be initialized
 * @port_info: Information from low-level host driver
 * @n_ports: Number of ports attached to host controller
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers.
 *
 * This function calls pci_enable_device(), reserves its register
 * regions, sets the dma mask, enables bus master mode, and calls
 * ata_device_add()
 *
 * ASSUMPTION:
 * Nobody makes a single channel controller that appears solely as
 * the secondary legacy port on PCI.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative on errno-based value on error.
 */

int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
		      unsigned int n_ports)
{
	struct ata_probe_ent *probe_ent = NULL;
	struct ata_port_info *port[2];
	u8 mask;
	unsigned int legacy_mode = 0;
	int disable_dev_on_err = 1;
	int rc;

	DPRINTK("ENTER\n");

	BUG_ON(n_ports < 1 || n_ports > 2);

	/* A single-port caller uses the same port info for both slots. */
	port[0] = port_info[0];
	if (n_ports > 1)
		port[1] = port_info[1];
	else
		port[1] = port[0];

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled.  Secondly for shared use as Arjan says we want refcounting

	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled
	 */

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8;

		/* TODO: What if one channel is in native mode ... */
		/* Bits 0 and 2 of the programming-interface byte indicate
		 * native mode for the primary and secondary channel;
		 * anything else means at least one channel is in legacy
		 * (compatibility) mode. */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = (1 << 3);
#if defined(CONFIG_NO_ATA_LEGACY)
		/* Some platforms with PCI limits cannot address compat
		   port space.  In that case we punt if their firmware has
		   left a device in compatibility mode.
		   NOTE(review): this early return leaves the device enabled
		   by pci_enable_device() above — confirm whether a
		   pci_disable_device() is wanted here. */
		if (legacy_mode) {
			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
			return -EOPNOTSUPP;
		}
#endif
	}

	if (!legacy_mode) {
		rc = pci_request_regions(pdev, DRV_NAME);
		if (rc) {
			/* Region conflict: someone else owns the device, so
			 * do not disable it on the way out. */
			disable_dev_on_err = 0;
			goto err_out;
		}
	} else {
		/* Deal with combined mode hack.  This side of the logic all
		   goes away once the combined mode hack is killed in 2.6.21.
		   If the fixed legacy region is busy, walk the resource tree
		   to find the actual owner; a conflict with another "libata"
		   claim is tolerated (the other half of a combined-mode
		   controller), anything else loses the port. */
		if (!request_region(ATA_PRIMARY_CMD, 8, "libata")) {
			struct resource *conflict, res;
			res.start = ATA_PRIMARY_CMD;
			res.end = ATA_PRIMARY_CMD + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			while (conflict->child)
				conflict = ____request_resource(conflict, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= ATA_PORT_PRIMARY;
			else {
				disable_dev_on_err = 0;
				/* NOTE(review): "%0X" here vs "%X" in the
				 * secondary message below — inconsistent
				 * format flags (behavior identical). */
				printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \
						    "ata: conflict with %s\n",
						    ATA_PRIMARY_CMD,
						    conflict->name);
			}
		} else
			legacy_mode |= ATA_PORT_PRIMARY;

		if (!request_region(ATA_SECONDARY_CMD, 8, "libata")) {
			struct resource *conflict, res;
			res.start = ATA_SECONDARY_CMD;
			res.end = ATA_SECONDARY_CMD + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			while (conflict->child)
				conflict = ____request_resource(conflict, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= ATA_PORT_SECONDARY;
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \
						    "ata: conflict with %s\n",
						    ATA_SECONDARY_CMD,
						    conflict->name);
			}
		} else
			legacy_mode |= ATA_PORT_SECONDARY;

		/* Claim the control-block BARs for whichever channels we
		 * managed to reserve above. */
		if (legacy_mode & ATA_PORT_PRIMARY)
			pci_request_region(pdev, 1, DRV_NAME);
		if (legacy_mode & ATA_PORT_SECONDARY)
			pci_request_region(pdev, 3, DRV_NAME);
		/* If there is a DMA resource, allocate it */
		pci_request_region(pdev, 4, DRV_NAME);
	}

	/* we have legacy mode, but all ports are unavailable */
	if (legacy_mode == (1 << 3)) {
		rc = -EBUSY;
		goto err_out_regions;
	}

	/* TODO: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	if (legacy_mode) {
		probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode);
	} else {
		if (n_ports == 2)
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
		else
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
	}
	if (!probe_ent) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_master(pdev);

	if (!ata_device_add(probe_ent)) {
		rc = -ENODEV;
		goto err_out_ent;
	}

	/* probe_ent is consumed by ata_device_add(); free our copy. */
	kfree(probe_ent);

	return 0;

err_out_ent:
	kfree(probe_ent);
err_out_regions:
	/* All this conditional stuff is needed for the combined mode hack
	   until 2.6.21 when it can go */
	if (legacy_mode) {
		pci_release_region(pdev, 4);
		if (legacy_mode & ATA_PORT_PRIMARY) {
			release_region(ATA_PRIMARY_CMD, 8);
			pci_release_region(pdev, 1);
		}
		if (legacy_mode & ATA_PORT_SECONDARY) {
			release_region(ATA_SECONDARY_CMD, 8);
			pci_release_region(pdev, 3);
		}
	} else
		pci_release_regions(pdev);
err_out:
	if (disable_dev_on_err)
		pci_disable_device(pdev);
	return rc;
}

/**
 * ata_pci_clear_simplex - attempt to kick device out of simplex
 * @pdev: PCI device
 *
 * Some PCI ATA devices report simplex mode but in fact can be told to
 * enter non simplex mode.
This implements the neccessary logic to 1157 * perform the task on such devices. Calling it on other devices will 1158 * have -undefined- behaviour. 1159 */ 1160 1161 int ata_pci_clear_simplex(struct pci_dev *pdev) 1162 { 1163 unsigned long bmdma = pci_resource_start(pdev, 4); 1164 u8 simplex; 1165 1166 if (bmdma == 0) 1167 return -ENOENT; 1168 1169 simplex = inb(bmdma + 0x02); 1170 outb(simplex & 0x60, bmdma + 0x02); 1171 simplex = inb(bmdma + 0x02); 1172 if (simplex & 0x80) 1173 return -EOPNOTSUPP; 1174 return 0; 1175 } 1176 1177 unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask) 1178 { 1179 /* Filter out DMA modes if the device has been configured by 1180 the BIOS as PIO only */ 1181 1182 if (ap->ioaddr.bmdma_addr == 0) 1183 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 1184 return xfer_mask; 1185 } 1186 1187 #endif /* CONFIG_PCI */ 1188 1189