1 /* 2 * sata_sx4.c - Promise SATA 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2004 Red Hat, Inc. 9 * 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; see the file COPYING. If not, write to 23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 24 * 25 * 26 * libata documentation is available via 'make {ps|pdf}docs', 27 * as Documentation/DocBook/libata.* 28 * 29 * Hardware documentation available under NDA. 30 * 31 */ 32 33 /* 34 Theory of operation 35 ------------------- 36 37 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy 38 engine, DIMM memory, and four ATA engines (one per SATA port). 39 Data is copied to/from DIMM memory by the HDMA engine, before 40 handing off to one (or more) of the ATA engines. The ATA 41 engines operate solely on DIMM memory. 42 43 The SX4 behaves like a PATA chip, with no SATA controls or 44 knowledge whatsoever, leading to the presumption that 45 PATA<->SATA bridges exist on SX4 boards, external to the 46 PDC20621 chip itself. 47 48 The chip is quite capable, supporting an XOR engine and linked 49 hardware commands (permits a string to transactions to be 50 submitted and waited-on as a single unit), and an optional 51 microprocessor. 52 53 The limiting factor is largely software. This Linux driver was 54 written to multiplex the single HDMA engine to copy disk 55 transactions into a fixed DIMM memory space, from where an ATA 56 engine takes over. As a result, each WRITE looks like this: 57 58 submit HDMA packet to hardware 59 hardware copies data from system memory to DIMM 60 hardware raises interrupt 61 62 submit ATA packet to hardware 63 hardware executes ATA WRITE command, w/ data in DIMM 64 hardware raises interrupt 65 66 and each READ looks like this: 67 68 submit ATA packet to hardware 69 hardware executes ATA READ command, w/ data in DIMM 70 hardware raises interrupt 71 72 submit HDMA packet to hardware 73 hardware copies data from DIMM to system memory 74 hardware raises interrupt 75 76 This is a very slow, lock-step way of doing things that can 77 certainly be improved by motivated kernel hackers. 78 79 */ 80 81 #include <linux/kernel.h> 82 #include <linux/module.h> 83 #include <linux/pci.h> 84 #include <linux/init.h> 85 #include <linux/blkdev.h> 86 #include <linux/delay.h> 87 #include <linux/interrupt.h> 88 #include <linux/device.h> 89 #include <scsi/scsi_host.h> 90 #include <scsi/scsi_cmnd.h> 91 #include <linux/libata.h> 92 #include "sata_promise.h" 93 94 #define DRV_NAME "sata_sx4" 95 #define DRV_VERSION "0.12" 96 97 98 enum { 99 PDC_MMIO_BAR = 3, 100 PDC_DIMM_BAR = 4, 101 102 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */ 103 104 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ 105 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */ 106 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ 107 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */ 108 109 PDC_CTLSTAT = 0x60, /* IDEn control / status */ 110 111 PDC_20621_SEQCTL = 0x400, 112 PDC_20621_SEQMASK = 0x480, 113 PDC_20621_GENERAL_CTL = 0x484, 114 PDC_20621_PAGE_SIZE = (32 * 1024), 115 116 /* chosen, not constant, values; we design our own DIMM mem map */ 117 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */ 118 PDC_20621_DIMM_BASE = 0x00200000, 119 PDC_20621_DIMM_DATA = (64 * 1024), 120 PDC_DIMM_DATA_STEP = (256 * 1024), 121 PDC_DIMM_WINDOW_STEP = (8 * 1024), 122 PDC_DIMM_HOST_PRD = (6 * 1024), 123 PDC_DIMM_HOST_PKT = (128 * 0), 124 PDC_DIMM_HPKT_PRD = (128 * 1), 125 PDC_DIMM_ATA_PKT = (128 * 2), 126 PDC_DIMM_APKT_PRD = (128 * 3), 127 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128, 128 PDC_PAGE_WINDOW = 0x40, 129 PDC_PAGE_DATA = PDC_PAGE_WINDOW + 130 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE), 131 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE, 132 133 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */ 134 135 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 136 (1<<23), 137 138 board_20621 = 0, /* FastTrak S150 SX4 */ 139 140 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */ 141 PDC_RESET = (1 << 11), /* HDMA/ATA reset */ 142 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */ 143 144 PDC_MAX_HDMA = 32, 145 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1), 146 147 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50, 148 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51, 149 PDC_I2C_CONTROL = 0x48, 150 PDC_I2C_ADDR_DATA = 0x4C, 151 PDC_DIMM0_CONTROL = 0x80, 152 PDC_DIMM1_CONTROL = 0x84, 153 PDC_SDRAM_CONTROL = 0x88, 154 PDC_I2C_WRITE = 0, /* master -> slave */ 155 PDC_I2C_READ = (1 << 6), /* master <- slave */ 156 PDC_I2C_START = (1 << 7), /* start I2C proto */ 157 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */ 158 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */ 159 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */ 160 PDC_DIMM_SPD_SUBADDRESS_START = 0x00, 161 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F, 162 PDC_DIMM_SPD_ROW_NUM = 3, 163 PDC_DIMM_SPD_COLUMN_NUM = 4, 164 PDC_DIMM_SPD_MODULE_ROW = 5, 165 PDC_DIMM_SPD_TYPE = 11, 166 PDC_DIMM_SPD_FRESH_RATE = 12, 167 PDC_DIMM_SPD_BANK_NUM = 17, 168 PDC_DIMM_SPD_CAS_LATENCY = 18, 169 PDC_DIMM_SPD_ATTRIBUTE = 21, 170 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27, 171 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28, 172 PDC_DIMM_SPD_RAS_CAS_DELAY = 29, 173 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30, 174 PDC_DIMM_SPD_SYSTEM_FREQ = 126, 175 PDC_CTL_STATUS = 0x08, 176 PDC_DIMM_WINDOW_CTLR = 0x0C, 177 PDC_TIME_CONTROL = 0x3C, 178 PDC_TIME_PERIOD = 0x40, 179 PDC_TIME_COUNTER = 0x44, 180 PDC_GENERAL_CTLR = 0x484, 181 PCI_PLL_INIT = 0x8A531824, 182 PCI_X_TCOUNT = 0xEE1E5CFF, 183 184 /* PDC_TIME_CONTROL bits */ 185 PDC_TIMER_BUZZER = (1 << 10), 186 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */ 187 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */ 188 PDC_TIMER_ENABLE = (1 << 7), 189 PDC_TIMER_MASK_INT = (1 << 5), 190 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */ 191 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE | 192 PDC_TIMER_ENABLE | 193 PDC_TIMER_MASK_INT, 194 }; 195 196 #define ECC_ERASE_BUF_SZ (128 * 1024) 197 198 struct pdc_port_priv { 199 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512]; 200 u8 *pkt; 201 dma_addr_t pkt_dma; 202 }; 203 204 struct pdc_host_priv { 205 unsigned int doing_hdma; 206 unsigned int hdma_prod; 207 unsigned int hdma_cons; 208 struct { 209 struct ata_queued_cmd *qc; 210 unsigned int seq; 211 unsigned long pkt_ofs; 212 } hdma[32]; 213 }; 214 215 216 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 217 static void pdc_error_handler(struct ata_port *ap); 218 static void pdc_freeze(struct ata_port *ap); 219 static void pdc_thaw(struct ata_port *ap); 220 static int pdc_port_start(struct ata_port *ap); 221 static void pdc20621_qc_prep(struct ata_queued_cmd *qc); 222 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 223 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 224 static unsigned int pdc20621_dimm_init(struct ata_host *host); 225 static int pdc20621_detect_dimm(struct ata_host *host); 226 static unsigned int pdc20621_i2c_read(struct ata_host *host, 227 u32 device, u32 subaddr, u32 *pdata); 228 static int pdc20621_prog_dimm0(struct ata_host *host); 229 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host); 230 #ifdef ATA_VERBOSE_DEBUG 231 static void pdc20621_get_from_dimm(struct ata_host *host, 232 void *psource, u32 offset, u32 size); 233 #endif 234 static void pdc20621_put_to_dimm(struct ata_host *host, 235 void *psource, u32 offset, u32 size); 236 static void pdc20621_irq_clear(struct ata_port *ap); 237 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc); 238 static int pdc_softreset(struct ata_link *link, unsigned int *class, 239 unsigned long deadline); 240 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); 241 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); 242 243 244 static struct scsi_host_template pdc_sata_sht = { 245 ATA_BASE_SHT(DRV_NAME), 246 .sg_tablesize = LIBATA_MAX_PRD, 247 .dma_boundary = ATA_DMA_BOUNDARY, 248 }; 249 250 /* TODO: inherit from base port_ops after converting to new EH */ 251 static struct ata_port_operations pdc_20621_ops = { 252 .inherits = &ata_sff_port_ops, 253 254 .check_atapi_dma = pdc_check_atapi_dma, 255 .qc_prep = pdc20621_qc_prep, 256 .qc_issue = pdc20621_qc_issue, 257 258 .freeze = pdc_freeze, 259 .thaw = pdc_thaw, 260 .softreset = pdc_softreset, 261 .error_handler = pdc_error_handler, 262 .lost_interrupt = ATA_OP_NULL, 263 .post_internal_cmd = pdc_post_internal_cmd, 264 265 .port_start = pdc_port_start, 266 267 .sff_tf_load = pdc_tf_load_mmio, 268 .sff_exec_command = pdc_exec_command_mmio, 269 .sff_irq_clear = pdc20621_irq_clear, 270 }; 271 272 static const struct ata_port_info pdc_port_info[] = { 273 /* board_20621 */ 274 { 275 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 276 ATA_FLAG_SRST | ATA_FLAG_MMIO | 277 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING, 278 .pio_mask = ATA_PIO4, 279 .mwdma_mask = ATA_MWDMA2, 280 .udma_mask = ATA_UDMA6, 281 .port_ops = &pdc_20621_ops, 282 }, 283 284 }; 285 286 static const struct pci_device_id pdc_sata_pci_tbl[] = { 287 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 }, 288 289 { } /* terminate list */ 290 }; 291 292 static struct pci_driver pdc_sata_pci_driver = { 293 .name = DRV_NAME, 294 .id_table = pdc_sata_pci_tbl, 295 .probe = pdc_sata_init_one, 296 .remove = ata_pci_remove_one, 297 }; 298 299 300 static int pdc_port_start(struct ata_port *ap) 301 { 302 struct device *dev = ap->host->dev; 303 struct pdc_port_priv *pp; 304 int rc; 305 306 rc = ata_port_start(ap); 307 if (rc) 308 return rc; 309 310 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 311 if (!pp) 312 return -ENOMEM; 313 314 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 315 if (!pp->pkt) 316 return -ENOMEM; 317 318 ap->private_data = pp; 319 320 return 0; 321 } 322 323 static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, 324 unsigned int portno, 325 unsigned int total_len) 326 { 327 u32 addr; 328 unsigned int dw = PDC_DIMM_APKT_PRD >> 2; 329 __le32 *buf32 = (__le32 *) buf; 330 331 /* output ATA packet S/G table */ 332 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + 333 (PDC_DIMM_DATA_STEP * portno); 334 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr); 335 buf32[dw] = cpu_to_le32(addr); 336 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); 337 338 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n", 339 PDC_20621_DIMM_BASE + 340 (PDC_DIMM_WINDOW_STEP * portno) + 341 PDC_DIMM_APKT_PRD, 342 buf32[dw], buf32[dw + 1]); 343 } 344 345 static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf, 346 unsigned int portno, 347 unsigned int total_len) 348 { 349 u32 addr; 350 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2; 351 __le32 *buf32 = (__le32 *) buf; 352 353 /* output Host DMA packet S/G table */ 354 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + 355 (PDC_DIMM_DATA_STEP * portno); 356 357 buf32[dw] = cpu_to_le32(addr); 358 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); 359 360 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n", 361 PDC_20621_DIMM_BASE + 362 (PDC_DIMM_WINDOW_STEP * portno) + 363 PDC_DIMM_HPKT_PRD, 364 buf32[dw], buf32[dw + 1]); 365 } 366 367 static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf, 368 unsigned int devno, u8 *buf, 369 unsigned int portno) 370 { 371 unsigned int i, dw; 372 __le32 *buf32 = (__le32 *) buf; 373 u8 dev_reg; 374 375 unsigned int dimm_sg = PDC_20621_DIMM_BASE + 376 (PDC_DIMM_WINDOW_STEP * portno) + 377 PDC_DIMM_APKT_PRD; 378 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); 379 380 i = PDC_DIMM_ATA_PKT; 381 382 /* 383 * Set up ATA packet 384 */ 385 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE))) 386 buf[i++] = PDC_PKT_READ; 387 else if (tf->protocol == ATA_PROT_NODATA) 388 buf[i++] = PDC_PKT_NODATA; 389 else 390 buf[i++] = 0; 391 buf[i++] = 0; /* reserved */ 392 buf[i++] = portno + 1; /* seq. id */ 393 buf[i++] = 0xff; /* delay seq. id */ 394 395 /* dimm dma S/G, and next-pkt */ 396 dw = i >> 2; 397 if (tf->protocol == ATA_PROT_NODATA) 398 buf32[dw] = 0; 399 else 400 buf32[dw] = cpu_to_le32(dimm_sg); 401 buf32[dw + 1] = 0; 402 i += 8; 403 404 if (devno == 0) 405 dev_reg = ATA_DEVICE_OBS; 406 else 407 dev_reg = ATA_DEVICE_OBS | ATA_DEV1; 408 409 /* select device */ 410 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE; 411 buf[i++] = dev_reg; 412 413 /* device control register */ 414 buf[i++] = (1 << 5) | PDC_REG_DEVCTL; 415 buf[i++] = tf->ctl; 416 417 return i; 418 } 419 420 static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf, 421 unsigned int portno) 422 { 423 unsigned int dw; 424 u32 tmp; 425 __le32 *buf32 = (__le32 *) buf; 426 427 unsigned int host_sg = PDC_20621_DIMM_BASE + 428 (PDC_DIMM_WINDOW_STEP * portno) + 429 PDC_DIMM_HOST_PRD; 430 unsigned int dimm_sg = PDC_20621_DIMM_BASE + 431 (PDC_DIMM_WINDOW_STEP * portno) + 432 PDC_DIMM_HPKT_PRD; 433 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); 434 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg); 435 436 dw = PDC_DIMM_HOST_PKT >> 2; 437 438 /* 439 * Set up Host DMA packet 440 */ 441 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE))) 442 tmp = PDC_PKT_READ; 443 else 444 tmp = 0; 445 tmp |= ((portno + 1 + 4) << 16); /* seq. id */ 446 tmp |= (0xff << 24); /* delay seq. id */ 447 buf32[dw + 0] = cpu_to_le32(tmp); 448 buf32[dw + 1] = cpu_to_le32(host_sg); 449 buf32[dw + 2] = cpu_to_le32(dimm_sg); 450 buf32[dw + 3] = 0; 451 452 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n", 453 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + 454 PDC_DIMM_HOST_PKT, 455 buf32[dw + 0], 456 buf32[dw + 1], 457 buf32[dw + 2], 458 buf32[dw + 3]); 459 } 460 461 static void pdc20621_dma_prep(struct ata_queued_cmd *qc) 462 { 463 struct scatterlist *sg; 464 struct ata_port *ap = qc->ap; 465 struct pdc_port_priv *pp = ap->private_data; 466 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; 467 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; 468 unsigned int portno = ap->port_no; 469 unsigned int i, si, idx, total_len = 0, sgt_len; 470 __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 471 472 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); 473 474 VPRINTK("ata%u: ENTER\n", ap->print_id); 475 476 /* hard-code chip #0 */ 477 mmio += PDC_CHIP0_OFS; 478 479 /* 480 * Build S/G table 481 */ 482 idx = 0; 483 for_each_sg(qc->sg, sg, qc->n_elem, si) { 484 buf[idx++] = cpu_to_le32(sg_dma_address(sg)); 485 buf[idx++] = cpu_to_le32(sg_dma_len(sg)); 486 total_len += sg_dma_len(sg); 487 } 488 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT); 489 sgt_len = idx * 4; 490 491 /* 492 * Build ATA, host DMA packets 493 */ 494 pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); 495 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno); 496 497 pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); 498 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno); 499 500 if (qc->tf.flags & ATA_TFLAG_LBA48) 501 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i); 502 else 503 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i); 504 505 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i); 506 507 /* copy three S/G tables and two packets to DIMM MMIO window */ 508 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP), 509 &pp->dimm_buf, PDC_DIMM_HEADER_SZ); 510 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) + 511 PDC_DIMM_HOST_PRD, 512 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len); 513 514 /* force host FIFO dump */ 515 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL); 516 517 readl(dimm_mmio); /* MMIO PCI posting flush */ 518 519 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len); 520 } 521 522 static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) 523 { 524 struct ata_port *ap = qc->ap; 525 struct pdc_port_priv *pp = ap->private_data; 526 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; 527 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; 528 unsigned int portno = ap->port_no; 529 unsigned int i; 530 531 VPRINTK("ata%u: ENTER\n", ap->print_id); 532 533 /* hard-code chip #0 */ 534 mmio += PDC_CHIP0_OFS; 535 536 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno); 537 538 if (qc->tf.flags & ATA_TFLAG_LBA48) 539 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i); 540 else 541 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i); 542 543 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i); 544 545 /* copy three S/G tables and two packets to DIMM MMIO window */ 546 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP), 547 &pp->dimm_buf, PDC_DIMM_HEADER_SZ); 548 549 /* force host FIFO dump */ 550 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL); 551 552 readl(dimm_mmio); /* MMIO PCI posting flush */ 553 554 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); 555 } 556 557 static void pdc20621_qc_prep(struct ata_queued_cmd *qc) 558 { 559 switch (qc->tf.protocol) { 560 case ATA_PROT_DMA: 561 pdc20621_dma_prep(qc); 562 break; 563 case ATA_PROT_NODATA: 564 pdc20621_nodata_prep(qc); 565 break; 566 default: 567 break; 568 } 569 } 570 571 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, 572 unsigned int seq, 573 u32 pkt_ofs) 574 { 575 struct ata_port *ap = qc->ap; 576 struct ata_host *host = ap->host; 577 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 578 579 /* hard-code chip #0 */ 580 mmio += PDC_CHIP0_OFS; 581 582 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); 583 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ 584 585 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT); 586 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */ 587 } 588 589 static void pdc20621_push_hdma(struct ata_queued_cmd *qc, 590 unsigned int seq, 591 u32 pkt_ofs) 592 { 593 struct ata_port *ap = qc->ap; 594 struct pdc_host_priv *pp = ap->host->private_data; 595 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK; 596 597 if (!pp->doing_hdma) { 598 __pdc20621_push_hdma(qc, seq, pkt_ofs); 599 pp->doing_hdma = 1; 600 return; 601 } 602 603 pp->hdma[idx].qc = qc; 604 pp->hdma[idx].seq = seq; 605 pp->hdma[idx].pkt_ofs = pkt_ofs; 606 pp->hdma_prod++; 607 } 608 609 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc) 610 { 611 struct ata_port *ap = qc->ap; 612 struct pdc_host_priv *pp = ap->host->private_data; 613 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK; 614 615 /* if nothing on queue, we're done */ 616 if (pp->hdma_prod == pp->hdma_cons) { 617 pp->doing_hdma = 0; 618 return; 619 } 620 621 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq, 622 pp->hdma[idx].pkt_ofs); 623 pp->hdma_cons++; 624 } 625 626 #ifdef ATA_VERBOSE_DEBUG 627 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc) 628 { 629 struct ata_port *ap = qc->ap; 630 unsigned int port_no = ap->port_no; 631 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; 632 633 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP); 634 dimm_mmio += PDC_DIMM_HOST_PKT; 635 636 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio)); 637 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4)); 638 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8)); 639 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12)); 640 } 641 #else 642 static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { } 643 #endif /* ATA_VERBOSE_DEBUG */ 644 645 static void pdc20621_packet_start(struct ata_queued_cmd *qc) 646 { 647 struct ata_port *ap = qc->ap; 648 struct ata_host *host = ap->host; 649 unsigned int port_no = ap->port_no; 650 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 651 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 652 u8 seq = (u8) (port_no + 1); 653 unsigned int port_ofs; 654 655 /* hard-code chip #0 */ 656 mmio += PDC_CHIP0_OFS; 657 658 VPRINTK("ata%u: ENTER\n", ap->print_id); 659 660 wmb(); /* flush PRD, pkt writes */ 661 662 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no); 663 664 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */ 665 if (rw && qc->tf.protocol == ATA_PROT_DMA) { 666 seq += 4; 667 668 pdc20621_dump_hdma(qc); 669 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT); 670 VPRINTK("queued ofs 0x%x (%u), seq %u\n", 671 port_ofs + PDC_DIMM_HOST_PKT, 672 port_ofs + PDC_DIMM_HOST_PKT, 673 seq); 674 } else { 675 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); 676 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ 677 678 writel(port_ofs + PDC_DIMM_ATA_PKT, 679 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 680 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 681 VPRINTK("submitted ofs 0x%x (%u), seq %u\n", 682 port_ofs + PDC_DIMM_ATA_PKT, 683 port_ofs + PDC_DIMM_ATA_PKT, 684 seq); 685 } 686 } 687 688 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc) 689 { 690 switch (qc->tf.protocol) { 691 case ATA_PROT_NODATA: 692 if (qc->tf.flags & ATA_TFLAG_POLLING) 693 break; 694 /*FALLTHROUGH*/ 695 case ATA_PROT_DMA: 696 pdc20621_packet_start(qc); 697 return 0; 698 699 case ATAPI_PROT_DMA: 700 BUG(); 701 break; 702 703 default: 704 break; 705 } 706 707 return ata_sff_qc_issue(qc); 708 } 709 710 static inline unsigned int pdc20621_host_intr(struct ata_port *ap, 711 struct ata_queued_cmd *qc, 712 unsigned int doing_hdma, 713 void __iomem *mmio) 714 { 715 unsigned int port_no = ap->port_no; 716 unsigned int port_ofs = 717 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no); 718 u8 status; 719 unsigned int handled = 0; 720 721 VPRINTK("ENTER\n"); 722 723 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */ 724 (!(qc->tf.flags & ATA_TFLAG_WRITE))) { 725 726 /* step two - DMA from DIMM to host */ 727 if (doing_hdma) { 728 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id, 729 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 730 /* get drive status; clear intr; complete txn */ 731 qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); 732 ata_qc_complete(qc); 733 pdc20621_pop_hdma(qc); 734 } 735 736 /* step one - exec ATA command */ 737 else { 738 u8 seq = (u8) (port_no + 1 + 4); 739 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id, 740 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 741 742 /* submit hdma pkt */ 743 pdc20621_dump_hdma(qc); 744 pdc20621_push_hdma(qc, seq, 745 port_ofs + PDC_DIMM_HOST_PKT); 746 } 747 handled = 1; 748 749 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */ 750 751 /* step one - DMA from host to DIMM */ 752 if (doing_hdma) { 753 u8 seq = (u8) (port_no + 1); 754 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id, 755 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 756 757 /* submit ata pkt */ 758 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); 759 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); 760 writel(port_ofs + PDC_DIMM_ATA_PKT, 761 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 762 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 763 } 764 765 /* step two - execute ATA command */ 766 else { 767 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id, 768 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); 769 /* get drive status; clear intr; complete txn */ 770 qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); 771 ata_qc_complete(qc); 772 pdc20621_pop_hdma(qc); 773 } 774 handled = 1; 775 776 /* command completion, but no data xfer */ 777 } else if (qc->tf.protocol == ATA_PROT_NODATA) { 778 779 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 780 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); 781 qc->err_mask |= ac_err_mask(status); 782 ata_qc_complete(qc); 783 handled = 1; 784 785 } else { 786 ap->stats.idle_irq++; 787 } 788 789 return handled; 790 } 791 792 static void pdc20621_irq_clear(struct ata_port *ap) 793 { 794 ioread8(ap->ioaddr.status_addr); 795 } 796 797 static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) 798 { 799 struct ata_host *host = dev_instance; 800 struct ata_port *ap; 801 u32 mask = 0; 802 unsigned int i, tmp, port_no; 803 unsigned int handled = 0; 804 void __iomem *mmio_base; 805 806 VPRINTK("ENTER\n"); 807 808 if (!host || !host->iomap[PDC_MMIO_BAR]) { 809 VPRINTK("QUICK EXIT\n"); 810 return IRQ_NONE; 811 } 812 813 mmio_base = host->iomap[PDC_MMIO_BAR]; 814 815 /* reading should also clear interrupts */ 816 mmio_base += PDC_CHIP0_OFS; 817 mask = readl(mmio_base + PDC_20621_SEQMASK); 818 VPRINTK("mask == 0x%x\n", mask); 819 820 if (mask == 0xffffffff) { 821 VPRINTK("QUICK EXIT 2\n"); 822 return IRQ_NONE; 823 } 824 mask &= 0xffff; /* only 16 tags possible */ 825 if (!mask) { 826 VPRINTK("QUICK EXIT 3\n"); 827 return IRQ_NONE; 828 } 829 830 spin_lock(&host->lock); 831 832 for (i = 1; i < 9; i++) { 833 port_no = i - 1; 834 if (port_no > 3) 835 port_no -= 4; 836 if (port_no >= host->n_ports) 837 ap = NULL; 838 else 839 ap = host->ports[port_no]; 840 tmp = mask & (1 << i); 841 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 842 if (tmp && ap && 843 !(ap->flags & ATA_FLAG_DISABLED)) { 844 struct ata_queued_cmd *qc; 845 846 qc = ata_qc_from_tag(ap, ap->link.active_tag); 847 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 848 handled += pdc20621_host_intr(ap, qc, (i > 4), 849 mmio_base); 850 } 851 } 852 853 spin_unlock(&host->lock); 854 855 VPRINTK("mask == 0x%x\n", mask); 856 857 VPRINTK("EXIT\n"); 858 859 return IRQ_RETVAL(handled); 860 } 861 862 static void pdc_freeze(struct ata_port *ap) 863 { 864 void __iomem *mmio = ap->ioaddr.cmd_addr; 865 u32 tmp; 866 867 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */ 868 869 tmp = readl(mmio + PDC_CTLSTAT); 870 tmp |= PDC_MASK_INT; 871 tmp &= ~PDC_DMA_ENABLE; 872 writel(tmp, mmio + PDC_CTLSTAT); 873 readl(mmio + PDC_CTLSTAT); /* flush */ 874 } 875 876 static void pdc_thaw(struct ata_port *ap) 877 { 878 void __iomem *mmio = ap->ioaddr.cmd_addr; 879 u32 tmp; 880 881 /* FIXME: start HDMA engine, if zero ATA engines running */ 882 883 /* clear IRQ */ 884 ioread8(ap->ioaddr.status_addr); 885 886 /* turn IRQ back on */ 887 tmp = readl(mmio + PDC_CTLSTAT); 888 tmp &= ~PDC_MASK_INT; 889 writel(tmp, mmio + PDC_CTLSTAT); 890 readl(mmio + PDC_CTLSTAT); /* flush */ 891 } 892 893 static void pdc_reset_port(struct ata_port *ap) 894 { 895 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; 896 unsigned int i; 897 u32 tmp; 898 899 /* FIXME: handle HDMA copy engine */ 900 901 for (i = 11; i > 0; i--) { 902 tmp = readl(mmio); 903 if (tmp & PDC_RESET) 904 break; 905 906 udelay(100); 907 908 tmp |= PDC_RESET; 909 writel(tmp, mmio); 910 } 911 912 tmp &= ~PDC_RESET; 913 writel(tmp, mmio); 914 readl(mmio); /* flush */ 915 } 916 917 static int pdc_softreset(struct ata_link *link, unsigned int *class, 918 unsigned long deadline) 919 { 920 pdc_reset_port(link->ap); 921 return ata_sff_softreset(link, class, deadline); 922 } 923 924 static void pdc_error_handler(struct ata_port *ap) 925 { 926 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 927 pdc_reset_port(ap); 928 929 ata_std_error_handler(ap); 930 } 931 932 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) 933 { 934 struct ata_port *ap = qc->ap; 935 936 /* make DMA engine forget about the failed command */ 937 if (qc->flags & ATA_QCFLAG_FAILED) 938 pdc_reset_port(ap); 939 } 940 941 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc) 942 { 943 u8 *scsicmd = qc->scsicmd->cmnd; 944 int pio = 1; /* atapi dma off by default */ 945 946 /* Whitelist commands that may use DMA. */ 947 switch (scsicmd[0]) { 948 case WRITE_12: 949 case WRITE_10: 950 case WRITE_6: 951 case READ_12: 952 case READ_10: 953 case READ_6: 954 case 0xad: /* READ_DVD_STRUCTURE */ 955 case 0xbe: /* READ_CD */ 956 pio = 0; 957 } 958 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */ 959 if (scsicmd[0] == WRITE_10) { 960 unsigned int lba = 961 (scsicmd[2] << 24) | 962 (scsicmd[3] << 16) | 963 (scsicmd[4] << 8) | 964 scsicmd[5]; 965 if (lba >= 0xFFFF4FA2) 966 pio = 1; 967 } 968 return pio; 969 } 970 971 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) 972 { 973 WARN_ON(tf->protocol == ATA_PROT_DMA || 974 tf->protocol == ATAPI_PROT_DMA); 975 ata_sff_tf_load(ap, tf); 976 } 977 978 979 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) 980 { 981 WARN_ON(tf->protocol == ATA_PROT_DMA || 982 tf->protocol == ATAPI_PROT_DMA); 983 ata_sff_exec_command(ap, tf); 984 } 985 986 987 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base) 988 { 989 port->cmd_addr = base; 990 port->data_addr = base; 991 port->feature_addr = 992 port->error_addr = base + 0x4; 993 port->nsect_addr = base + 0x8; 994 port->lbal_addr = base + 0xc; 995 port->lbam_addr = base + 0x10; 996 port->lbah_addr = base + 0x14; 997 port->device_addr = base + 0x18; 998 port->command_addr = 999 port->status_addr = base + 0x1c; 1000 port->altstatus_addr = 1001 port->ctl_addr = base + 0x38; 1002 } 1003 1004 1005 #ifdef ATA_VERBOSE_DEBUG 1006 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, 1007 u32 offset, u32 size) 1008 { 1009 u32 window_size; 1010 u16 idx; 1011 u8 page_mask; 1012 long dist; 1013 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 1014 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; 1015 1016 /* hard-code chip #0 */ 1017 mmio += PDC_CHIP0_OFS; 1018 1019 page_mask = 0x00; 1020 window_size = 0x2000 * 4; /* 32K byte uchar size */ 1021 idx = (u16) (offset / window_size); 1022 1023 writel(0x01, mmio + PDC_GENERAL_CTLR); 1024 readl(mmio + PDC_GENERAL_CTLR); 1025 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1026 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1027 1028 offset -= (idx * window_size); 1029 idx++; 1030 dist = ((long) (window_size - (offset + size))) >= 0 ? size : 1031 (long) (window_size - offset); 1032 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4), 1033 dist); 1034 1035 psource += dist; 1036 size -= dist; 1037 for (; (long) size >= (long) window_size ;) { 1038 writel(0x01, mmio + PDC_GENERAL_CTLR); 1039 readl(mmio + PDC_GENERAL_CTLR); 1040 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1041 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1042 memcpy_fromio((char *) psource, (char *) (dimm_mmio), 1043 window_size / 4); 1044 psource += window_size; 1045 size -= window_size; 1046 idx++; 1047 } 1048 1049 if (size) { 1050 writel(0x01, mmio + PDC_GENERAL_CTLR); 1051 readl(mmio + PDC_GENERAL_CTLR); 1052 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1053 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1054 memcpy_fromio((char *) psource, (char *) (dimm_mmio), 1055 size / 4); 1056 } 1057 } 1058 #endif 1059 1060 1061 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, 1062 u32 offset, u32 size) 1063 { 1064 u32 window_size; 1065 u16 idx; 1066 u8 page_mask; 1067 long dist; 1068 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 1069 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; 1070 1071 /* hard-code chip #0 */ 1072 mmio += PDC_CHIP0_OFS; 1073 1074 page_mask = 0x00; 1075 window_size = 0x2000 * 4; /* 32K byte uchar size */ 1076 idx = (u16) (offset / window_size); 1077 1078 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1079 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1080 offset -= (idx * window_size); 1081 idx++; 1082 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size : 1083 (long) (window_size - offset); 1084 memcpy_toio(dimm_mmio + offset / 4, psource, dist); 1085 writel(0x01, mmio + PDC_GENERAL_CTLR); 1086 readl(mmio + PDC_GENERAL_CTLR); 1087 1088 psource += dist; 1089 size -= dist; 1090 for (; (long) size >= (long) window_size ;) { 1091 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1092 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1093 memcpy_toio(dimm_mmio, psource, window_size / 4); 1094 writel(0x01, mmio + PDC_GENERAL_CTLR); 1095 readl(mmio + PDC_GENERAL_CTLR); 1096 psource += window_size; 1097 size -= window_size; 1098 idx++; 1099 } 1100 1101 if (size) { 1102 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); 1103 readl(mmio + PDC_DIMM_WINDOW_CTLR); 1104 memcpy_toio(dimm_mmio, psource, size / 4); 1105 writel(0x01, mmio + PDC_GENERAL_CTLR); 1106 readl(mmio + PDC_GENERAL_CTLR); 1107 } 1108 } 1109 1110 1111 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, 1112 u32 subaddr, u32 *pdata) 1113 { 1114 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 1115 u32 i2creg = 0; 1116 u32 status; 1117 u32 count = 0; 1118 1119 /* hard-code chip #0 */ 1120 mmio += PDC_CHIP0_OFS; 1121 1122 i2creg |= device << 24; 1123 i2creg |= subaddr << 16; 1124 1125 /* Set the device and subaddress */ 1126 writel(i2creg, mmio + PDC_I2C_ADDR_DATA); 1127 readl(mmio + PDC_I2C_ADDR_DATA); 1128 1129 /* Write Control to perform read operation, mask int */ 1130 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT, 1131 mmio + PDC_I2C_CONTROL); 1132 1133 for (count = 0; count <= 1000; count ++) { 1134 status = readl(mmio + PDC_I2C_CONTROL); 1135 if (status & PDC_I2C_COMPLETE) { 1136 status = readl(mmio + PDC_I2C_ADDR_DATA); 1137 break; 1138 } else if (count == 1000) 1139 return 0; 1140 } 1141 1142 *pdata = (status >> 8) & 0x000000ff; 1143 return 1; 1144 } 1145 1146 1147 static int pdc20621_detect_dimm(struct ata_host *host) 1148 { 1149 u32 data = 0; 1150 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1151 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) { 1152 if (data == 100) 1153 return 100; 1154 } else 1155 return 0; 1156 1157 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { 1158 if (data <= 0x75) 1159 return 133; 1160 } else 1161 return 0; 1162 1163 return 0; 1164 } 1165 1166 1167 static int pdc20621_prog_dimm0(struct ata_host *host) 1168 { 1169 u32 spd0[50]; 1170 u32 data = 0; 1171 int size, i; 1172 u8 bdimmsize; 1173 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 1174 static const struct { 1175 unsigned int reg; 1176 unsigned int ofs; 1177 } pdc_i2c_read_data [] = { 1178 { PDC_DIMM_SPD_TYPE, 11 }, 1179 { PDC_DIMM_SPD_FRESH_RATE, 12 }, 1180 { PDC_DIMM_SPD_COLUMN_NUM, 4 }, 1181 { PDC_DIMM_SPD_ATTRIBUTE, 21 }, 1182 { PDC_DIMM_SPD_ROW_NUM, 3 }, 1183 { PDC_DIMM_SPD_BANK_NUM, 17 }, 1184 { PDC_DIMM_SPD_MODULE_ROW, 5 }, 1185 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 }, 1186 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 }, 1187 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 }, 1188 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 }, 1189 { PDC_DIMM_SPD_CAS_LATENCY, 18 }, 1190 }; 1191 1192 /* hard-code chip #0 */ 1193 mmio += PDC_CHIP0_OFS; 1194 1195 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++) 1196 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1197 pdc_i2c_read_data[i].reg, 1198 &spd0[pdc_i2c_read_data[i].ofs]); 1199 1200 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); 1201 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | 1202 ((((spd0[27] + 9) / 10) - 1) << 8) ; 1203 data |= (((((spd0[29] > spd0[28]) 1204 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10; 1205 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12; 1206 1207 if (spd0[18] & 0x08) 1208 data |= ((0x03) << 14); 1209 else if (spd0[18] & 0x04) 1210 data |= ((0x02) << 14); 1211 else if (spd0[18] & 0x01) 1212 data |= ((0x01) << 14); 1213 else 1214 data |= (0 << 14); 1215 1216 /* 1217 Calculate the size of bDIMMSize (power of 2) and 1218 merge the DIMM size by program start/end address. 1219 */ 1220 1221 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3; 1222 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */ 1223 data |= (((size / 16) - 1) << 16); 1224 data |= (0 << 23); 1225 data |= 8; 1226 writel(data, mmio + PDC_DIMM0_CONTROL); 1227 readl(mmio + PDC_DIMM0_CONTROL); 1228 return size; 1229 } 1230 1231 1232 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host) 1233 { 1234 u32 data, spd0; 1235 int error, i; 1236 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 1237 1238 /* hard-code chip #0 */ 1239 mmio += PDC_CHIP0_OFS; 1240 1241 /* 1242 Set To Default : DIMM Module Global Control Register (0x022259F1) 1243 DIMM Arbitration Disable (bit 20) 1244 DIMM Data/Control Output Driving Selection (bit12 - bit15) 1245 Refresh Enable (bit 17) 1246 */ 1247 1248 data = 0x022259F1; 1249 writel(data, mmio + PDC_SDRAM_CONTROL); 1250 readl(mmio + PDC_SDRAM_CONTROL); 1251 1252 /* Turn on for ECC */ 1253 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1254 PDC_DIMM_SPD_TYPE, &spd0); 1255 if (spd0 == 0x02) { 1256 data |= (0x01 << 16); 1257 writel(data, mmio + PDC_SDRAM_CONTROL); 1258 readl(mmio + PDC_SDRAM_CONTROL); 1259 printk(KERN_ERR "Local DIMM ECC Enabled\n"); 1260 } 1261 1262 /* DIMM Initialization Select/Enable (bit 18/19) */ 1263 data &= (~(1<<18)); 1264 data |= (1<<19); 1265 writel(data, mmio + PDC_SDRAM_CONTROL); 1266 1267 error = 1; 1268 for (i = 1; i <= 10; i++) { /* polling ~5 secs */ 1269 data = readl(mmio + PDC_SDRAM_CONTROL); 1270 if (!(data & (1<<19))) { 1271 error = 0; 1272 break; 1273 } 1274 msleep(i*100); 1275 } 1276 return error; 1277 } 1278 1279 1280 static unsigned int pdc20621_dimm_init(struct ata_host *host) 1281 { 1282 int speed, size, length; 1283 u32 addr, spd0, pci_status; 1284 u32 time_period = 0; 1285 u32 tcount = 0; 1286 u32 ticks = 0; 1287 u32 clock = 0; 1288 u32 fparam = 0; 1289 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 1290 1291 /* hard-code chip #0 */ 1292 mmio += PDC_CHIP0_OFS; 1293 1294 /* Initialize PLL based upon PCI Bus Frequency */ 1295 1296 /* Initialize Time Period Register */ 1297 writel(0xffffffff, mmio + PDC_TIME_PERIOD); 1298 time_period = readl(mmio + PDC_TIME_PERIOD); 1299 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period); 1300 1301 /* Enable timer */ 1302 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL); 1303 readl(mmio + PDC_TIME_CONTROL); 1304 1305 /* Wait 3 seconds */ 1306 msleep(3000); 1307 1308 /* 1309 When timer is enabled, counter is decreased every internal 1310 clock cycle. 1311 */ 1312 1313 tcount = readl(mmio + PDC_TIME_COUNTER); 1314 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount); 1315 1316 /* 1317 If SX4 is on PCI-X bus, after 3 seconds, the timer counter 1318 register should be >= (0xffffffff - 3x10^8). 1319 */ 1320 if (tcount >= PCI_X_TCOUNT) { 1321 ticks = (time_period - tcount); 1322 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks); 1323 1324 clock = (ticks / 300000); 1325 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock); 1326 1327 clock = (clock * 33); 1328 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock); 1329 1330 /* PLL F Param (bit 22:16) */ 1331 fparam = (1400000 / clock) - 2; 1332 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam); 1333 1334 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */ 1335 pci_status = (0x8a001824 | (fparam << 16)); 1336 } else 1337 pci_status = PCI_PLL_INIT; 1338 1339 /* Initialize PLL. */ 1340 VPRINTK("pci_status: 0x%x\n", pci_status); 1341 writel(pci_status, mmio + PDC_CTL_STATUS); 1342 readl(mmio + PDC_CTL_STATUS); 1343 1344 /* 1345 Read SPD of DIMM by I2C interface, 1346 and program the DIMM Module Controller. 1347 */ 1348 if (!(speed = pdc20621_detect_dimm(host))) { 1349 printk(KERN_ERR "Detect Local DIMM Fail\n"); 1350 return 1; /* DIMM error */ 1351 } 1352 VPRINTK("Local DIMM Speed = %d\n", speed); 1353 1354 /* Programming DIMM0 Module Control Register (index_CID0:80h) */ 1355 size = pdc20621_prog_dimm0(host); 1356 VPRINTK("Local DIMM Size = %dMB\n", size); 1357 1358 /* Programming DIMM Module Global Control Register (index_CID0:88h) */ 1359 if (pdc20621_prog_dimm_global(host)) { 1360 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n"); 1361 return 1; 1362 } 1363 1364 #ifdef ATA_VERBOSE_DEBUG 1365 { 1366 u8 test_parttern1[40] = 1367 {0x55,0xAA,'P','r','o','m','i','s','e',' ', 1368 'N','o','t',' ','Y','e','t',' ', 1369 'D','e','f','i','n','e','d',' ', 1370 '1','.','1','0', 1371 '9','8','0','3','1','6','1','2',0,0}; 1372 u8 test_parttern2[40] = {0}; 1373 1374 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40); 1375 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40); 1376 1377 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40); 1378 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); 1379 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], 1380 test_parttern2[1], &(test_parttern2[2])); 1381 pdc20621_get_from_dimm(host, test_parttern2, 0x10040, 1382 40); 1383 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], 1384 test_parttern2[1], &(test_parttern2[2])); 1385 1386 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40); 1387 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); 1388 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], 1389 test_parttern2[1], &(test_parttern2[2])); 1390 } 1391 #endif 1392 1393 /* ECC initiliazation. */ 1394 1395 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1396 PDC_DIMM_SPD_TYPE, &spd0); 1397 if (spd0 == 0x02) { 1398 void *buf; 1399 VPRINTK("Start ECC initialization\n"); 1400 addr = 0; 1401 length = size * 1024 * 1024; 1402 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL); 1403 while (addr < length) { 1404 pdc20621_put_to_dimm(host, buf, addr, 1405 ECC_ERASE_BUF_SZ); 1406 addr += ECC_ERASE_BUF_SZ; 1407 } 1408 kfree(buf); 1409 VPRINTK("Finish ECC initialization\n"); 1410 } 1411 return 0; 1412 } 1413 1414 1415 static void pdc_20621_init(struct ata_host *host) 1416 { 1417 u32 tmp; 1418 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 1419 1420 /* hard-code chip #0 */ 1421 mmio += PDC_CHIP0_OFS; 1422 1423 /* 1424 * Select page 0x40 for our 32k DIMM window 1425 */ 1426 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000; 1427 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */ 1428 writel(tmp, mmio + PDC_20621_DIMM_WINDOW); 1429 1430 /* 1431 * Reset Host DMA 1432 */ 1433 tmp = readl(mmio + PDC_HDMA_CTLSTAT); 1434 tmp |= PDC_RESET; 1435 writel(tmp, mmio + PDC_HDMA_CTLSTAT); 1436 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ 1437 1438 udelay(10); 1439 1440 tmp = readl(mmio + PDC_HDMA_CTLSTAT); 1441 tmp &= ~PDC_RESET; 1442 writel(tmp, mmio + PDC_HDMA_CTLSTAT); 1443 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ 1444 } 1445 1446 static int pdc_sata_init_one(struct pci_dev *pdev, 1447 const struct pci_device_id *ent) 1448 { 1449 static int printed_version; 1450 const struct ata_port_info *ppi[] = 1451 { &pdc_port_info[ent->driver_data], NULL }; 1452 struct ata_host *host; 1453 struct pdc_host_priv *hpriv; 1454 int i, rc; 1455 1456 if (!printed_version++) 1457 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1458 1459 /* allocate host */ 1460 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4); 1461 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 1462 if (!host || !hpriv) 1463 return -ENOMEM; 1464 1465 host->private_data = hpriv; 1466 1467 /* acquire resources and fill host */ 1468 rc = pcim_enable_device(pdev); 1469 if (rc) 1470 return rc; 1471 1472 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR), 1473 DRV_NAME); 1474 if (rc == -EBUSY) 1475 pcim_pin_device(pdev); 1476 if (rc) 1477 return rc; 1478 host->iomap = pcim_iomap_table(pdev); 1479 1480 for (i = 0; i < 4; i++) { 1481 struct ata_port *ap = host->ports[i]; 1482 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS; 1483 unsigned int offset = 0x200 + i * 0x80; 1484 1485 pdc_sata_setup_port(&ap->ioaddr, base + offset); 1486 1487 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); 1488 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm"); 1489 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port"); 1490 } 1491 1492 /* configure and activate */ 1493 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1494 if (rc) 1495 return rc; 1496 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 1497 if (rc) 1498 return rc; 1499 1500 if (pdc20621_dimm_init(host)) 1501 return -ENOMEM; 1502 pdc_20621_init(host); 1503 1504 pci_set_master(pdev); 1505 return ata_host_activate(host, pdev->irq, pdc20621_interrupt, 1506 IRQF_SHARED, &pdc_sata_sht); 1507 } 1508 1509 1510 static int __init pdc_sata_init(void) 1511 { 1512 return pci_register_driver(&pdc_sata_pci_driver); 1513 } 1514 1515 1516 static void __exit pdc_sata_exit(void) 1517 { 1518 pci_unregister_driver(&pdc_sata_pci_driver); 1519 } 1520 1521 1522 MODULE_AUTHOR("Jeff Garzik"); 1523 MODULE_DESCRIPTION("Promise SATA low-level driver"); 1524 MODULE_LICENSE("GPL"); 1525 MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl); 1526 MODULE_VERSION(DRV_VERSION); 1527 1528 module_init(pdc_sata_init); 1529 module_exit(pdc_sata_exit); 1530