// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_sx4.c - Promise SATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available under NDA.
 */

/*
  Theory of operation
  -------------------

  The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
  engine, DIMM memory, and four ATA engines (one per SATA port).
  Data is copied to/from DIMM memory by the HDMA engine, before
  handing off to one (or more) of the ATA engines.  The ATA
  engines operate solely on DIMM memory.

  The SX4 behaves like a PATA chip, with no SATA controls or
  knowledge whatsoever, leading to the presumption that
  PATA<->SATA bridges exist on SX4 boards, external to the
  PDC20621 chip itself.

  The chip is quite capable, supporting an XOR engine and linked
  hardware commands (permits a string of transactions to be
  submitted and waited-on as a single unit), and an optional
  microprocessor.

  The limiting factor is largely software.  This Linux driver was
  written to multiplex the single HDMA engine to copy disk
  transactions into a fixed DIMM memory space, from where an ATA
  engine takes over.  As a result, each WRITE looks like this:

	submit HDMA packet to hardware
	hardware copies data from system memory to DIMM
	hardware raises interrupt

	submit ATA packet to hardware
	hardware executes ATA WRITE command, w/ data in DIMM
	hardware raises interrupt

  and each READ looks like this:

	submit ATA packet to hardware
	hardware executes ATA READ command, w/ data in DIMM
	hardware raises interrupt

	submit HDMA packet to hardware
	hardware copies data from DIMM to system memory
	hardware raises interrupt

  This is a very slow, lock-step way of doing things that can
  certainly be improved by motivated kernel hackers.
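
  As a quick reference (summarized from the PDC_DIMM_* constants
  below, not from separate hardware documentation): each port gets
  an 8K control window in DIMM memory at DIMM_BASE + port * 8K,
  holding the Host DMA packet, HDMA S/G table, ATA packet and ATA
  S/G table at consecutive 128-byte offsets plus a host-side S/G
  table at 6K, and a 256K data buffer at
  DIMM_BASE + 64K + port * 256K.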

 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"

#define DRV_NAME	"sata_sx4"
#define DRV_VERSION	"0.12"


enum {
	PDC_MMIO_BAR		= 3,
	PDC_DIMM_BAR		= 4,

	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */

	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */

	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */

	PDC_20621_SEQCTL	= 0x400,
	PDC_20621_SEQMASK	= 0x480,
	PDC_20621_GENERAL_CTL	= 0x484,
	PDC_20621_PAGE_SIZE	= (32 * 1024),

	/* chosen, not constant, values; we design our own DIMM mem map */
	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
	PDC_20621_DIMM_BASE	= 0x00200000,
	PDC_20621_DIMM_DATA	= (64 * 1024),
	PDC_DIMM_DATA_STEP	= (256 * 1024),
	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
	PDC_DIMM_HOST_PRD	= (6 * 1024),
	PDC_DIMM_HOST_PKT	= (128 * 0),
	PDC_DIMM_HPKT_PRD	= (128 * 1),
	PDC_DIMM_ATA_PKT	= (128 * 2),
	PDC_DIMM_APKT_PRD	= (128 * 3),
	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
	PDC_PAGE_WINDOW		= 0x40,
	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,

	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */

	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
				  (1<<23),

	board_20621		= 0,	/* FastTrak S150 SX4 */

	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */

	PDC_MAX_HDMA		= 32,
	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),

	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
	PDC_I2C_CONTROL			= 0x48,
	PDC_I2C_ADDR_DATA		= 0x4C,
	PDC_DIMM0_CONTROL		= 0x80,
	PDC_DIMM1_CONTROL		= 0x84,
	PDC_SDRAM_CONTROL		= 0x88,
	PDC_I2C_WRITE			= 0,		/* master -> slave */
	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
	PDC_DIMM_SPD_ROW_NUM		= 3,
	PDC_DIMM_SPD_COLUMN_NUM		= 4,
	PDC_DIMM_SPD_MODULE_ROW		= 5,
	PDC_DIMM_SPD_TYPE		= 11,
	PDC_DIMM_SPD_FRESH_RATE		= 12,
	PDC_DIMM_SPD_BANK_NUM		= 17,
	PDC_DIMM_SPD_CAS_LATENCY	= 18,
	PDC_DIMM_SPD_ATTRIBUTE		= 21,
	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
	PDC_CTL_STATUS			= 0x08,
	PDC_DIMM_WINDOW_CTLR		= 0x0C,
	PDC_TIME_CONTROL		= 0x3C,
	PDC_TIME_PERIOD			= 0x40,
	PDC_TIME_COUNTER		= 0x44,
	PDC_GENERAL_CTLR		= 0x484,
	PCI_PLL_INIT			= 0x8A531824,
	PCI_X_TCOUNT			= 0xEE1E5CFF,

	/* PDC_TIME_CONTROL bits */
	PDC_TIMER_BUZZER		= (1 << 10),
	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
	PDC_TIMER_ENABLE		= (1 << 7),
	PDC_TIMER_MASK_INT		= (1 << 5),
	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
					  PDC_TIMER_ENABLE |
					  PDC_TIMER_MASK_INT,
};

#define ECC_ERASE_BUF_SZ (128 * 1024)

struct pdc_port_priv {
	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
	u8			*pkt;
	dma_addr_t		pkt_dma;
};

struct pdc_host_priv {
	unsigned int		doing_hdma;
	unsigned int		hdma_prod;
	unsigned int		hdma_cons;
	struct {
		struct ata_queued_cmd	*qc;
		unsigned int		seq;
		unsigned long		pkt_ofs;
	} hdma[32];
};


static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
static int pdc20621_detect_dimm(struct ata_host *host);
static unsigned int pdc20621_i2c_read(struct ata_host *host,
				      u32 device, u32 subaddr, u32 *pdata);
static int pdc20621_prog_dimm0(struct ata_host *host);
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host,
				   void *psource, u32 offset, u32 size);
#endif
static void pdc20621_put_to_dimm(struct ata_host *host,
				 void *psource, u32 offset, u32 size);
static void pdc20621_irq_clear(struct ata_port *ap);
static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);


static struct scsi_host_template pdc_sata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};

/* TODO: inherit from base port_ops after converting to new EH */
static struct ata_port_operations pdc_20621_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc20621_qc_prep,
	.qc_issue		= pdc20621_qc_issue,

	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.softreset		= pdc_softreset,
	.error_handler		= pdc_error_handler,
	.lost_interrupt		= ATA_OP_NULL,
	.post_internal_cmd	= pdc_post_internal_cmd,

	.port_start		= pdc_port_start,

	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.sff_irq_clear		= pdc20621_irq_clear,
};

static const struct ata_port_info pdc_port_info[] = {
	/* board_20621 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_20621_ops,
	},

};

static const struct pci_device_id pdc_sata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },

	{ }	/* terminate list */
};

static struct pci_driver pdc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_sata_pci_tbl,
	.probe			= pdc_sata_init_one,
	.remove			= ata_pci_remove_one,
};


static int pdc_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct pdc_port_priv *pp;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	ap->private_data = pp;

	return 0;
}

static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
				   unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output ATA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);
	VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		(PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_APKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
				    unsigned int total_len)
{
	u32 addr;
	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
	__le32 *buf32 = (__le32 *) buf;

	/* output Host DMA packet S/G table */
	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       (PDC_DIMM_DATA_STEP * portno);

	buf32[dw] = cpu_to_le32(addr);
	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);

	VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
		PDC_20621_DIMM_BASE +
		(PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HPKT_PRD,
		buf32[dw], buf32[dw + 1]);
}

static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
					    unsigned int devno, u8 *buf,
					    unsigned int portno)
{
	unsigned int i, dw;
	__le32 *buf32 = (__le32 *) buf;
	u8 dev_reg;

	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_APKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);

	i = PDC_DIMM_ATA_PKT;

	/*
	 * Set up ATA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		buf[i++] = PDC_PKT_READ;
	else if (tf->protocol == ATA_PROT_NODATA)
		buf[i++] = PDC_PKT_NODATA;
	else
		buf[i++] = 0;
	buf[i++] = 0;			/* reserved */
	buf[i++] = portno + 1;		/* seq. id */
	buf[i++] = 0xff;		/* delay seq. id */
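
	/*
	 * Note: the sequence id chosen above (port number + 1) selects
	 * which PDC_20621_SEQMASK bit the completion interrupt will set;
	 * the Host DMA packets use the same id offset by 4.
	 */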

	/* dimm dma S/G, and next-pkt */
	dw = i >> 2;
	if (tf->protocol == ATA_PROT_NODATA)
		buf32[dw] = 0;
	else
		buf32[dw] = cpu_to_le32(dimm_sg);
	buf32[dw + 1] = 0;
	i += 8;

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[i++] = dev_reg;

	/* device control register */
	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
	buf[i++] = tf->ctl;

	return i;
}

static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
				     unsigned int portno)
{
	unsigned int dw;
	u32 tmp;
	__le32 *buf32 = (__le32 *) buf;

	unsigned int host_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HOST_PRD;
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HPKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);

	dw = PDC_DIMM_HOST_PKT >> 2;

	/*
	 * Set up Host DMA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		tmp = PDC_PKT_READ;
	else
		tmp = 0;
	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
	tmp |= (0xff << 24);			/* delay seq. id */
	buf32[dw + 0] = cpu_to_le32(tmp);
	buf32[dw + 1] = cpu_to_le32(host_sg);
	buf32[dw + 2] = cpu_to_le32(dimm_sg);
	buf32[dw + 3] = 0;

	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HOST_PKT,
		buf32[dw + 0],
		buf32[dw + 1],
		buf32[dw + 2],
		buf32[dw + 3]);
}

static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i, si, idx, total_len = 0, sgt_len;
	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Build S/G table
	 */
	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
	sgt_len = idx * 4;

	/*
	 * Build ATA, host DMA packets
	 */
	pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);

	pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
		    PDC_DIMM_HOST_PRD,
		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
}

static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}

static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc20621_dma_prep(qc);
		break;
	case ATA_PROT_NODATA:
		pdc20621_nodata_prep(qc);
		break;
	default:
		break;
	}

	return AC_ERR_OK;
}

static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
				 unsigned int seq,
				 u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}

static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
			       unsigned int seq,
			       u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;

	if (!pp->doing_hdma) {
		__pdc20621_push_hdma(qc, seq, pkt_ofs);
		pp->doing_hdma = 1;
		return;
	}

	pp->hdma[idx].qc = qc;
	pp->hdma[idx].seq = seq;
	pp->hdma[idx].pkt_ofs = pkt_ofs;
	pp->hdma_prod++;
}

static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;

	/* if nothing on queue, we're done */
	if (pp->hdma_prod == pp->hdma_cons) {
		pp->doing_hdma = 0;
		return;
	}

	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
			     pp->hdma[idx].pkt_ofs);
	pp->hdma_cons++;
}

#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int port_no = ap->port_no;
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];

	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
	dimm_mmio += PDC_DIMM_HOST_PKT;

	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
}
#else
static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
#endif /* ATA_VERBOSE_DEBUG */

static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	wmb();			/* flush PRD, pkt writes */

	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_HOST_PKT,
			port_ofs + PDC_DIMM_HOST_PKT,
			seq);
	} else {
		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

		writel(port_ofs + PDC_DIMM_ATA_PKT,
		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_ATA_PKT,
			port_ofs + PDC_DIMM_ATA_PKT,
			seq);
	}
}

static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		/*FALLTHROUGH*/
	case ATA_PROT_DMA:
		pdc20621_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	return ata_sff_qc_issue(qc);
}

static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
					      struct ata_queued_cmd *qc,
					      unsigned int doing_hdma,
					      void __iomem *mmio)
{
	unsigned int port_no = ap->port_no;
	unsigned int port_ofs =
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
	u8 status;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {

		/* step two - DMA from DIMM to host */
		if (doing_hdma) {
			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}

		/* step one - exec ATA command */
		else {
			u8 seq = (u8) (port_no + 1 + 4);
			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit hdma pkt */
			pdc20621_dump_hdma(qc);
			pdc20621_push_hdma(qc, seq,
					   port_ofs + PDC_DIMM_HOST_PKT);
		}
		handled = 1;

	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */

		/* step one - DMA from host to DIMM */
		if (doing_hdma) {
			u8 seq = (u8) (port_no + 1);
			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit ata pkt */
			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
			writel(port_ofs + PDC_DIMM_ATA_PKT,
			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		}

		/* step two - execute ATA command */
		else {
			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}
		handled = 1;

	/* command completion, but no data xfer */
	} else if (qc->tf.protocol == ATA_PROT_NODATA) {

		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		handled = 1;

	} else {
		ap->stats.idle_irq++;
	}

	return handled;
}

static void pdc20621_irq_clear(struct ata_port *ap)
{
	ioread8(ap->ioaddr.status_addr);
}

static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp, port_no;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	mmio_base = host->iomap[PDC_MMIO_BAR];

	/* reading should also clear interrupts */
	mmio_base += PDC_CHIP0_OFS;
	mask = readl(mmio_base + PDC_20621_SEQMASK);
	VPRINTK("mask == 0x%x\n", mask);

	if (mask == 0xffffffff) {
		VPRINTK("QUICK EXIT 2\n");
		return IRQ_NONE;
	}
	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask) {
		VPRINTK("QUICK EXIT 3\n");
		return IRQ_NONE;
	}

	spin_lock(&host->lock);

	for (i = 1; i < 9; i++) {
		port_no = i - 1;
		if (port_no > 3)
			port_no -= 4;
		if (port_no >= host->n_ports)
			ap = NULL;
		else
			ap = host->ports[port_no];
		tmp = mask & (1 << i);
		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
		if (tmp && ap) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc20621_host_intr(ap, qc, (i > 4),
							      mmio_base);
		}
	}

	spin_unlock(&host->lock);

	VPRINTK("mask == 0x%x\n", mask);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */

	tmp = readl(mmio + PDC_CTLSTAT);
	tmp |= PDC_MASK_INT;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT); /* flush */
}

static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: start HDMA engine, if zero ATA engines running */

	/* clear IRQ */
	ioread8(ap->ioaddr.status_addr);

	/* turn IRQ back on */
	tmp = readl(mmio + PDC_CTLSTAT);
	tmp &= ~PDC_MASK_INT;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT); /* flush */
}

static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* FIXME: handle HDMA copy engine */

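	/*
	 * Assert the reset bit and poll briefly (up to ~1 ms) for it to
	 * read back as set; it is de-asserted again below.
	 */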
	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, mmio);
	}

	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}

static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}

static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}

static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}

static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1; /* atapi dma off by default */

	/* Whitelist commands that may use DMA. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad: /* READ_DVD_STRUCTURE */
	case 0xbe: /* READ_CD */
		pio = 0;
	}
	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba =
			(scsicmd[2] << 24) |
			(scsicmd[3] << 16) |
			(scsicmd[4] << 8) |
			scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}
	return pio;
}

static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}


static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}


static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base;
	port->data_addr		= base;
	port->feature_addr	=
	port->error_addr	= base + 0x4;
	port->nsect_addr	= base + 0x8;
	port->lbal_addr		= base + 0xc;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;
	port->device_addr	= base + 0x18;
	port->command_addr	=
	port->status_addr	= base + 0x1c;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x38;
}


#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
				   u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);

	offset -= (idx * window_size);
	idx++;
	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
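	/* copy only as much as fits in the current 32K window */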
	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio(psource, dimm_mmio, window_size / 4);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio(psource, dimm_mmio, size / 4);
	}
}
#endif


static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
				 u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4; /* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);
	offset -= (idx * window_size);
	idx++;
	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);

	psource += dist;
	size -= dist;
	for (; (long) size >= (long) window_size ;) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, window_size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	if (size) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
	}
}


static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
				      u32 subaddr, u32 *pdata)
{
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	u32 i2creg = 0;
	u32 status;
	u32 count = 0;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i2creg |= device << 24;
	i2creg |= subaddr << 16;

	/* Set the device and subaddress */
	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
	readl(mmio + PDC_I2C_ADDR_DATA);

	/* Write Control to perform read operation, mask int */
	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
	       mmio + PDC_I2C_CONTROL);

	for (count = 0; count <= 1000; count ++) {
		status = readl(mmio + PDC_I2C_CONTROL);
		if (status & PDC_I2C_COMPLETE) {
			status = readl(mmio + PDC_I2C_ADDR_DATA);
			break;
		} else if (count == 1000)
			return 0;
	}

	*pdata = (status >> 8) & 0x000000ff;
	return 1;
}


static int pdc20621_detect_dimm(struct ata_host *host)
{
	u32 data = 0;
	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			      PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
		if (data == 100)
			return 100;
	} else
		return 0;

	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
		if (data <= 0x75)
			return 133;
	} else
		return 0;

	return 0;
}


static int pdc20621_prog_dimm0(struct ata_host *host)
{
	u32 spd0[50];
	u32 data = 0;
	int size, i;
	u8 bdimmsize;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	static const struct {
		unsigned int reg;
		unsigned int ofs;
	} pdc_i2c_read_data [] = {
		{ PDC_DIMM_SPD_TYPE, 11 },
		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
		{ PDC_DIMM_SPD_ROW_NUM, 3 },
		{ PDC_DIMM_SPD_BANK_NUM, 17 },
		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
	};

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
				  pdc_i2c_read_data[i].reg,
				  &spd0[pdc_i2c_read_data[i].ofs]);

	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
		((((spd0[27] + 9) / 10) - 1) << 8) ;
	data |= (((((spd0[29] > spd0[28])
		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;

	if (spd0[18] & 0x08)
		data |= ((0x03) << 14);
	else if (spd0[18] & 0x04)
		data |= ((0x02) << 14);
	else if (spd0[18] & 0x01)
		data |= ((0x01) << 14);
	else
		data |= (0 << 14);

	/*
	   Calculate the size of bDIMMSize (power of 2) and
	   merge the DIMM size by program start/end address.
	*/

	/*
	 * Roughly, bdimmsize is log2 of the module size in bytes: column
	 * bits + row bits, the /2 terms approximating log2 of the rank
	 * and bank counts, plus 3 for the 8-byte data width.
	 */
	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
	data |= (((size / 16) - 1) << 16);
	data |= (0 << 23);
	data |= 8;
	writel(data, mmio + PDC_DIMM0_CONTROL);
	readl(mmio + PDC_DIMM0_CONTROL);
	return size;
}


static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
{
	u32 data, spd0;
	int error, i;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	   Set To Default : DIMM Module Global Control Register (0x022259F1)
	   DIMM Arbitration Disable (bit 20)
	   DIMM Data/Control Output Driving Selection (bit12 - bit15)
	   Refresh Enable (bit 17)
	*/

	data = 0x022259F1;
	writel(data, mmio + PDC_SDRAM_CONTROL);
	readl(mmio + PDC_SDRAM_CONTROL);

	/* Turn on for ECC */
	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			       PDC_DIMM_SPD_TYPE, &spd0)) {
		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
		return 1;
	}
	if (spd0 == 0x02) {
		data |= (0x01 << 16);
		writel(data, mmio + PDC_SDRAM_CONTROL);
		readl(mmio + PDC_SDRAM_CONTROL);
		printk(KERN_ERR "Local DIMM ECC Enabled\n");
	}

	/* DIMM Initialization Select/Enable (bit 18/19) */
	data &= (~(1<<18));
	data |= (1<<19);
	writel(data, mmio + PDC_SDRAM_CONTROL);

	error = 1;
	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
		data = readl(mmio + PDC_SDRAM_CONTROL);
		if (!(data & (1<<19))) {
			error = 0;
			break;
		}
		msleep(i*100);
	}
	return error;
}


static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
	int speed, size, length;
	u32 addr, spd0, pci_status;
	u32 time_period = 0;
	u32 tcount = 0;
	u32 ticks = 0;
	u32 clock = 0;
	u32 fparam = 0;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/* Initialize PLL based upon PCI Bus Frequency */

	/* Initialize Time Period Register */
	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
	time_period = readl(mmio + PDC_TIME_PERIOD);
	VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);

	/* Enable timer */
	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
	readl(mmio + PDC_TIME_CONTROL);

	/* Wait 3 seconds */
	msleep(3000);

	/*
	   When timer is enabled, counter is decreased every internal
	   clock cycle.
	*/

	tcount = readl(mmio + PDC_TIME_COUNTER);
	VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);

	/*
	   If SX4 is on PCI-X bus, after 3 seconds, the timer counter
	   register should be >= (0xffffffff - 3x10^8).
	*/
	if (tcount >= PCI_X_TCOUNT) {
		ticks = (time_period - tcount);
		VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);

		/* ticks were counted over 3 s, so this is 10x the clock in MHz */
		clock = (ticks / 300000);
		VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);

		clock = (clock * 33);
		VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);

		/* PLL F Param (bit 22:16) */
		fparam = (1400000 / clock) - 2;
		VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);

		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
		pci_status = (0x8a001824 | (fparam << 16));
	} else
		pci_status = PCI_PLL_INIT;

	/* Initialize PLL. */
	VPRINTK("pci_status: 0x%x\n", pci_status);
	writel(pci_status, mmio + PDC_CTL_STATUS);
	readl(mmio + PDC_CTL_STATUS);

	/*
	   Read SPD of DIMM by I2C interface,
	   and program the DIMM Module Controller.
	*/
	if (!(speed = pdc20621_detect_dimm(host))) {
		printk(KERN_ERR "Detect Local DIMM Fail\n");
		return 1;	/* DIMM error */
	}
	VPRINTK("Local DIMM Speed = %d\n", speed);

	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
	size = pdc20621_prog_dimm0(host);
	VPRINTK("Local DIMM Size = %dMB\n", size);

	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
	if (pdc20621_prog_dimm_global(host)) {
		printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
		return 1;
	}

#ifdef ATA_VERBOSE_DEBUG
	{
		u8 test_parttern1[40] =
			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
			'N','o','t',' ','Y','e','t',' ',
			'D','e','f','i','n','e','d',' ',
			'1','.','1','0',
			'9','8','0','3','1','6','1','2',0,0};
		u8 test_parttern2[40] = {0};

		pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
		pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);

		pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));
		pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
				       40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));

		pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
		       test_parttern2[1], &(test_parttern2[2]));
	}
#endif

	/*
	 * ECC initialization: if the SPD reports an ECC module, zero-fill
	 * the entire DIMM so every location carries valid check bits
	 * before the engines start using it.
	 */
	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
			       PDC_DIMM_SPD_TYPE, &spd0)) {
		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
		return 1;
	}
	if (spd0 == 0x02) {
		void *buf;
		VPRINTK("Start ECC initialization\n");
		addr = 0;
		length = size * 1024 * 1024;
		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
		if (!buf)
			return 1;
		while (addr < length) {
			pdc20621_put_to_dimm(host, buf, addr,
					     ECC_ERASE_BUF_SZ);
			addr += ECC_ERASE_BUF_SZ;
		}
		kfree(buf);
		VPRINTK("Finish ECC initialization\n");
	}
	return 0;
}


static void pdc_20621_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Select page 0x40 for our 32k DIMM window
	 */
	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);

	/*
	 * Reset Host DMA
	 */
	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp |= PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */

	udelay(10);

	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
	tmp &= ~PDC_RESET;
	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
}

static int pdc_sata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] =
		{ &pdc_port_info[ent->driver_data], NULL };
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	int i, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
				DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 4; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
		unsigned int offset = 0x200 + i * 0x80;

		pdc_sata_setup_port(&ap->ioaddr, base + offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
	}

	/* configure and activate */
	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	if (pdc20621_dimm_init(host))
		return -ENOMEM;
	pdc_20621_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
				 IRQF_SHARED, &pdc_sata_sht);
}

module_pci_driver(pdc_sata_pci_driver);

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);