/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/sbus.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev)
{
	struct sbus_dev *sdev = esp->dev;
	struct sbus_dma *dma;

	if (dma_sdev != NULL) {
		for_each_dvma(dma) {
			if (dma->sdev == dma_sdev)
				break;
		}
	} else {
		for_each_dvma(dma) {
			if (dma->sdev == NULL)
				break;

			/* If bus + slot are the same and it has the
			 * correct OBP name, it's ours.
			 */
			if (sdev->bus == dma->sdev->bus &&
			    sdev->slot == dma->sdev->slot &&
			    (!strcmp(dma->sdev->prom_name, "dma") ||
			     !strcmp(dma->sdev->prom_name, "espdma")))
				break;
		}
	}

	if (dma == NULL) {
		printk(KERN_ERR PFX "[%s] Cannot find dma.\n",
		       sdev->ofdev.node->full_name);
		return -ENODEV;
	}
	esp->dma = dma;
	esp->dma_regs = dma->regs;

	return 0;
}

static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct sbus_dev *sdev = esp->dev;
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &sdev->resource[1];
	else
		res = &sdev->resource[0];

	esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;

	esp->command_block = sbus_alloc_consistent(sdev, 16,
						   &esp->command_block_dma);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

static int __devinit esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct sbus_dev *sdev = esp->dev;

	host->irq = sdev->irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

static void __devinit esp_get_scsi_id(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;

	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	if (!sdev->bus) {
		/* SUN4 */
		esp->scsi_id = 7;
		goto done;
	}

	esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void __devinit esp_get_differential(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;

	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

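/* The ESP clock frequency is read from the "clock-frequency" OBP
 * property of the device node itself, falling back to the property
 * on the parent SBUS bus node when the device does not carry one.
 */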
static void __devinit esp_get_clock_params(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;
	struct device_node *bus_dp;
	int fmhz;

	bus_dp = NULL;
	if (sdev != NULL && sdev->bus != NULL)
		bus_dp = sdev->bus->ofdev.node;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = (!bus_dp) ? 0 :
			of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;
	u8 bursts;

	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	if (dma) {
		struct device_node *dma_dp = dma->ofdev.node;
		u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
		if (val != 0xff)
			bursts &= val;
	}

	if (sdev->bus) {
		u8 val = of_getintprop_default(sdev->bus->ofdev.node,
					       "burst-sizes", 0xff);
		if (val != 0xff)
			bursts &= val;
	}

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma)
{
	esp_get_scsi_id(esp);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
				      size_t sz, int dir)
{
	return sbus_map_single(esp->dev, buf, sz, dir);
}

static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			   int num_sg, int dir)
{
	return sbus_map_sg(esp->dev, sg, num_sg, dir);
}

static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				  size_t sz, int dir)
{
	sbus_unmap_single(esp->dev, addr, sz, dir);
}

static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			      int num_sg, int dir)
{
	sbus_unmap_sg(esp->dev, sg, num_sg, dir);
}

static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

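/* Reset and reprogram the DVMA gate array according to its revision,
 * using the burst sizes negotiated in esp_get_bursts().  Interrupts
 * are re-enabled in the CSR once the controller is configured.
 */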
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	if (sbus_can_dma_64bit(esp->dev))
		can_do_sbus64 = 1;
	if (sbus_can_burst64(esp->dev))
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state.  */
	if (esp->dma->revision != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dma->revision) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(esp->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dma->revision == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

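/* Discard any state left in the DVMA from a previous transfer.  HME
 * controllers get a SCSI-channel reset and a rebuilt CSR; older
 * revisions wait for pending reads to finish and then pulse
 * DMA_FIFO_INV to flush the FIFO.
 */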
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dma->revision == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dma->revision == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	= sbus_esp_write8,
	.esp_read8	= sbus_esp_read8,
	.map_single	= sbus_esp_map_single,
	.map_sg		= sbus_esp_map_sg,
	.unmap_single	= sbus_esp_unmap_single,
	.unmap_sg	= sbus_esp_unmap_sg,
	.irq_pending	= sbus_esp_irq_pending,
	.reset_dma	= sbus_esp_reset_dma,
	.dma_drain	= sbus_esp_dma_drain,
	.dma_invalidate	= sbus_esp_dma_invalidate,
	.send_dma_cmd	= sbus_esp_send_dma_cmd,
	.dma_error	= sbus_esp_dma_error,
};

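/* Common probe path: allocate the Scsi_Host, locate the DVMA companion,
 * map the chip registers and command block, hook up the interrupt, read
 * the OBP properties, and finally register with the ESP core.  Each
 * failure point unwinds whatever was set up before it.
 */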
525 */ 526 if (esp->dma->revision == dvmaesc1) { 527 u32 val = dma_read32(DMA_CSR); 528 529 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); 530 } 531 532 dev_set_drvdata(&esp_dev->ofdev.dev, esp); 533 534 err = scsi_esp_register(esp, dev); 535 if (err) 536 goto fail_free_irq; 537 538 return 0; 539 540 fail_free_irq: 541 free_irq(host->irq, esp); 542 fail_unmap_command_block: 543 sbus_free_consistent(esp->dev, 16, 544 esp->command_block, 545 esp->command_block_dma); 546 fail_unmap_regs: 547 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); 548 fail_unlink: 549 scsi_host_put(host); 550 fail: 551 return err; 552 } 553 554 static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) 555 { 556 struct sbus_dev *sdev = to_sbus_device(&dev->dev); 557 struct device_node *dp = dev->node; 558 struct sbus_dev *dma_sdev = NULL; 559 int hme = 0; 560 561 if (dp->parent && 562 (!strcmp(dp->parent->name, "espdma") || 563 !strcmp(dp->parent->name, "dma"))) 564 dma_sdev = sdev->parent; 565 else if (!strcmp(dp->name, "SUNW,fas")) { 566 dma_sdev = sdev; 567 hme = 1; 568 } 569 570 return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev, 571 sdev->bus, hme); 572 } 573 574 static int __devexit esp_sbus_remove(struct of_device *dev) 575 { 576 struct esp *esp = dev_get_drvdata(&dev->dev); 577 unsigned int irq = esp->host->irq; 578 u32 val; 579 580 scsi_esp_unregister(esp); 581 582 /* Disable interrupts. */ 583 val = dma_read32(DMA_CSR); 584 dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); 585 586 free_irq(irq, esp); 587 sbus_free_consistent(esp->dev, 16, 588 esp->command_block, 589 esp->command_block_dma); 590 sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); 591 592 scsi_host_put(esp->host); 593 594 return 0; 595 } 596 597 static struct of_device_id esp_match[] = { 598 { 599 .name = "SUNW,esp", 600 }, 601 { 602 .name = "SUNW,fas", 603 }, 604 { 605 .name = "esp", 606 }, 607 {}, 608 }; 609 MODULE_DEVICE_TABLE(of, esp_match); 610 611 static struct of_platform_driver esp_sbus_driver = { 612 .name = "esp", 613 .match_table = esp_match, 614 .probe = esp_sbus_probe, 615 .remove = __devexit_p(esp_sbus_remove), 616 }; 617 618 static int __init sunesp_init(void) 619 { 620 return of_register_driver(&esp_sbus_driver, &sbus_bus_type); 621 } 622 623 static void __exit sunesp_exit(void) 624 { 625 of_unregister_driver(&esp_sbus_driver); 626 } 627 628 MODULE_DESCRIPTION("Sun ESP SCSI driver"); 629 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 630 MODULE_LICENSE("GPL"); 631 MODULE_VERSION(DRV_VERSION); 632 633 module_init(sunesp_init); 634 module_exit(sunesp_exit); 635