1 /* mac_esp.c: ESP front-end for Macintosh Quadra systems. 2 * 3 * Adapted from jazz_esp.c and the old mac_esp.c. 4 * 5 * The pseudo DMA algorithm is based on the one used in NetBSD. 6 * See sys/arch/mac68k/obio/esp.c for some background information. 7 * 8 * Copyright (C) 2007-2008 Finn Thain 9 */ 10 11 #include <linux/kernel.h> 12 #include <linux/types.h> 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/platform_device.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/scatterlist.h> 19 #include <linux/delay.h> 20 #include <linux/io.h> 21 #include <linux/nubus.h> 22 #include <linux/slab.h> 23 24 #include <asm/irq.h> 25 #include <asm/dma.h> 26 #include <asm/macints.h> 27 #include <asm/macintosh.h> 28 #include <asm/mac_via.h> 29 30 #include <scsi/scsi_host.h> 31 32 #include "esp_scsi.h" 33 34 #define DRV_MODULE_NAME "mac_esp" 35 #define PFX DRV_MODULE_NAME ": " 36 #define DRV_VERSION "1.000" 37 #define DRV_MODULE_RELDATE "Sept 15, 2007" 38 39 #define MAC_ESP_IO_BASE 0x50F00000 40 #define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000) 41 #define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000) 42 #define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000) 43 #define MAC_ESP_REGS_SPACING 0x402 44 #define MAC_ESP_PDMA_REG 0xF9800024 45 #define MAC_ESP_PDMA_REG_SPACING 0x4 46 #define MAC_ESP_PDMA_IO_OFFSET 0x100 47 48 #define esp_read8(REG) mac_esp_read8(esp, REG) 49 #define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG) 50 51 struct mac_esp_priv { 52 struct esp *esp; 53 void __iomem *pdma_regs; 54 void __iomem *pdma_io; 55 int error; 56 }; 57 static struct esp *esp_chips[2]; 58 static DEFINE_SPINLOCK(esp_chips_lock); 59 60 #define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ 61 platform_get_drvdata((struct platform_device *) \ 62 (esp->dev))) 63 64 static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg) 65 { 66 nubus_writeb(val, esp->regs + reg * 16); 67 } 68 69 static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg) 70 { 71 return nubus_readb(esp->regs + reg * 16); 72 } 73 74 /* For pseudo DMA and PIO we need the virtual address 75 * so this address mapping is the identity mapping. 76 */ 77 78 static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf, 79 size_t sz, int dir) 80 { 81 return (dma_addr_t)buf; 82 } 83 84 static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg, 85 int num_sg, int dir) 86 { 87 int i; 88 89 for (i = 0; i < num_sg; i++) 90 sg[i].dma_address = (u32)sg_virt(&sg[i]); 91 return num_sg; 92 } 93 94 static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr, 95 size_t sz, int dir) 96 { 97 /* Nothing to do. */ 98 } 99 100 static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, 101 int num_sg, int dir) 102 { 103 /* Nothing to do. */ 104 } 105 106 static void mac_esp_reset_dma(struct esp *esp) 107 { 108 /* Nothing to do. */ 109 } 110 111 static void mac_esp_dma_drain(struct esp *esp) 112 { 113 /* Nothing to do. */ 114 } 115 116 static void mac_esp_dma_invalidate(struct esp *esp) 117 { 118 /* Nothing to do. */ 119 } 120 121 static int mac_esp_dma_error(struct esp *esp) 122 { 123 return MAC_ESP_GET_PRIV(esp)->error; 124 } 125 126 static inline int mac_esp_wait_for_empty_fifo(struct esp *esp) 127 { 128 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 129 int i = 500000; 130 131 do { 132 if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)) 133 return 0; 134 135 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) 136 return 1; 137 138 udelay(2); 139 } while (--i); 140 141 printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n", 142 esp_read8(ESP_STATUS)); 143 mep->error = 1; 144 return 1; 145 } 146 147 static inline int mac_esp_wait_for_dreq(struct esp *esp) 148 { 149 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 150 int i = 500000; 151 152 do { 153 if (mep->pdma_regs == NULL) { 154 if (via2_scsi_drq_pending()) 155 return 0; 156 } else { 157 if (nubus_readl(mep->pdma_regs) & 0x200) 158 return 0; 159 } 160 161 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) 162 return 1; 163 164 udelay(2); 165 } while (--i); 166 167 printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n", 168 esp_read8(ESP_STATUS)); 169 mep->error = 1; 170 return 1; 171 } 172 173 #define MAC_ESP_PDMA_LOOP(operands) \ 174 asm volatile ( \ 175 " tstw %1 \n" \ 176 " jbeq 20f \n" \ 177 "1: movew " operands " \n" \ 178 "2: movew " operands " \n" \ 179 "3: movew " operands " \n" \ 180 "4: movew " operands " \n" \ 181 "5: movew " operands " \n" \ 182 "6: movew " operands " \n" \ 183 "7: movew " operands " \n" \ 184 "8: movew " operands " \n" \ 185 "9: movew " operands " \n" \ 186 "10: movew " operands " \n" \ 187 "11: movew " operands " \n" \ 188 "12: movew " operands " \n" \ 189 "13: movew " operands " \n" \ 190 "14: movew " operands " \n" \ 191 "15: movew " operands " \n" \ 192 "16: movew " operands " \n" \ 193 " subqw #1,%1 \n" \ 194 " jbne 1b \n" \ 195 "20: tstw %2 \n" \ 196 " jbeq 30f \n" \ 197 "21: movew " operands " \n" \ 198 " subqw #1,%2 \n" \ 199 " jbne 21b \n" \ 200 "30: tstw %3 \n" \ 201 " jbeq 40f \n" \ 202 "31: moveb " operands " \n" \ 203 "32: nop \n" \ 204 "40: \n" \ 205 " \n" \ 206 " .section __ex_table,\"a\" \n" \ 207 " .align 4 \n" \ 208 " .long 1b,40b \n" \ 209 " .long 2b,40b \n" \ 210 " .long 3b,40b \n" \ 211 " .long 4b,40b \n" \ 212 " .long 5b,40b \n" \ 213 " .long 6b,40b \n" \ 214 " .long 7b,40b \n" \ 215 " .long 8b,40b \n" \ 216 " .long 9b,40b \n" \ 217 " .long 10b,40b \n" \ 218 " .long 11b,40b \n" \ 219 " .long 12b,40b \n" \ 220 " .long 13b,40b \n" \ 221 " .long 14b,40b \n" \ 222 " .long 15b,40b \n" \ 223 " .long 16b,40b \n" \ 224 " .long 21b,40b \n" \ 225 " .long 31b,40b \n" \ 226 " .long 32b,40b \n" \ 227 " .previous \n" \ 228 : "+a" (addr), "+r" (count32), "+r" (count2) \ 229 : "g" (count1), "a" (mep->pdma_io)) 230 231 static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count, 232 u32 dma_count, int write, u8 cmd) 233 { 234 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 235 236 mep->error = 0; 237 238 if (!write) 239 scsi_esp_cmd(esp, ESP_CMD_FLUSH); 240 241 esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW); 242 esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED); 243 244 scsi_esp_cmd(esp, cmd); 245 246 do { 247 unsigned int count32 = esp_count >> 5; 248 unsigned int count2 = (esp_count & 0x1F) >> 1; 249 unsigned int count1 = esp_count & 1; 250 unsigned int start_addr = addr; 251 252 if (mac_esp_wait_for_dreq(esp)) 253 break; 254 255 if (write) { 256 MAC_ESP_PDMA_LOOP("%4@,%0@+"); 257 258 esp_count -= addr - start_addr; 259 } else { 260 unsigned int n; 261 262 MAC_ESP_PDMA_LOOP("%0@+,%4@"); 263 264 if (mac_esp_wait_for_empty_fifo(esp)) 265 break; 266 267 n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW); 268 addr = start_addr + esp_count - n; 269 esp_count = n; 270 } 271 } while (esp_count); 272 } 273 274 /* 275 * Programmed IO routines follow. 276 */ 277 278 static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp) 279 { 280 int i = 500000; 281 282 do { 283 unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; 284 285 if (fbytes) 286 return fbytes; 287 288 udelay(2); 289 } while (--i); 290 291 printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n", 292 esp_read8(ESP_STATUS)); 293 return 0; 294 } 295 296 static inline int mac_esp_wait_for_intr(struct esp *esp) 297 { 298 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 299 int i = 500000; 300 301 do { 302 esp->sreg = esp_read8(ESP_STATUS); 303 if (esp->sreg & ESP_STAT_INTR) 304 return 0; 305 306 udelay(2); 307 } while (--i); 308 309 printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg); 310 mep->error = 1; 311 return 1; 312 } 313 314 #define MAC_ESP_PIO_LOOP(operands, reg1) \ 315 asm volatile ( \ 316 "1: moveb " operands " \n" \ 317 " subqw #1,%1 \n" \ 318 " jbne 1b \n" \ 319 : "+a" (addr), "+r" (reg1) \ 320 : "a" (fifo)) 321 322 #define MAC_ESP_PIO_FILL(operands, reg1) \ 323 asm volatile ( \ 324 " moveb " operands " \n" \ 325 " moveb " operands " \n" \ 326 " moveb " operands " \n" \ 327 " moveb " operands " \n" \ 328 " moveb " operands " \n" \ 329 " moveb " operands " \n" \ 330 " moveb " operands " \n" \ 331 " moveb " operands " \n" \ 332 " moveb " operands " \n" \ 333 " moveb " operands " \n" \ 334 " moveb " operands " \n" \ 335 " moveb " operands " \n" \ 336 " moveb " operands " \n" \ 337 " moveb " operands " \n" \ 338 " moveb " operands " \n" \ 339 " moveb " operands " \n" \ 340 " subqw #8,%1 \n" \ 341 " subqw #8,%1 \n" \ 342 : "+a" (addr), "+r" (reg1) \ 343 : "a" (fifo)) 344 345 #define MAC_ESP_FIFO_SIZE 16 346 347 static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, 348 u32 dma_count, int write, u8 cmd) 349 { 350 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 351 u8 __iomem *fifo = esp->regs + ESP_FDATA * 16; 352 u8 phase = esp->sreg & ESP_STAT_PMASK; 353 354 cmd &= ~ESP_CMD_DMA; 355 mep->error = 0; 356 357 if (write) { 358 u8 *dst = (u8 *)addr; 359 u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV); 360 361 scsi_esp_cmd(esp, cmd); 362 363 while (1) { 364 if (!mac_esp_wait_for_fifo(esp)) 365 break; 366 367 *dst++ = esp_read8(ESP_FDATA); 368 --esp_count; 369 370 if (!esp_count) 371 break; 372 373 if (mac_esp_wait_for_intr(esp)) 374 break; 375 376 if ((esp->sreg & ESP_STAT_PMASK) != phase) 377 break; 378 379 esp->ireg = esp_read8(ESP_INTRPT); 380 if (esp->ireg & mask) { 381 mep->error = 1; 382 break; 383 } 384 385 if (phase == ESP_MIP) 386 scsi_esp_cmd(esp, ESP_CMD_MOK); 387 388 scsi_esp_cmd(esp, ESP_CMD_TI); 389 } 390 } else { 391 scsi_esp_cmd(esp, ESP_CMD_FLUSH); 392 393 if (esp_count >= MAC_ESP_FIFO_SIZE) 394 MAC_ESP_PIO_FILL("%0@+,%2@", esp_count); 395 else 396 MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count); 397 398 scsi_esp_cmd(esp, cmd); 399 400 while (esp_count) { 401 unsigned int n; 402 403 if (mac_esp_wait_for_intr(esp)) 404 break; 405 406 if ((esp->sreg & ESP_STAT_PMASK) != phase) 407 break; 408 409 esp->ireg = esp_read8(ESP_INTRPT); 410 if (esp->ireg & ~ESP_INTR_BSERV) { 411 mep->error = 1; 412 break; 413 } 414 415 n = MAC_ESP_FIFO_SIZE - 416 (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES); 417 if (n > esp_count) 418 n = esp_count; 419 420 if (n == MAC_ESP_FIFO_SIZE) { 421 MAC_ESP_PIO_FILL("%0@+,%2@", esp_count); 422 } else { 423 esp_count -= n; 424 MAC_ESP_PIO_LOOP("%0@+,%2@", n); 425 } 426 427 scsi_esp_cmd(esp, ESP_CMD_TI); 428 } 429 } 430 } 431 432 static int mac_esp_irq_pending(struct esp *esp) 433 { 434 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) 435 return 1; 436 return 0; 437 } 438 439 static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) 440 { 441 return dma_len > 0xFFFF ? 0xFFFF : dma_len; 442 } 443 444 static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id) 445 { 446 int got_intr; 447 448 /* 449 * This is an edge triggered IRQ, so we have to be careful to 450 * avoid missing a transition when it is shared by two ESP devices. 451 */ 452 453 do { 454 got_intr = 0; 455 if (esp_chips[0] && 456 (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) { 457 (void)scsi_esp_intr(irq, esp_chips[0]); 458 got_intr = 1; 459 } 460 if (esp_chips[1] && 461 (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) { 462 (void)scsi_esp_intr(irq, esp_chips[1]); 463 got_intr = 1; 464 } 465 } while (got_intr); 466 467 return IRQ_HANDLED; 468 } 469 470 static struct esp_driver_ops mac_esp_ops = { 471 .esp_write8 = mac_esp_write8, 472 .esp_read8 = mac_esp_read8, 473 .map_single = mac_esp_map_single, 474 .map_sg = mac_esp_map_sg, 475 .unmap_single = mac_esp_unmap_single, 476 .unmap_sg = mac_esp_unmap_sg, 477 .irq_pending = mac_esp_irq_pending, 478 .dma_length_limit = mac_esp_dma_length_limit, 479 .reset_dma = mac_esp_reset_dma, 480 .dma_drain = mac_esp_dma_drain, 481 .dma_invalidate = mac_esp_dma_invalidate, 482 .send_dma_cmd = mac_esp_send_pdma_cmd, 483 .dma_error = mac_esp_dma_error, 484 }; 485 486 static int esp_mac_probe(struct platform_device *dev) 487 { 488 struct scsi_host_template *tpnt = &scsi_esp_template; 489 struct Scsi_Host *host; 490 struct esp *esp; 491 int err; 492 struct mac_esp_priv *mep; 493 494 if (!MACH_IS_MAC) 495 return -ENODEV; 496 497 if (dev->id > 1) 498 return -ENODEV; 499 500 host = scsi_host_alloc(tpnt, sizeof(struct esp)); 501 502 err = -ENOMEM; 503 if (!host) 504 goto fail; 505 506 host->max_id = 8; 507 host->use_clustering = DISABLE_CLUSTERING; 508 esp = shost_priv(host); 509 510 esp->host = host; 511 esp->dev = dev; 512 513 esp->command_block = kzalloc(16, GFP_KERNEL); 514 if (!esp->command_block) 515 goto fail_unlink; 516 esp->command_block_dma = (dma_addr_t)esp->command_block; 517 518 esp->scsi_id = 7; 519 host->this_id = esp->scsi_id; 520 esp->scsi_id_mask = 1 << esp->scsi_id; 521 522 mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL); 523 if (!mep) 524 goto fail_free_command_block; 525 mep->esp = esp; 526 platform_set_drvdata(dev, mep); 527 528 switch (macintosh_config->scsi_type) { 529 case MAC_SCSI_QUADRA: 530 esp->cfreq = 16500000; 531 esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA; 532 mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET; 533 mep->pdma_regs = NULL; 534 break; 535 case MAC_SCSI_QUADRA2: 536 esp->cfreq = 25000000; 537 esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 + 538 dev->id * MAC_ESP_REGS_SPACING); 539 mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET; 540 mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG + 541 dev->id * MAC_ESP_PDMA_REG_SPACING); 542 nubus_writel(0x1d1, mep->pdma_regs); 543 break; 544 case MAC_SCSI_QUADRA3: 545 /* These quadras have a real DMA controller (the PSC) but we 546 * don't know how to drive it so we must use PIO instead. 547 */ 548 esp->cfreq = 25000000; 549 esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3; 550 mep->pdma_io = NULL; 551 mep->pdma_regs = NULL; 552 break; 553 } 554 555 esp->ops = &mac_esp_ops; 556 if (mep->pdma_io == NULL) { 557 printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id); 558 esp_write8(0, ESP_TCLOW); 559 esp_write8(0, ESP_TCMED); 560 esp->flags = ESP_FLAG_DISABLE_SYNC; 561 mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd; 562 } else { 563 printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id); 564 } 565 566 host->irq = IRQ_MAC_SCSI; 567 568 /* The request_irq() call is intended to succeed for the first device 569 * and fail for the second device. 570 */ 571 err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); 572 spin_lock(&esp_chips_lock); 573 if (err < 0 && esp_chips[!dev->id] == NULL) { 574 spin_unlock(&esp_chips_lock); 575 goto fail_free_priv; 576 } 577 esp_chips[dev->id] = esp; 578 spin_unlock(&esp_chips_lock); 579 580 err = scsi_esp_register(esp, &dev->dev); 581 if (err) 582 goto fail_free_irq; 583 584 return 0; 585 586 fail_free_irq: 587 spin_lock(&esp_chips_lock); 588 esp_chips[dev->id] = NULL; 589 if (esp_chips[!dev->id] == NULL) { 590 spin_unlock(&esp_chips_lock); 591 free_irq(host->irq, NULL); 592 } else 593 spin_unlock(&esp_chips_lock); 594 fail_free_priv: 595 kfree(mep); 596 fail_free_command_block: 597 kfree(esp->command_block); 598 fail_unlink: 599 scsi_host_put(host); 600 fail: 601 return err; 602 } 603 604 static int esp_mac_remove(struct platform_device *dev) 605 { 606 struct mac_esp_priv *mep = platform_get_drvdata(dev); 607 struct esp *esp = mep->esp; 608 unsigned int irq = esp->host->irq; 609 610 scsi_esp_unregister(esp); 611 612 spin_lock(&esp_chips_lock); 613 esp_chips[dev->id] = NULL; 614 if (esp_chips[!dev->id] == NULL) { 615 spin_unlock(&esp_chips_lock); 616 free_irq(irq, NULL); 617 } else 618 spin_unlock(&esp_chips_lock); 619 620 kfree(mep); 621 622 kfree(esp->command_block); 623 624 scsi_host_put(esp->host); 625 626 return 0; 627 } 628 629 static struct platform_driver esp_mac_driver = { 630 .probe = esp_mac_probe, 631 .remove = esp_mac_remove, 632 .driver = { 633 .name = DRV_MODULE_NAME, 634 }, 635 }; 636 637 static int __init mac_esp_init(void) 638 { 639 return platform_driver_register(&esp_mac_driver); 640 } 641 642 static void __exit mac_esp_exit(void) 643 { 644 platform_driver_unregister(&esp_mac_driver); 645 } 646 647 MODULE_DESCRIPTION("Mac ESP SCSI driver"); 648 MODULE_AUTHOR("Finn Thain"); 649 MODULE_LICENSE("GPL v2"); 650 MODULE_VERSION(DRV_VERSION); 651 MODULE_ALIAS("platform:" DRV_MODULE_NAME); 652 653 module_init(mac_esp_init); 654 module_exit(mac_esp_exit); 655