/*-
 * Copyright (C) 2008-2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/pciio.h>
#include <sys/rman.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_pci.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/openpicvar.h>
#include <machine/pio.h>
#include <machine/resource.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include "pcib_if.h"
#include "pic_if.h"
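
/*
 * The CPC925/CPC945 ("U3"/"U4") northbridge brings its PCI devices in
 * over HyperTransport and exposes their configuration space through a
 * memory-mapped window described by the "reg" property of the u3-ht
 * node.  Two cooperating drivers live in this file: a pcib(4)-style
 * bridge driver that performs config-space accesses and MSI allocation
 * through that window, and a wrapper around the shared OpenPIC code
 * that mirrors mask/unmask/EOI operations to the remote HT I/O APICs
 * recorded in cpcht_irqmap below.
 */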

/*
 * IBM CPC9X5 Hypertransport Device interface.
 */
static int              cpcht_probe(device_t);
static int              cpcht_attach(device_t);

static void             cpcht_configure_htbridge(device_t, phandle_t);

/*
 * Bus interface.
 */
static int              cpcht_read_ivar(device_t, device_t, int,
                            uintptr_t *);
static struct resource  *cpcht_alloc_resource(device_t bus, device_t child,
                            int type, int *rid, u_long start, u_long end,
                            u_long count, u_int flags);
static int              cpcht_activate_resource(device_t bus, device_t child,
                            int type, int rid, struct resource *res);
static int              cpcht_release_resource(device_t bus, device_t child,
                            int type, int rid, struct resource *res);
static int              cpcht_deactivate_resource(device_t bus, device_t child,
                            int type, int rid, struct resource *res);

/*
 * pcib interface.
 */
static int              cpcht_maxslots(device_t);
static u_int32_t        cpcht_read_config(device_t, u_int, u_int, u_int,
                            u_int, int);
static void             cpcht_write_config(device_t, u_int, u_int, u_int,
                            u_int, u_int32_t, int);
static int              cpcht_route_interrupt(device_t bus, device_t dev,
                            int pin);
static int              cpcht_alloc_msi(device_t dev, device_t child,
                            int count, int maxcount, int *irqs);
static int              cpcht_release_msi(device_t dev, device_t child,
                            int count, int *irqs);
static int              cpcht_alloc_msix(device_t dev, device_t child,
                            int *irq);
static int              cpcht_release_msix(device_t dev, device_t child,
                            int irq);
static int              cpcht_map_msi(device_t dev, device_t child,
                            int irq, uint64_t *addr, uint32_t *data);

/*
 * ofw_bus interface
 */
static phandle_t        cpcht_get_node(device_t bus, device_t child);

/*
 * Driver methods.
 */
static device_method_t cpcht_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         cpcht_probe),
        DEVMETHOD(device_attach,        cpcht_attach),

        /* Bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_read_ivar,        cpcht_read_ivar),
        DEVMETHOD(bus_setup_intr,       bus_generic_setup_intr),
        DEVMETHOD(bus_teardown_intr,    bus_generic_teardown_intr),
        DEVMETHOD(bus_alloc_resource,   cpcht_alloc_resource),
        DEVMETHOD(bus_release_resource, cpcht_release_resource),
        DEVMETHOD(bus_activate_resource,        cpcht_activate_resource),
        DEVMETHOD(bus_deactivate_resource,      cpcht_deactivate_resource),

        /* pcib interface */
        DEVMETHOD(pcib_maxslots,        cpcht_maxslots),
        DEVMETHOD(pcib_read_config,     cpcht_read_config),
        DEVMETHOD(pcib_write_config,    cpcht_write_config),
        DEVMETHOD(pcib_route_interrupt, cpcht_route_interrupt),
        DEVMETHOD(pcib_alloc_msi,       cpcht_alloc_msi),
        DEVMETHOD(pcib_release_msi,     cpcht_release_msi),
        DEVMETHOD(pcib_alloc_msix,      cpcht_alloc_msix),
        DEVMETHOD(pcib_release_msix,    cpcht_release_msix),
        DEVMETHOD(pcib_map_msi,         cpcht_map_msi),

        /* ofw_bus interface */
        DEVMETHOD(ofw_bus_get_node,     cpcht_get_node),

        { 0, 0 }
};

struct cpcht_irq {
        enum {
                IRQ_NONE, IRQ_HT, IRQ_MSI, IRQ_INTERNAL
        } irq_type;

        int             ht_source;

        vm_offset_t     ht_base;
        vm_offset_t     apple_eoi;
        uint32_t        eoi_data;
        int             edge;
};

static struct cpcht_irq *cpcht_irqmap = NULL;
uint32_t cpcht_msipic = 0;

struct cpcht_softc {
        device_t                sc_dev;
        phandle_t               sc_node;
        vm_offset_t             sc_data;
        uint64_t                sc_populated_slots;
        struct rman             sc_mem_rman;
        struct rman             sc_io_rman;

        struct cpcht_irq        htirq_map[128];
        struct mtx              htirq_mtx;
};

static driver_t cpcht_driver = {
        "pcib",
        cpcht_methods,
        sizeof(struct cpcht_softc)
};

static devclass_t cpcht_devclass;

DRIVER_MODULE(cpcht, nexus, cpcht_driver, cpcht_devclass, 0, 0);

#define CPCHT_IOPORT_BASE       0xf4000000UL /* Hardwired */
#define CPCHT_IOPORT_SIZE       0x00400000UL

#define HTAPIC_REQUEST_EOI      0x20
#define HTAPIC_TRIGGER_LEVEL    0x02
#define HTAPIC_MASK             0x01
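
/*
 * Shape of one entry of a child bridge's OFW "ranges" property as it
 * appears on these machines: a 3-cell PCI child address
 * (phys.hi/mid/lo), one cell that is not used here, a 2-cell host
 * address and a 2-cell size.  cpcht_configure_htbridge() only consults
 * pci_hi (for the space code), pci_lo and size_lo.
 */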

struct cpcht_range {
        u_int32_t       pci_hi;
        u_int32_t       pci_mid;
        u_int32_t       pci_lo;
        u_int32_t       junk;
        u_int32_t       host_hi;
        u_int32_t       host_lo;
        u_int32_t       size_hi;
        u_int32_t       size_lo;
};

static int
cpcht_probe(device_t dev)
{
        const char *type, *compatible;

        type = ofw_bus_get_type(dev);
        compatible = ofw_bus_get_compat(dev);

        if (type == NULL || compatible == NULL)
                return (ENXIO);

        if (strcmp(type, "ht") != 0)
                return (ENXIO);

        if (strcmp(compatible, "u3-ht") != 0)
                return (ENXIO);

        device_set_desc(dev, "IBM CPC9X5 HyperTransport Tunnel");
        return (0);
}

static int
cpcht_attach(device_t dev)
{
        struct cpcht_softc *sc;
        phandle_t node, child;
        u_int32_t reg[3];
        int i, error;

        node = ofw_bus_get_node(dev);
        sc = device_get_softc(dev);

        if (OF_getprop(node, "reg", reg, sizeof(reg)) < 12)
                return (ENXIO);

        sc->sc_dev = dev;
        sc->sc_node = node;
        sc->sc_populated_slots = 0;
        sc->sc_data = (vm_offset_t)pmap_mapdev(reg[1], reg[2]);

        sc->sc_mem_rman.rm_type = RMAN_ARRAY;
        sc->sc_mem_rman.rm_descr = "CPCHT Device Memory";
        error = rman_init(&sc->sc_mem_rman);
        if (error) {
                device_printf(dev, "rman_init() failed. error = %d\n", error);
                return (error);
        }

        sc->sc_io_rman.rm_type = RMAN_ARRAY;
        sc->sc_io_rman.rm_descr = "CPCHT I/O Memory";
        error = rman_init(&sc->sc_io_rman);
        if (error) {
                device_printf(dev, "rman_init() failed. error = %d\n", error);
                return (error);
        }

        /*
         * Set up the resource manager and the HT->MPIC mapping. For cpcht,
         * the ranges are properties of the child bridges, and this is also
         * where we get the HT interrupt properties.
         */

        /* I/O port mappings are usually not in the device tree */
        rman_manage_region(&sc->sc_io_rman, 0, CPCHT_IOPORT_SIZE - 1);

        bzero(sc->htirq_map, sizeof(sc->htirq_map));
        mtx_init(&sc->htirq_mtx, "cpcht irq", NULL, MTX_DEF);
        for (i = 0; i < 8; i++)
                sc->htirq_map[i].irq_type = IRQ_INTERNAL;
        for (child = OF_child(node); child != 0; child = OF_peer(child))
                cpcht_configure_htbridge(dev, child);

        /* Now make the mapping table available to the MPIC */
        cpcht_irqmap = sc->htirq_map;

        device_add_child(dev, "pci", device_get_unit(dev));

        return (bus_generic_attach(dev));
}
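
/*
 * For each child of the u3-ht node (the root PCI bridges of the HT
 * peers), note that its slot is populated, feed its "ranges" into the
 * I/O and memory rmans, and walk its capability list looking for
 * HyperTransport interrupt-definition capabilities.  Each interrupt
 * found there is masked and recorded in htirq_map, indexed by the MPIC
 * source it drives, so that the PIC wrapper at the bottom of this file
 * can later unmask and EOI it at the remote HT I/O APIC.
 */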

static void
cpcht_configure_htbridge(device_t dev, phandle_t child)
{
        struct cpcht_softc *sc;
        struct ofw_pci_register pcir;
        struct cpcht_range ranges[7], *rp;
        int nranges, ptr, nextptr;
        uint32_t vend, val;
        int i, nirq, irq;
        u_int f, s;

        sc = device_get_softc(dev);
        if (OF_getprop(child, "reg", &pcir, sizeof(pcir)) == -1)
                return;

        s = OFW_PCI_PHYS_HI_DEVICE(pcir.phys_hi);
        f = OFW_PCI_PHYS_HI_FUNCTION(pcir.phys_hi);

        /*
         * Mark this slot as populated. The remote south bridge does
         * not like us talking to unpopulated slots on the root bus.
         */
        sc->sc_populated_slots |= (1 << s);

        /*
         * Next grab this child bus's bus ranges.
         */
        bzero(ranges, sizeof(ranges));
        nranges = OF_getprop(child, "ranges", ranges, sizeof(ranges));
        nranges /= sizeof(ranges[0]);

        ranges[6].pci_hi = 0;
        for (rp = ranges; rp < ranges + nranges && rp->pci_hi != 0; rp++) {
                switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) {
                case OFW_PCI_PHYS_HI_SPACE_CONFIG:
                        break;
                case OFW_PCI_PHYS_HI_SPACE_IO:
                        rman_manage_region(&sc->sc_io_rman, rp->pci_lo,
                            rp->pci_lo + rp->size_lo - 1);
                        break;
                case OFW_PCI_PHYS_HI_SPACE_MEM32:
                        rman_manage_region(&sc->sc_mem_rman, rp->pci_lo,
                            rp->pci_lo + rp->size_lo - 1);
                        break;
                case OFW_PCI_PHYS_HI_SPACE_MEM64:
                        panic("64-bit CPCHT reserved memory!");
                        break;
                }
        }

        /*
         * Next build up any HT->MPIC mappings for this sub-bus. One would
         * naively hope that enabling, disabling, and EOIing interrupts would
         * cause the appropriate HT bus transactions to that effect. This is
         * not the case.
         *
         * Instead, we have to muck about on the HT peer's root PCI bridges,
         * figure out what interrupts they send, enable them, and cache
         * the location of their WaitForEOI registers so that we can
         * send EOIs later.
         */

        /* All the devices we are interested in have caps */
        if (!(PCIB_READ_CONFIG(dev, 0, s, f, PCIR_STATUS, 2)
            & PCIM_STATUS_CAPPRESENT))
                return;

        nextptr = PCIB_READ_CONFIG(dev, 0, s, f, PCIR_CAP_PTR, 1);
        while (nextptr != 0) {
                ptr = nextptr;
                nextptr = PCIB_READ_CONFIG(dev, 0, s, f,
                    ptr + PCICAP_NEXTPTR, 1);

                /* Find the HT IRQ capabilities */
                if (PCIB_READ_CONFIG(dev, 0, s, f,
                    ptr + PCICAP_ID, 1) != PCIY_HT)
                        continue;

                val = PCIB_READ_CONFIG(dev, 0, s, f, ptr + PCIR_HT_COMMAND, 2);
                if ((val & PCIM_HTCMD_CAP_MASK) != PCIM_HTCAP_INTERRUPT)
                        continue;

                /* Ask for the IRQ count */
                PCIB_WRITE_CONFIG(dev, 0, s, f, ptr + PCIR_HT_COMMAND, 0x1, 1);
                nirq = PCIB_READ_CONFIG(dev, 0, s, f, ptr + 4, 4);
                nirq = ((nirq >> 16) & 0xff) + 1;

                device_printf(dev, "%d HT IRQs on device %d.%d\n", nirq, s, f);

                for (i = 0; i < nirq; i++) {
                        PCIB_WRITE_CONFIG(dev, 0, s, f,
                            ptr + PCIR_HT_COMMAND, 0x10 + (i << 1), 1);
                        irq = PCIB_READ_CONFIG(dev, 0, s, f, ptr + 4, 4);

                        /*
                         * Mask this interrupt for now.
                         */
                        PCIB_WRITE_CONFIG(dev, 0, s, f, ptr + 4,
                            irq | HTAPIC_MASK, 4);
                        irq = (irq >> 16) & 0xff;

                        sc->htirq_map[irq].irq_type = IRQ_HT;
                        sc->htirq_map[irq].ht_source = i;
                        sc->htirq_map[irq].ht_base = sc->sc_data +
                            (((((s & 0x1f) << 3) | (f & 0x07)) << 8) | (ptr));

                        PCIB_WRITE_CONFIG(dev, 0, s, f,
                            ptr + PCIR_HT_COMMAND, 0x11 + (i << 1), 1);
                        sc->htirq_map[irq].eoi_data =
                            PCIB_READ_CONFIG(dev, 0, s, f, ptr + 4, 4) |
                            0x80000000;

                        /*
                         * Apple uses a non-compliant IO/APIC that differs
                         * in how we signal EOIs. Check if this device was
                         * made by Apple, and act accordingly.
                         */
                        vend = PCIB_READ_CONFIG(dev, 0, s, f,
                            PCIR_DEVVENDOR, 4);
                        if ((vend & 0xffff) == 0x106b)
                                sc->htirq_map[irq].apple_eoi =
                                    (sc->htirq_map[irq].ht_base - ptr) + 0x60;
                }
        }
}

static int
cpcht_maxslots(device_t dev)
{

        return (PCI_SLOTMAX);
}
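
/*
 * Config space accessors.  Registers are reached through the window
 * mapped at sc_data in cpcht_attach(); the offset of a register is
 *
 *      (slot << 11) | (func << 8) | reg
 *
 * and accesses to busses behind a bridge (bus > 0) go through what
 * appears to be a second region at sc_data + 0x01000000 + (bus << 16).
 * The in*rb()/out*rb() accessors byte-swap, since PCI configuration
 * space is little-endian and the CPU is big-endian.
 */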

static u_int32_t
cpcht_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
    int width)
{
        struct cpcht_softc *sc;
        vm_offset_t caoff;

        sc = device_get_softc(dev);
        caoff = sc->sc_data +
            (((((slot & 0x1f) << 3) | (func & 0x07)) << 8) | reg);

        if (bus == 0 && (!(sc->sc_populated_slots & (1 << slot)) || func > 0))
                return (0xffffffff);

        if (bus > 0)
                caoff += 0x01000000UL + (bus << 16);

        switch (width) {
        case 1:
                return (in8rb(caoff));
                break;
        case 2:
                return (in16rb(caoff));
                break;
        case 4:
                return (in32rb(caoff));
                break;
        }

        return (0xffffffff);
}

static void
cpcht_write_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, u_int32_t val, int width)
{
        struct cpcht_softc *sc;
        vm_offset_t caoff;

        sc = device_get_softc(dev);
        caoff = sc->sc_data +
            (((((slot & 0x1f) << 3) | (func & 0x07)) << 8) | reg);

        if (bus == 0 && (!(sc->sc_populated_slots & (1 << slot)) || func > 0))
                return;

        if (bus > 0)
                caoff += 0x01000000UL + (bus << 16);

        switch (width) {
        case 1:
                out8rb(caoff, val);
                break;
        case 2:
                out16rb(caoff, val);
                break;
        case 4:
                out32rb(caoff, val);
                break;
        }
}

static int
cpcht_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
        switch (which) {
        case PCIB_IVAR_DOMAIN:
                *result = device_get_unit(dev);
                return (0);
        case PCIB_IVAR_BUS:
                *result = 0;    /* Root bus */
                return (0);
        }

        return (ENOENT);
}

static phandle_t
cpcht_get_node(device_t bus, device_t dev)
{
        struct cpcht_softc *sc;

        sc = device_get_softc(bus);
        /* We only have one child, the PCI bus, which needs our own node. */
        return (sc->sc_node);
}

static int
cpcht_route_interrupt(device_t bus, device_t dev, int pin)
{
        return (pin);
}
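
/*
 * Resource management.  Memory ranges come from the child bridges'
 * "ranges" properties gathered above, while the 4 MB of I/O port space
 * is hardwired at CPCHT_IOPORT_BASE and is not described in the device
 * tree.  I/O and memory requests are satisfied from the local rmans;
 * IRQ requests are simply passed up to our parent bus.
 */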

static struct resource *
cpcht_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
        struct cpcht_softc *sc;
        struct resource *rv;
        struct rman *rm;
        int needactivate;

        needactivate = flags & RF_ACTIVE;
        flags &= ~RF_ACTIVE;

        sc = device_get_softc(bus);

        switch (type) {
        case SYS_RES_IOPORT:
                end = min(end, start + count);
                rm = &sc->sc_io_rman;
                break;

        case SYS_RES_MEMORY:
                rm = &sc->sc_mem_rman;
                break;

        case SYS_RES_IRQ:
                return (bus_alloc_resource(bus, type, rid, start, end, count,
                    flags));

        default:
                device_printf(bus, "unknown resource request from %s\n",
                    device_get_nameunit(child));
                return (NULL);
        }

        rv = rman_reserve_resource(rm, start, end, count, flags, child);
        if (rv == NULL) {
                device_printf(bus, "failed to reserve resource for %s\n",
                    device_get_nameunit(child));
                return (NULL);
        }

        rman_set_rid(rv, *rid);

        if (needactivate) {
                if (bus_activate_resource(child, type, *rid, rv) != 0) {
                        device_printf(bus,
                            "failed to activate resource for %s\n",
                            device_get_nameunit(child));
                        rman_release_resource(rv);
                        return (NULL);
                }
        }

        return (rv);
}

static int
cpcht_activate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{
        void *p;

        if (type == SYS_RES_IRQ)
                return (bus_activate_resource(bus, type, rid, res));

        if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
                vm_offset_t start;

                start = (vm_offset_t)rman_get_start(res);

                if (type == SYS_RES_IOPORT)
                        start += CPCHT_IOPORT_BASE;

                if (bootverbose)
                        printf("cpcht mapdev: start %zx, len %ld\n", start,
                            rman_get_size(res));

                p = pmap_mapdev(start, (vm_size_t)rman_get_size(res));
                if (p == NULL)
                        return (ENOMEM);
                rman_set_virtual(res, p);
                rman_set_bustag(res, &bs_le_tag);
                rman_set_bushandle(res, (u_long)p);
        }

        return (rman_activate_resource(res));
}

static int
cpcht_release_resource(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{

        if (rman_get_flags(res) & RF_ACTIVE) {
                int error = bus_deactivate_resource(child, type, rid, res);
                if (error)
                        return (error);
        }

        return (rman_release_resource(res));
}

static int
cpcht_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{

        /*
         * If this is a memory resource, unmap it.
         */
        if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) {
                u_int32_t psize;

                psize = rman_get_size(res);
                pmap_unmapdev((vm_offset_t)rman_get_virtual(res), psize);
        }

        return (rman_deactivate_resource(res));
}
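
/*
 * MSI support.  Message-signalled interrupts are backed by otherwise
 * unused HT interrupt sources: sources 0-7 are marked IRQ_INTERNAL in
 * cpcht_attach() and never handed out, so MSIs are carved out of the
 * 8-123 range by finding a run of IRQ_NONE slots in htirq_map.
 * MAP_IRQ() combines the OFW node of the MPIC (cpcht_msipic, set once
 * the htpic below attaches as the root PIC) with the source number to
 * form the IRQ number handed back to the child.
 */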

static int
cpcht_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    int *irqs)
{
        struct cpcht_softc *sc;
        int i, j;

        sc = device_get_softc(dev);
        j = 0;

        /* Bail if no MSI PIC yet */
        if (cpcht_msipic == 0)
                return (ENXIO);

        mtx_lock(&sc->htirq_mtx);
        for (i = 8; i < 124 - count; i++) {
                for (j = 0; j < count; j++) {
                        if (sc->htirq_map[i+j].irq_type != IRQ_NONE)
                                break;
                }
                if (j == count)
                        break;

                /* No free run of count slots can start at or before i + j */
                i += j;
        }

        if (j != count) {
                mtx_unlock(&sc->htirq_mtx);
                return (ENXIO);
        }

        for (j = 0; j < count; j++) {
                irqs[j] = MAP_IRQ(cpcht_msipic, i+j);
                sc->htirq_map[i+j].irq_type = IRQ_MSI;
        }
        mtx_unlock(&sc->htirq_mtx);

        return (0);
}

static int
cpcht_release_msi(device_t dev, device_t child, int count, int *irqs)
{
        struct cpcht_softc *sc;
        int i;

        sc = device_get_softc(dev);

        mtx_lock(&sc->htirq_mtx);
        for (i = 0; i < count; i++)
                sc->htirq_map[irqs[i] & 0xff].irq_type = IRQ_NONE;
        mtx_unlock(&sc->htirq_mtx);

        return (0);
}

static int
cpcht_alloc_msix(device_t dev, device_t child, int *irq)
{
        struct cpcht_softc *sc;
        int i;

        sc = device_get_softc(dev);

        /* Bail if no MSI PIC yet */
        if (cpcht_msipic == 0)
                return (ENXIO);

        mtx_lock(&sc->htirq_mtx);
        for (i = 8; i < 124; i++) {
                if (sc->htirq_map[i].irq_type == IRQ_NONE) {
                        sc->htirq_map[i].irq_type = IRQ_MSI;
                        *irq = MAP_IRQ(cpcht_msipic, i);

                        mtx_unlock(&sc->htirq_mtx);
                        return (0);
                }
        }
        mtx_unlock(&sc->htirq_mtx);

        return (ENXIO);
}

static int
cpcht_release_msix(device_t dev, device_t child, int irq)
{
        struct cpcht_softc *sc;

        sc = device_get_softc(dev);

        mtx_lock(&sc->htirq_mtx);
        sc->htirq_map[irq & 0xff].irq_type = IRQ_NONE;
        mtx_unlock(&sc->htirq_mtx);

        return (0);
}

static int
cpcht_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
        device_t pcib;
        struct pci_devinfo *dinfo;
        struct pcicfg_ht *ht = NULL;

        for (pcib = child; pcib != dev; pcib =
            device_get_parent(device_get_parent(pcib))) {
                dinfo = device_get_ivars(pcib);
                ht = &dinfo->cfg.ht;

                if (ht == NULL)
                        continue;
        }

        if (ht == NULL)
                return (ENXIO);

        *addr = ht->ht_msiaddr;
        *data = irq & 0xff;

        return (0);
}

/*
 * Driver for the integrated MPIC on U3/U4 (CPC925/CPC945)
 */
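
/*
 * The MPIC inputs on these northbridges are fed over HyperTransport, so
 * masking, unmasking and EOI cannot be handled at the OpenPIC alone:
 * the corresponding interrupt-definition register in the remote HT
 * I/O APIC has to be updated as well, using the per-source state the
 * bridge driver stashed in cpcht_irqmap.  The methods below therefore
 * wrap the generic openpic_*() routines rather than replace them.
 */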

static int      openpic_cpcht_probe(device_t);
static int      openpic_cpcht_attach(device_t);
static void     openpic_cpcht_config(device_t, u_int irq,
                    enum intr_trigger trig, enum intr_polarity pol);
static void     openpic_cpcht_enable(device_t, u_int irq, u_int vector);
static void     openpic_cpcht_unmask(device_t, u_int irq);
static void     openpic_cpcht_eoi(device_t, u_int irq);

static device_method_t openpic_cpcht_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         openpic_cpcht_probe),
        DEVMETHOD(device_attach,        openpic_cpcht_attach),

        /* PIC interface */
        DEVMETHOD(pic_bind,             openpic_bind),
        DEVMETHOD(pic_config,           openpic_cpcht_config),
        DEVMETHOD(pic_dispatch,         openpic_dispatch),
        DEVMETHOD(pic_enable,           openpic_cpcht_enable),
        DEVMETHOD(pic_eoi,              openpic_cpcht_eoi),
        DEVMETHOD(pic_ipi,              openpic_ipi),
        DEVMETHOD(pic_mask,             openpic_mask),
        DEVMETHOD(pic_unmask,           openpic_cpcht_unmask),

        { 0, 0 },
};

struct openpic_cpcht_softc {
        struct openpic_softc sc_openpic;

        struct mtx sc_ht_mtx;
};

static driver_t openpic_cpcht_driver = {
        "htpic",
        openpic_cpcht_methods,
        sizeof(struct openpic_cpcht_softc),
};

DRIVER_MODULE(openpic, unin, openpic_cpcht_driver, openpic_devclass, 0, 0);

static int
openpic_cpcht_probe(device_t dev)
{
        const char *type = ofw_bus_get_type(dev);

        if (type == NULL || strcmp(type, "open-pic") != 0)
                return (ENXIO);

        device_set_desc(dev, OPENPIC_DEVSTR);
        return (0);
}

static int
openpic_cpcht_attach(device_t dev)
{
        struct openpic_cpcht_softc *sc;
        phandle_t node;
        int err, irq;

        node = ofw_bus_get_node(dev);
        err = openpic_common_attach(dev, node);
        if (err != 0)
                return (err);

        /*
         * The HT APIC stuff is not thread-safe, so we need a mutex to
         * protect it.
         */
        sc = device_get_softc(dev);
        mtx_init(&sc->sc_ht_mtx, "htpic", NULL, MTX_SPIN);

        /*
         * Interrupts 0-3 are internally sourced and are level triggered
         * active low. Interrupts 4-123 are connected to a pulse generator
         * and should be programmed as edge triggered low-to-high.
         *
         * IBM CPC945 Manual, Section 9.3.
         */

        for (irq = 0; irq < 4; irq++)
                openpic_config(dev, irq, INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);
        for (irq = 4; irq < 124; irq++)
                openpic_config(dev, irq, INTR_TRIGGER_EDGE, INTR_POLARITY_LOW);

        /*
         * Use this PIC for MSI only if it is the root PIC. This may not
         * be necessary, but Linux does it, and I cannot find any U3 machines
         * with MSI devices to test.
         */
        if (dev == root_pic)
                cpcht_msipic = node;

        return (0);
}
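
/*
 * The remote I/O APIC is reached through the indirect register pair of
 * the HT interrupt-definition capability whose config-space location
 * was cached in ht_base: a byte written at ht_base + PCIR_HT_COMMAND
 * selects interrupt-definition register 0x10 + 2*source (low half) or
 * 0x11 + 2*source (the high half, which seems to double as the
 * WaitForEOI data cached in eoi_data), and the selected register is
 * then read or written as 32 bits at ht_base + 4.  This mirrors the
 * PCIB_*_CONFIG() accesses done in cpcht_configure_htbridge(), only
 * now through the memory-mapped window.
 */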

static void
openpic_cpcht_config(device_t dev, u_int irq, enum intr_trigger trig,
    enum intr_polarity pol)
{
        struct openpic_cpcht_softc *sc;
        uint32_t ht_irq;

        /*
         * The interrupt settings for the MPIC are completely determined
         * by the internal wiring in the northbridge. Real changes to these
         * settings need to be negotiated with the remote IO-APIC on the HT
         * link.
         */

        sc = device_get_softc(dev);

        if (cpcht_irqmap != NULL && irq < 128 &&
            cpcht_irqmap[irq].ht_base > 0 && !cpcht_irqmap[irq].edge) {
                mtx_lock_spin(&sc->sc_ht_mtx);

                /* Program the data port */
                out8rb(cpcht_irqmap[irq].ht_base + PCIR_HT_COMMAND,
                    0x10 + (cpcht_irqmap[irq].ht_source << 1));

                /* Grab the IRQ config register */
                ht_irq = in32rb(cpcht_irqmap[irq].ht_base + 4);

                /* Mask the IRQ while we fiddle settings */
                out32rb(cpcht_irqmap[irq].ht_base + 4, ht_irq | HTAPIC_MASK);

                /* Program the interrupt sense */
                ht_irq &= ~(HTAPIC_TRIGGER_LEVEL | HTAPIC_REQUEST_EOI);
                if (trig == INTR_TRIGGER_EDGE) {
                        cpcht_irqmap[irq].edge = 1;
                } else {
                        cpcht_irqmap[irq].edge = 0;
                        ht_irq |= HTAPIC_TRIGGER_LEVEL | HTAPIC_REQUEST_EOI;
                }
                out32rb(cpcht_irqmap[irq].ht_base + 4, ht_irq);

                mtx_unlock_spin(&sc->sc_ht_mtx);
        }
}

static void
openpic_cpcht_enable(device_t dev, u_int irq, u_int vec)
{
        struct openpic_cpcht_softc *sc;
        uint32_t ht_irq;

        openpic_enable(dev, irq, vec);

        sc = device_get_softc(dev);

        if (cpcht_irqmap != NULL && irq < 128 &&
            cpcht_irqmap[irq].ht_base > 0) {
                mtx_lock_spin(&sc->sc_ht_mtx);

                /* Program the data port */
                out8rb(cpcht_irqmap[irq].ht_base + PCIR_HT_COMMAND,
                    0x10 + (cpcht_irqmap[irq].ht_source << 1));

                /* Unmask the interrupt */
                ht_irq = in32rb(cpcht_irqmap[irq].ht_base + 4);
                ht_irq &= ~HTAPIC_MASK;
                out32rb(cpcht_irqmap[irq].ht_base + 4, ht_irq);

                mtx_unlock_spin(&sc->sc_ht_mtx);
        }

        openpic_cpcht_eoi(dev, irq);
}

static void
openpic_cpcht_unmask(device_t dev, u_int irq)
{
        struct openpic_cpcht_softc *sc;
        uint32_t ht_irq;

        openpic_unmask(dev, irq);

        sc = device_get_softc(dev);

        if (cpcht_irqmap != NULL && irq < 128 &&
            cpcht_irqmap[irq].ht_base > 0) {
                mtx_lock_spin(&sc->sc_ht_mtx);

                /* Program the data port */
                out8rb(cpcht_irqmap[irq].ht_base + PCIR_HT_COMMAND,
                    0x10 + (cpcht_irqmap[irq].ht_source << 1));

                /* Unmask the interrupt */
                ht_irq = in32rb(cpcht_irqmap[irq].ht_base + 4);
                ht_irq &= ~HTAPIC_MASK;
                out32rb(cpcht_irqmap[irq].ht_base + 4, ht_irq);

                mtx_unlock_spin(&sc->sc_ht_mtx);
        }

        openpic_cpcht_eoi(dev, irq);
}

static void
openpic_cpcht_eoi(device_t dev, u_int irq)
{
        struct openpic_cpcht_softc *sc;
        uint32_t off, mask;

        if (irq == 255)
                return;

        sc = device_get_softc(dev);

        if (cpcht_irqmap != NULL && irq < 128 &&
            cpcht_irqmap[irq].ht_base > 0 && !cpcht_irqmap[irq].edge) {
                /* If this is an HT IRQ, acknowledge it at the remote APIC */

                if (cpcht_irqmap[irq].apple_eoi) {
                        off = (cpcht_irqmap[irq].ht_source >> 3) & ~3;
                        mask = 1 << (cpcht_irqmap[irq].ht_source & 0x1f);
                        out32rb(cpcht_irqmap[irq].apple_eoi + off, mask);
                } else {
                        mtx_lock_spin(&sc->sc_ht_mtx);

                        out8rb(cpcht_irqmap[irq].ht_base + PCIR_HT_COMMAND,
                            0x11 + (cpcht_irqmap[irq].ht_source << 1));
                        out32rb(cpcht_irqmap[irq].ht_base + 4,
                            cpcht_irqmap[irq].eoi_data);

                        mtx_unlock_spin(&sc->sc_ht_mtx);
                }
        }

        openpic_eoi(dev, irq);
}