1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright 2019 Justin Hibbits 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 20 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 22 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_platform.h" 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/module.h> 36 #include <sys/bus.h> 37 #include <sys/conf.h> 38 #include <sys/endian.h> 39 #include <sys/kernel.h> 40 #include <sys/lock.h> 41 #include <sys/malloc.h> 42 #include <sys/mutex.h> 43 #include <sys/smp.h> 44 45 #include <vm/vm.h> 46 #include <vm/pmap.h> 47 48 #include <machine/bus.h> 49 #include <machine/intr_machdep.h> 50 #include <machine/md_var.h> 51 52 #include <dev/ofw/ofw_bus.h> 53 #include <dev/ofw/ofw_bus_subr.h> 54 55 #ifdef POWERNV 56 #include <powerpc/powernv/opal.h> 57 #endif 58 59 #include "pic_if.h" 60 61 #define XIVE_PRIORITY 7 /* Random non-zero number */ 62 #define MAX_XIVE_IRQS (1<<24) /* 24-bit XIRR field */ 63 64 /* Registers */ 65 #define XIVE_TM_QW1_OS 0x010 /* Guest OS registers */ 66 #define XIVE_TM_QW2_HV_POOL 0x020 /* Hypervisor pool registers */ 67 #define XIVE_TM_QW3_HV 0x030 /* Hypervisor registers */ 68 69 #define XIVE_TM_NSR 0x00 70 #define XIVE_TM_CPPR 0x01 71 #define XIVE_TM_IPB 0x02 72 #define XIVE_TM_LSMFB 0x03 73 #define XIVE_TM_ACK_CNT 0x04 74 #define XIVE_TM_INC 0x05 75 #define XIVE_TM_AGE 0x06 76 #define XIVE_TM_PIPR 0x07 77 78 #define TM_WORD0 0x0 79 #define TM_WORD2 0x8 80 #define TM_QW2W2_VP 0x80000000 81 82 #define XIVE_TM_SPC_ACK 0x800 83 #define TM_QW3NSR_HE_SHIFT 14 84 #define TM_QW3_NSR_HE_NONE 0 85 #define TM_QW3_NSR_HE_POOL 1 86 #define TM_QW3_NSR_HE_PHYS 2 87 #define TM_QW3_NSR_HE_LSI 3 88 #define XIVE_TM_SPC_PULL_POOL_CTX 0x828 89 90 #define XIVE_IRQ_LOAD_EOI 0x000 91 #define XIVE_IRQ_STORE_EOI 0x400 92 #define XIVE_IRQ_PQ_00 0xc00 93 #define XIVE_IRQ_PQ_01 0xd00 94 95 #define XIVE_IRQ_VAL_P 0x02 96 #define XIVE_IRQ_VAL_Q 0x01 97 98 struct xive_softc; 99 struct xive_irq; 100 101 extern void (*powernv_smp_ap_extra_init)(void); 102 103 /* Private support */ 104 static void xive_setup_cpu(void); 105 static void xive_smp_cpu_startup(void); 106 static void xive_init_irq(struct xive_irq *irqd, u_int irq); 107 static struct xive_irq *xive_configure_irq(u_int irq); 108 static int xive_provision_page(struct xive_softc *sc); 109 110 /* Interfaces */ 111 static int xive_probe(device_t); 112 static int xive_attach(device_t); 113 static int xics_probe(device_t); 114 static int xics_attach(device_t); 115 116 static void xive_bind(device_t, u_int, cpuset_t, void **); 117 static void xive_dispatch(device_t, struct trapframe *); 118 static void xive_enable(device_t, u_int, u_int, void **); 119 static void xive_eoi(device_t, u_int, void *); 120 static void xive_ipi(device_t, u_int); 121 static void xive_mask(device_t, u_int, void *); 122 static void xive_unmask(device_t, u_int, void *); 123 static void xive_translate_code(device_t dev, u_int irq, int code, 124 enum intr_trigger *trig, enum intr_polarity *pol); 125 126 static device_method_t xive_methods[] = { 127 /* Device interface */ 128 DEVMETHOD(device_probe, xive_probe), 129 DEVMETHOD(device_attach, xive_attach), 130 131 /* PIC interface */ 132 DEVMETHOD(pic_bind, xive_bind), 133 DEVMETHOD(pic_dispatch, xive_dispatch), 134 DEVMETHOD(pic_enable, xive_enable), 135 DEVMETHOD(pic_eoi, xive_eoi), 136 DEVMETHOD(pic_ipi, xive_ipi), 137 DEVMETHOD(pic_mask, xive_mask), 138 DEVMETHOD(pic_unmask, xive_unmask), 139 DEVMETHOD(pic_translate_code, xive_translate_code), 140 141 DEVMETHOD_END 142 }; 143 144 static device_method_t xics_methods[] = { 145 /* Device interface */ 146 DEVMETHOD(device_probe, xics_probe), 147 DEVMETHOD(device_attach, xics_attach), 148 149 DEVMETHOD_END 150 }; 151 152 struct xive_softc { 153 struct mtx sc_mtx; 154 struct resource *sc_mem; 155 vm_size_t sc_prov_page_size; 156 uint32_t sc_offset; 157 }; 158 159 struct xive_queue { 160 uint32_t *q_page; 161 uint32_t *q_eoi_page; 162 uint32_t q_toggle; 163 uint32_t q_size; 164 uint32_t q_index; 165 uint32_t q_mask; 166 }; 167 168 struct xive_irq { 169 uint32_t girq; 170 uint32_t lirq; 171 uint64_t vp; 172 uint64_t flags; 173 #define OPAL_XIVE_IRQ_SHIFT_BUG 0x00000008 174 #define OPAL_XIVE_IRQ_LSI 0x00000004 175 #define OPAL_XIVE_IRQ_STORE_EOI 0x00000002 176 #define OPAL_XIVE_IRQ_TRIGGER_PAGE 0x00000001 177 uint8_t prio; 178 vm_offset_t eoi_page; 179 vm_offset_t trig_page; 180 vm_size_t esb_size; 181 int chip; 182 }; 183 184 struct xive_cpu { 185 uint64_t vp; 186 uint64_t flags; 187 struct xive_irq ipi_data; 188 struct xive_queue queue; /* We only use a single queue for now. */ 189 uint64_t cam; 190 uint32_t chip; 191 }; 192 193 static driver_t xive_driver = { 194 "xive", 195 xive_methods, 196 sizeof(struct xive_softc) 197 }; 198 199 static driver_t xics_driver = { 200 "xivevc", 201 xics_methods, 202 0 203 }; 204 205 static devclass_t xive_devclass; 206 static devclass_t xics_devclass; 207 208 EARLY_DRIVER_MODULE(xive, ofwbus, xive_driver, xive_devclass, 0, 0, 209 BUS_PASS_INTERRUPT-1); 210 EARLY_DRIVER_MODULE(xivevc, ofwbus, xics_driver, xics_devclass, 0, 0, 211 BUS_PASS_INTERRUPT); 212 213 MALLOC_DEFINE(M_XIVE, "xive", "XIVE Memory"); 214 215 DPCPU_DEFINE_STATIC(struct xive_cpu, xive_cpu_data); 216 217 static int xive_ipi_vector = -1; 218 219 /* 220 * XIVE Exploitation mode driver. 221 * 222 * The XIVE, present in the POWER9 CPU, can run in two modes: XICS emulation 223 * mode, and "Exploitation mode". XICS emulation mode is compatible with the 224 * POWER8 and earlier XICS interrupt controller, using OPAL calls to emulate 225 * hypervisor calls and memory accesses. Exploitation mode gives us raw access 226 * to the XIVE MMIO, improving performance significantly. 227 * 228 * The XIVE controller is a very bizarre interrupt controller. It uses queues 229 * in memory to pass interrupts around, and maps itself into 512GB of physical 230 * device address space, giving each interrupt in the system one or more pages 231 * of address space. An IRQ is tied to a virtual processor, which could be a 232 * physical CPU thread, or a guest CPU thread (LPAR running on a physical 233 * thread). Thus, the controller can route interrupts directly to guest OSes 234 * bypassing processing by the hypervisor, thereby improving performance of the 235 * guest OS. 236 * 237 * An IRQ, in addition to being tied to a virtual processor, has one or two 238 * page mappings: an EOI page, and an optional trigger page. The trigger page 239 * could be the same as the EOI page. Level-sensitive interrupts (LSIs) don't 240 * have a trigger page, as they're external interrupts controlled by physical 241 * lines. MSIs and IPIs have trigger pages. An IPI is really just another IRQ 242 * in the XIVE, which is triggered by software. 243 * 244 * An interesting behavior of the XIVE controller is that oftentimes the 245 * contents of an address location don't actually matter, but the direction of 246 * the action is the signifier (read vs write), and the address is significant. 247 * Hence, masking and unmasking an interrupt is done by reading different 248 * addresses in the EOI page, and triggering an interrupt consists of writing to 249 * the trigger page. 250 * 251 * Additionally, the MMIO region mapped is CPU-sensitive, just like the 252 * per-processor register space (private access) in OpenPIC. In order for a CPU 253 * to receive interrupts it must itself configure its CPPR (Current Processor 254 * Priority Register), it cannot be set by any other processor. This 255 * necessitates the xive_smp_cpu_startup() function. 256 * 257 * Queues are pages of memory, sized powers-of-two, that are shared with the 258 * XIVE. The XIVE writes into the queue with an alternating polarity bit, which 259 * flips when the queue wraps. 260 */ 261 262 /* 263 * Offset-based read/write interfaces. 264 */ 265 static uint16_t 266 xive_read_2(struct xive_softc *sc, bus_size_t offset) 267 { 268 269 return (bus_read_2(sc->sc_mem, sc->sc_offset + offset)); 270 } 271 272 static void 273 xive_write_1(struct xive_softc *sc, bus_size_t offset, uint8_t val) 274 { 275 276 bus_write_1(sc->sc_mem, sc->sc_offset + offset, val); 277 } 278 279 /* EOI and Trigger page access interfaces. */ 280 static uint64_t 281 xive_read_mmap8(vm_offset_t addr) 282 { 283 return (*(volatile uint64_t *)addr); 284 } 285 286 static void 287 xive_write_mmap8(vm_offset_t addr, uint64_t val) 288 { 289 *(uint64_t *)(addr) = val; 290 } 291 292 /* Device interfaces. */ 293 static int 294 xive_probe(device_t dev) 295 { 296 297 if (!ofw_bus_is_compatible(dev, "ibm,opal-xive-pe")) 298 return (ENXIO); 299 300 device_set_desc(dev, "External Interrupt Virtualization Engine"); 301 302 /* Make sure we always win against the xicp driver. */ 303 return (BUS_PROBE_DEFAULT); 304 } 305 306 static int 307 xics_probe(device_t dev) 308 { 309 310 if (!ofw_bus_is_compatible(dev, "ibm,opal-xive-vc")) 311 return (ENXIO); 312 313 device_set_desc(dev, "External Interrupt Virtualization Engine Root"); 314 return (BUS_PROBE_DEFAULT); 315 } 316 317 static int 318 xive_attach(device_t dev) 319 { 320 struct xive_softc *sc = device_get_softc(dev); 321 struct xive_cpu *xive_cpud; 322 phandle_t phandle = ofw_bus_get_node(dev); 323 int64_t vp_block; 324 int error; 325 int rid; 326 int i, order; 327 uint64_t vp_id; 328 int64_t ipi_irq; 329 330 opal_call(OPAL_XIVE_RESET, OPAL_XIVE_XICS_MODE_EXP); 331 332 error = OF_getencprop(phandle, "ibm,xive-provision-page-size", 333 (pcell_t *)&sc->sc_prov_page_size, sizeof(sc->sc_prov_page_size)); 334 335 rid = 1; /* Get the Hypervisor-level register set. */ 336 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 337 &rid, RF_ACTIVE); 338 sc->sc_offset = XIVE_TM_QW3_HV; 339 340 mtx_init(&sc->sc_mtx, "XIVE", NULL, MTX_DEF); 341 342 /* Workaround for qemu single-thread powernv */ 343 if (mp_maxid == 0) 344 order = 1; 345 else 346 order = fls(mp_maxid + (mp_maxid - 1)) - 1; 347 348 do { 349 vp_block = opal_call(OPAL_XIVE_ALLOCATE_VP_BLOCK, order); 350 if (vp_block == OPAL_BUSY) 351 DELAY(10); 352 else if (vp_block == OPAL_XIVE_PROVISIONING) 353 xive_provision_page(sc); 354 else 355 break; 356 } while (1); 357 358 if (vp_block < 0) { 359 device_printf(dev, 360 "Unable to allocate VP block. Opal error %d\n", 361 (int)vp_block); 362 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->sc_mem); 363 return (ENXIO); 364 } 365 366 /* 367 * Set up the VPs. Try to do as much as we can in attach, to lessen 368 * what's needed at AP spawn time. 369 */ 370 CPU_FOREACH(i) { 371 vp_id = pcpu_find(i)->pc_hwref; 372 373 xive_cpud = DPCPU_ID_PTR(i, xive_cpu_data); 374 xive_cpud->vp = vp_id + vp_block; 375 opal_call(OPAL_XIVE_GET_VP_INFO, xive_cpud->vp, NULL, 376 vtophys(&xive_cpud->cam), NULL, vtophys(&xive_cpud->chip)); 377 378 xive_cpud->cam = be64toh(xive_cpud->cam); 379 xive_cpud->chip = be64toh(xive_cpud->chip); 380 381 /* Allocate the queue page and populate the queue state data. */ 382 xive_cpud->queue.q_page = contigmalloc(PAGE_SIZE, M_XIVE, 383 M_ZERO | M_WAITOK, 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); 384 xive_cpud->queue.q_size = 1 << PAGE_SHIFT; 385 xive_cpud->queue.q_mask = 386 ((xive_cpud->queue.q_size / sizeof(int)) - 1); 387 xive_cpud->queue.q_toggle = 0; 388 xive_cpud->queue.q_index = 0; 389 do { 390 error = opal_call(OPAL_XIVE_SET_VP_INFO, xive_cpud->vp, 391 OPAL_XIVE_VP_ENABLED, 0); 392 } while (error == OPAL_BUSY); 393 error = opal_call(OPAL_XIVE_SET_QUEUE_INFO, vp_id, 394 XIVE_PRIORITY, vtophys(xive_cpud->queue.q_page), PAGE_SHIFT, 395 OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED); 396 397 do { 398 ipi_irq = opal_call(OPAL_XIVE_ALLOCATE_IRQ, 399 xive_cpud->chip); 400 } while (ipi_irq == OPAL_BUSY); 401 402 if (ipi_irq < 0) 403 device_printf(root_pic, 404 "Failed allocating IPI. OPAL error %d\n", 405 (int)ipi_irq); 406 else { 407 xive_init_irq(&xive_cpud->ipi_data, ipi_irq); 408 xive_cpud->ipi_data.vp = vp_id; 409 xive_cpud->ipi_data.lirq = MAX_XIVE_IRQS; 410 opal_call(OPAL_XIVE_SET_IRQ_CONFIG, ipi_irq, 411 xive_cpud->ipi_data.vp, XIVE_PRIORITY, 412 MAX_XIVE_IRQS); 413 } 414 } 415 416 powerpc_register_pic(dev, OF_xref_from_node(phandle), MAX_XIVE_IRQS, 417 1 /* Number of IPIs */, FALSE); 418 root_pic = dev; 419 420 xive_setup_cpu(); 421 powernv_smp_ap_extra_init = xive_smp_cpu_startup; 422 423 return (0); 424 } 425 426 static int 427 xics_attach(device_t dev) 428 { 429 phandle_t phandle = ofw_bus_get_node(dev); 430 431 /* The XIVE (root PIC) will handle all our interrupts */ 432 powerpc_register_pic(root_pic, OF_xref_from_node(phandle), 433 MAX_XIVE_IRQS, 1 /* Number of IPIs */, FALSE); 434 435 return (0); 436 } 437 438 /* 439 * PIC I/F methods. 440 */ 441 442 static void 443 xive_bind(device_t dev, u_int irq, cpuset_t cpumask, void **priv) 444 { 445 struct xive_irq *irqd; 446 int cpu; 447 int ncpus, i, error; 448 449 if (*priv == NULL) 450 *priv = xive_configure_irq(irq); 451 452 irqd = *priv; 453 454 /* 455 * This doesn't appear to actually support affinity groups, so pick a 456 * random CPU. 457 */ 458 ncpus = 0; 459 CPU_FOREACH(cpu) 460 if (CPU_ISSET(cpu, &cpumask)) ncpus++; 461 462 i = mftb() % ncpus; 463 ncpus = 0; 464 CPU_FOREACH(cpu) { 465 if (!CPU_ISSET(cpu, &cpumask)) 466 continue; 467 if (ncpus == i) 468 break; 469 ncpus++; 470 } 471 472 opal_call(OPAL_XIVE_SYNC, OPAL_XIVE_SYNC_QUEUE, irq); 473 474 irqd->vp = pcpu_find(cpu)->pc_hwref; 475 error = opal_call(OPAL_XIVE_SET_IRQ_CONFIG, irq, irqd->vp, 476 XIVE_PRIORITY, irqd->lirq); 477 478 if (error < 0) 479 panic("Cannot bind interrupt %d to CPU %d", irq, cpu); 480 481 xive_eoi(dev, irq, irqd); 482 } 483 484 /* Read the next entry in the queue page and update the index. */ 485 static int 486 xive_read_eq(struct xive_queue *q) 487 { 488 uint32_t i = be32toh(q->q_page[q->q_index]); 489 490 /* Check validity, using current queue polarity. */ 491 if ((i >> 31) == q->q_toggle) 492 return (0); 493 494 q->q_index = (q->q_index + 1) & q->q_mask; 495 496 if (q->q_index == 0) 497 q->q_toggle ^= 1; 498 499 return (i & 0x7fffffff); 500 } 501 502 static void 503 xive_dispatch(device_t dev, struct trapframe *tf) 504 { 505 struct xive_softc *sc; 506 struct xive_cpu *xive_cpud; 507 uint32_t vector; 508 uint16_t ack; 509 uint8_t cppr, he; 510 511 sc = device_get_softc(dev); 512 513 xive_cpud = DPCPU_PTR(xive_cpu_data); 514 for (;;) { 515 ack = xive_read_2(sc, XIVE_TM_SPC_ACK); 516 cppr = (ack & 0xff); 517 518 he = ack >> TM_QW3NSR_HE_SHIFT; 519 520 if (he == TM_QW3_NSR_HE_NONE) 521 break; 522 523 else if (__predict_false(he != TM_QW3_NSR_HE_PHYS)) { 524 /* 525 * We don't support TM_QW3_NSR_HE_POOL or 526 * TM_QW3_NSR_HE_LSI interrupts. 527 */ 528 device_printf(dev, 529 "Unexpected interrupt he type: %d\n", he); 530 goto end; 531 } 532 533 xive_write_1(sc, XIVE_TM_CPPR, cppr); 534 535 for (;;) { 536 vector = xive_read_eq(&xive_cpud->queue); 537 538 if (vector == 0) 539 break; 540 541 if (vector == MAX_XIVE_IRQS) 542 vector = xive_ipi_vector; 543 544 powerpc_dispatch_intr(vector, tf); 545 } 546 } 547 end: 548 xive_write_1(sc, XIVE_TM_CPPR, 0xff); 549 } 550 551 static void 552 xive_enable(device_t dev, u_int irq, u_int vector, void **priv) 553 { 554 struct xive_irq *irqd; 555 cell_t status, cpu; 556 557 if (irq == MAX_XIVE_IRQS) { 558 if (xive_ipi_vector == -1) 559 xive_ipi_vector = vector; 560 return; 561 } 562 if (*priv == NULL) 563 *priv = xive_configure_irq(irq); 564 565 irqd = *priv; 566 567 /* Bind to this CPU to start */ 568 cpu = PCPU_GET(hwref); 569 irqd->lirq = vector; 570 571 for (;;) { 572 status = opal_call(OPAL_XIVE_SET_IRQ_CONFIG, irq, cpu, 573 XIVE_PRIORITY, vector); 574 if (status != OPAL_BUSY) 575 break; 576 DELAY(10); 577 } 578 579 if (status != 0) 580 panic("OPAL_SET_XIVE IRQ %d -> cpu %d failed: %d", irq, 581 cpu, status); 582 583 xive_unmask(dev, irq, *priv); 584 } 585 586 static void 587 xive_eoi(device_t dev, u_int irq, void *priv) 588 { 589 struct xive_irq *rirq; 590 struct xive_cpu *cpud; 591 uint8_t eoi_val; 592 593 if (irq == MAX_XIVE_IRQS) { 594 cpud = DPCPU_PTR(xive_cpu_data); 595 rirq = &cpud->ipi_data; 596 } else 597 rirq = priv; 598 599 if (rirq->flags & OPAL_XIVE_IRQ_STORE_EOI) 600 xive_write_mmap8(rirq->eoi_page + XIVE_IRQ_STORE_EOI, 0); 601 else if (rirq->flags & OPAL_XIVE_IRQ_LSI) 602 xive_read_mmap8(rirq->eoi_page + XIVE_IRQ_LOAD_EOI); 603 else { 604 eoi_val = xive_read_mmap8(rirq->eoi_page + XIVE_IRQ_PQ_00); 605 if ((eoi_val & XIVE_IRQ_VAL_Q) && rirq->trig_page != 0) 606 xive_write_mmap8(rirq->trig_page, 0); 607 } 608 } 609 610 static void 611 xive_ipi(device_t dev, u_int cpu) 612 { 613 struct xive_cpu *xive_cpud; 614 615 xive_cpud = DPCPU_ID_PTR(cpu, xive_cpu_data); 616 617 if (xive_cpud->ipi_data.trig_page == 0) 618 return; 619 xive_write_mmap8(xive_cpud->ipi_data.trig_page, 0); 620 } 621 622 static void 623 xive_mask(device_t dev, u_int irq, void *priv) 624 { 625 struct xive_irq *rirq; 626 627 /* Never mask IPIs */ 628 if (irq == MAX_XIVE_IRQS) 629 return; 630 631 rirq = priv; 632 633 if (!(rirq->flags & OPAL_XIVE_IRQ_LSI)) 634 return; 635 xive_read_mmap8(rirq->eoi_page + XIVE_IRQ_PQ_01); 636 } 637 638 static void 639 xive_unmask(device_t dev, u_int irq, void *priv) 640 { 641 struct xive_irq *rirq; 642 643 rirq = priv; 644 645 xive_read_mmap8(rirq->eoi_page + XIVE_IRQ_PQ_00); 646 } 647 648 static void 649 xive_translate_code(device_t dev, u_int irq, int code, 650 enum intr_trigger *trig, enum intr_polarity *pol) 651 { 652 switch (code) { 653 case 0: 654 /* L to H edge */ 655 *trig = INTR_TRIGGER_EDGE; 656 *pol = INTR_POLARITY_HIGH; 657 break; 658 case 1: 659 /* Active L level */ 660 *trig = INTR_TRIGGER_LEVEL; 661 *pol = INTR_POLARITY_LOW; 662 break; 663 default: 664 *trig = INTR_TRIGGER_CONFORM; 665 *pol = INTR_POLARITY_CONFORM; 666 } 667 } 668 669 /* Private functions. */ 670 /* 671 * Setup the current CPU. Called by the BSP at driver attachment, and by each 672 * AP at wakeup (via xive_smp_cpu_startup()). 673 */ 674 static void 675 xive_setup_cpu(void) 676 { 677 struct xive_softc *sc; 678 struct xive_cpu *cpup; 679 uint32_t val; 680 681 cpup = DPCPU_PTR(xive_cpu_data); 682 683 sc = device_get_softc(root_pic); 684 685 val = bus_read_4(sc->sc_mem, XIVE_TM_QW2_HV_POOL + TM_WORD2); 686 if (val & TM_QW2W2_VP) 687 bus_read_8(sc->sc_mem, XIVE_TM_SPC_PULL_POOL_CTX); 688 689 bus_write_4(sc->sc_mem, XIVE_TM_QW2_HV_POOL + TM_WORD0, 0xff); 690 bus_write_4(sc->sc_mem, XIVE_TM_QW2_HV_POOL + TM_WORD2, 691 TM_QW2W2_VP | cpup->cam); 692 693 xive_unmask(root_pic, cpup->ipi_data.girq, &cpup->ipi_data); 694 xive_write_1(sc, XIVE_TM_CPPR, 0xff); 695 } 696 697 /* Populate an IRQ structure, mapping the EOI and trigger pages. */ 698 static void 699 xive_init_irq(struct xive_irq *irqd, u_int irq) 700 { 701 uint64_t eoi_phys, trig_phys; 702 uint32_t esb_shift; 703 704 opal_call(OPAL_XIVE_GET_IRQ_INFO, irq, 705 vtophys(&irqd->flags), vtophys(&eoi_phys), 706 vtophys(&trig_phys), vtophys(&esb_shift), 707 vtophys(&irqd->chip)); 708 709 irqd->flags = be64toh(irqd->flags); 710 eoi_phys = be64toh(eoi_phys); 711 trig_phys = be64toh(trig_phys); 712 esb_shift = be32toh(esb_shift); 713 irqd->chip = be32toh(irqd->chip); 714 715 irqd->girq = irq; 716 irqd->esb_size = 1 << esb_shift; 717 irqd->eoi_page = (vm_offset_t)pmap_mapdev(eoi_phys, irqd->esb_size); 718 719 if (eoi_phys == trig_phys) 720 irqd->trig_page = irqd->eoi_page; 721 else if (trig_phys != 0) 722 irqd->trig_page = (vm_offset_t)pmap_mapdev(trig_phys, 723 irqd->esb_size); 724 else 725 irqd->trig_page = 0; 726 727 opal_call(OPAL_XIVE_GET_IRQ_CONFIG, irq, vtophys(&irqd->vp), 728 vtophys(&irqd->prio), vtophys(&irqd->lirq)); 729 730 irqd->vp = be64toh(irqd->vp); 731 irqd->prio = be64toh(irqd->prio); 732 irqd->lirq = be32toh(irqd->lirq); 733 } 734 735 /* Allocate an IRQ struct before populating it. */ 736 static struct xive_irq * 737 xive_configure_irq(u_int irq) 738 { 739 struct xive_irq *irqd; 740 741 irqd = malloc(sizeof(struct xive_irq), M_XIVE, M_WAITOK); 742 743 xive_init_irq(irqd, irq); 744 745 return (irqd); 746 } 747 748 /* 749 * Part of the OPAL API. OPAL_XIVE_ALLOCATE_VP_BLOCK might require more pages, 750 * provisioned through this call. 751 */ 752 static int 753 xive_provision_page(struct xive_softc *sc) 754 { 755 void *prov_page; 756 int error; 757 758 do { 759 prov_page = contigmalloc(sc->sc_prov_page_size, M_XIVE, 0, 760 0, BUS_SPACE_MAXADDR, 761 sc->sc_prov_page_size, sc->sc_prov_page_size); 762 763 error = opal_call(OPAL_XIVE_DONATE_PAGE, -1, 764 vtophys(prov_page)); 765 } while (error == OPAL_XIVE_PROVISIONING); 766 767 return (0); 768 } 769 770 /* The XIVE_TM_CPPR register must be set by each thread */ 771 static void 772 xive_smp_cpu_startup(void) 773 { 774 775 xive_setup_cpu(); 776 } 777