/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright 2011 Nathan Whitehorn
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/rtas.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#ifdef POWERNV
#include <powerpc/powernv/opal.h>
#endif

#include "phyp-hvcall.h"
#include "pic_if.h"

#define	XICP_PRIORITY	5	/* Random non-zero number */
#define	XICP_IPI	2
#define	MAX_XICP_IRQS	(1<<24)	/* 24-bit XIRR field */

static int	xicp_probe(device_t);
static int	xicp_attach(device_t);
static int	xics_probe(device_t);
static int	xics_attach(device_t);

static void	xicp_bind(device_t dev, u_int irq, cpuset_t cpumask);
static void	xicp_dispatch(device_t, struct trapframe *);
static void	xicp_enable(device_t, u_int, u_int);
static void	xicp_eoi(device_t, u_int);
static void	xicp_ipi(device_t, u_int);
static void	xicp_mask(device_t, u_int);
static void	xicp_unmask(device_t, u_int);

static device_method_t  xicp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xicp_probe),
	DEVMETHOD(device_attach,	xicp_attach),

	/* PIC interface */
	DEVMETHOD(pic_bind,		xicp_bind),
	DEVMETHOD(pic_dispatch,		xicp_dispatch),
	DEVMETHOD(pic_enable,		xicp_enable),
	DEVMETHOD(pic_eoi,		xicp_eoi),
	DEVMETHOD(pic_ipi,		xicp_ipi),
	DEVMETHOD(pic_mask,		xicp_mask),
	DEVMETHOD(pic_unmask,		xicp_unmask),

	DEVMETHOD_END
};

static device_method_t  xics_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xics_probe),
	DEVMETHOD(device_attach,	xics_attach),

	DEVMETHOD_END
};

struct xicp_softc {
	struct mtx sc_mtx;
	struct resource *mem[MAXCPU];

	int cpu_range[2];

	int ibm_int_on;
	int ibm_int_off;
	int ibm_get_xive;
	int ibm_set_xive;

	/* XXX: inefficient -- hash table? tree? */
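	/*
	 * Each entry maps a platform interrupt source (irq) to the FreeBSD
	 * interrupt vector it was registered with and the CPU it is
	 * currently routed to; looked up by linear search in dispatch,
	 * bind, mask and unmask.
	 */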
	struct {
		int irq;
		int vector;
		int cpu;
	} intvecs[256];
	int nintvecs;
};

static driver_t xicp_driver = {
	"xicp",
	xicp_methods,
	sizeof(struct xicp_softc)
};

static driver_t xics_driver = {
	"xics",
	xics_methods,
	0
};

static devclass_t xicp_devclass;
static devclass_t xics_devclass;

EARLY_DRIVER_MODULE(xicp, ofwbus, xicp_driver, xicp_devclass, 0, 0,
    BUS_PASS_INTERRUPT-1);
EARLY_DRIVER_MODULE(xics, ofwbus, xics_driver, xics_devclass, 0, 0,
    BUS_PASS_INTERRUPT);

#ifdef POWERNV
static struct resource *
xicp_mem_for_cpu(int cpu)
{
	device_t dev;
	struct xicp_softc *sc;
	int i;

	for (i = 0; (dev = devclass_get_device(xicp_devclass, i)) != NULL; i++){
		sc = device_get_softc(dev);
		if (cpu >= sc->cpu_range[0] && cpu < sc->cpu_range[1])
			return (sc->mem[cpu - sc->cpu_range[0]]);
	}

	return (NULL);
}
#endif

static int
xicp_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "ibm,ppc-xicp"))
		return (ENXIO);

	device_set_desc(dev, "External Interrupt Presentation Controller");
	return (BUS_PROBE_GENERIC);
}

static int
xics_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "ibm,ppc-xics"))
		return (ENXIO);

	device_set_desc(dev, "External Interrupt Source Controller");
	return (BUS_PROBE_GENERIC);
}

static int
xicp_attach(device_t dev)
{
	struct xicp_softc *sc = device_get_softc(dev);
	phandle_t phandle = ofw_bus_get_node(dev);

	if (rtas_exists()) {
		sc->ibm_int_on = rtas_token_lookup("ibm,int-on");
		sc->ibm_int_off = rtas_token_lookup("ibm,int-off");
		sc->ibm_set_xive = rtas_token_lookup("ibm,set-xive");
		sc->ibm_get_xive = rtas_token_lookup("ibm,get-xive");
#ifdef POWERNV
	} else if (opal_check() == 0) {
		/* No init needed */
#endif
	} else {
		device_printf(dev, "Cannot attach without RTAS or OPAL\n");
		return (ENXIO);
	}

	if (OF_hasprop(phandle, "ibm,interrupt-server-ranges")) {
		OF_getencprop(phandle, "ibm,interrupt-server-ranges",
		    sc->cpu_range, sizeof(sc->cpu_range));
		sc->cpu_range[1] += sc->cpu_range[0];
		device_printf(dev, "Handling CPUs %d-%d\n", sc->cpu_range[0],
		    sc->cpu_range[1]-1);
	} else {
		sc->cpu_range[0] = 0;
		sc->cpu_range[1] = mp_ncpus;
	}

#ifdef POWERNV
	if (mfmsr() & PSL_HV) {
		int i;

		for (i = 0; i < sc->cpu_range[1] - sc->cpu_range[0]; i++) {
			sc->mem[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			    &i, RF_ACTIVE);
			if (sc->mem[i] == NULL) {
				device_printf(dev, "Could not alloc mem "
				    "resource %d\n", i);
				return (ENXIO);
			}

			/* Unmask interrupts on all cores */
			bus_write_1(sc->mem[i], 4, 0xff);
			bus_write_1(sc->mem[i], 12, 0xff);
		}
	}
#endif

	mtx_init(&sc->sc_mtx, "XICP", NULL, MTX_DEF);
	sc->nintvecs = 0;

	powerpc_register_pic(dev, OF_xref_from_node(phandle), MAX_XICP_IRQS,
	    1 /* Number of IPIs */, FALSE);
	root_pic = dev;

	return (0);
}

static int
xics_attach(device_t dev)
{
	phandle_t phandle = ofw_bus_get_node(dev);

	/* The XICP (root PIC) will handle all our interrupts */
	powerpc_register_pic(root_pic, OF_xref_from_node(phandle),
	    MAX_XICP_IRQS, 1 /* Number of IPIs */, FALSE);

	return (0);
}

/*
 * PIC I/F methods.
 */
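/*
 * Note on the register accesses below (summarized from the code rather than
 * a hardware manual): in hypervisor mode (POWERNV) each CPU's presentation
 * registers are memory-mapped -- a 4-byte read at offset 4 returns the XIRR
 * (current priority in the top byte, interrupt source in the low 24 bits),
 * a 1-byte write at offset 4 sets the CPPR, a 4-byte write at offset 4
 * signals EOI, and a 1-byte write at offset 12 sets the MFRR used for IPIs.
 * Under pHyp the same operations go through the H_XIRR, H_CPPR, H_EOI and
 * H_IPI hypervisor calls.
 */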

static void
xicp_bind(device_t dev, u_int irq, cpuset_t cpumask)
{
	struct xicp_softc *sc = device_get_softc(dev);
	cell_t status, cpu;
	int ncpus, i, error;

	/* Ignore IPIs */
	if (irq == MAX_XICP_IRQS)
		return;

	/*
	 * This doesn't appear to actually support affinity groups, so pick a
	 * random CPU.
	 */
	ncpus = 0;
	CPU_FOREACH(cpu)
		if (CPU_ISSET(cpu, &cpumask)) ncpus++;

	i = mftb() % ncpus;
	ncpus = 0;
	CPU_FOREACH(cpu) {
		if (!CPU_ISSET(cpu, &cpumask))
			continue;
		if (ncpus == i)
			break;
		ncpus++;
	}

	cpu = pcpu_find(cpu)->pc_hwref;

	/* XXX: super inefficient */
	for (i = 0; i < sc->nintvecs; i++) {
		if (sc->intvecs[i].irq == irq) {
			sc->intvecs[i].cpu = cpu;
			break;
		}
	}
	KASSERT(i < sc->nintvecs, ("Binding non-configured interrupt"));

	if (rtas_exists())
		error = rtas_call_method(sc->ibm_set_xive, 3, 1, irq, cpu,
		    XICP_PRIORITY, &status);
#ifdef POWERNV
	else
		error = opal_call(OPAL_SET_XIVE, irq, cpu << 2, XICP_PRIORITY);
#endif

	if (error < 0)
		panic("Cannot bind interrupt %d to CPU %d", irq, cpu);
}

static void
xicp_dispatch(device_t dev, struct trapframe *tf)
{
	struct xicp_softc *sc;
	struct resource *regs = NULL;
	uint64_t xirr, junk;
	int i;

#ifdef POWERNV
	if (mfmsr() & PSL_HV) {
		regs = xicp_mem_for_cpu(PCPU_GET(hwref));
		KASSERT(regs != NULL,
		    ("Can't find regs for CPU %ld", (uintptr_t)PCPU_GET(hwref)));
	}
#endif

	sc = device_get_softc(dev);
	for (;;) {
		if (regs) {
			xirr = bus_read_4(regs, 4);
		} else {
			/* Return value in R4, use the PFT call */
			phyp_pft_hcall(H_XIRR, 0, 0, 0, 0, &xirr, &junk, &junk);
		}
		xirr &= 0x00ffffff;

		if (xirr == 0) { /* No more pending interrupts? */
			if (regs)
				bus_write_1(regs, 4, 0xff);
			else
				phyp_hcall(H_CPPR, (uint64_t)0xff);
			break;
		}
		if (xirr == XICP_IPI) {		/* Magic number for IPIs */
			xirr = MAX_XICP_IRQS;	/* Map to FreeBSD magic */

			/* Clear IPI */
			if (regs)
				bus_write_1(regs, 12, 0xff);
			else
				phyp_hcall(H_IPI, (uint64_t)(PCPU_GET(hwref)),
				    0xff);
		}

		/* XXX: super inefficient */
		for (i = 0; i < sc->nintvecs; i++) {
			if (sc->intvecs[i].irq == xirr)
				break;
		}

		KASSERT(i < sc->nintvecs, ("Unmapped XIRR"));
		powerpc_dispatch_intr(sc->intvecs[i].vector, tf);
	}
}

static void
xicp_enable(device_t dev, u_int irq, u_int vector)
{
	struct xicp_softc *sc;
	cell_t status, cpu;

	sc = device_get_softc(dev);

	KASSERT(sc->nintvecs + 1 < nitems(sc->intvecs),
	    ("Too many XICP interrupts"));

	/* Bind to this CPU to start: distrib. ID is last entry in gserver# */
	cpu = PCPU_GET(hwref);

	mtx_lock(&sc->sc_mtx);
	sc->intvecs[sc->nintvecs].irq = irq;
	sc->intvecs[sc->nintvecs].vector = vector;
	sc->intvecs[sc->nintvecs].cpu = cpu;
	mb();
	sc->nintvecs++;
	mtx_unlock(&sc->sc_mtx);

	/* IPIs are also enabled */
	if (irq == MAX_XICP_IRQS)
		return;

	if (rtas_exists()) {
		rtas_call_method(sc->ibm_set_xive, 3, 1, irq, cpu,
		    XICP_PRIORITY, &status);
		xicp_unmask(dev, irq);
#ifdef POWERNV
	} else {
		status = opal_call(OPAL_SET_XIVE, irq, cpu << 2, XICP_PRIORITY);
		/* Unmask implicit for OPAL */

		if (status != 0)
			panic("OPAL_SET_XIVE IRQ %d -> cpu %d failed: %d", irq,
			    cpu, status);
#endif
	}
}

static void
xicp_eoi(device_t dev, u_int irq)
{
	uint64_t xirr;

	if (irq == MAX_XICP_IRQS) /* Remap IPI interrupt to internal value */
		irq = XICP_IPI;
	xirr = irq | (XICP_PRIORITY << 24);

#ifdef POWERNV
	if (mfmsr() & PSL_HV)
		bus_write_4(xicp_mem_for_cpu(PCPU_GET(hwref)), 4, xirr);
	else
#endif
		phyp_hcall(H_EOI, xirr);
}

static void
xicp_ipi(device_t dev, u_int cpu)
{

#ifdef POWERNV
	cpu = pcpu_find(cpu)->pc_hwref;

	if (mfmsr() & PSL_HV)
		bus_write_1(xicp_mem_for_cpu(cpu), 12, XICP_PRIORITY);
	else
#endif
		phyp_hcall(H_IPI, (uint64_t)cpu, XICP_PRIORITY);
}

static void
xicp_mask(device_t dev, u_int irq)
{
	struct xicp_softc *sc = device_get_softc(dev);
	cell_t status;

	if (irq == MAX_XICP_IRQS)
		return;

	if (rtas_exists()) {
		rtas_call_method(sc->ibm_int_off, 1, 1, irq, &status);
#ifdef POWERNV
	} else {
		int i;

		for (i = 0; i < sc->nintvecs; i++) {
			if (sc->intvecs[i].irq == irq) {
				break;
			}
		}
		KASSERT(i < sc->nintvecs, ("Masking unconfigured interrupt"));
		opal_call(OPAL_SET_XIVE, irq, sc->intvecs[i].cpu << 2, 0xff);
#endif
	}
}

static void
xicp_unmask(device_t dev, u_int irq)
{
	struct xicp_softc *sc = device_get_softc(dev);
	cell_t status;

	if (irq == MAX_XICP_IRQS)
		return;

	if (rtas_exists()) {
		rtas_call_method(sc->ibm_int_on, 1, 1, irq, &status);
#ifdef POWERNV
	} else {
		int i;

		for (i = 0; i < sc->nintvecs; i++) {
			if (sc->intvecs[i].irq == irq) {
				break;
			}
		}
		KASSERT(i < sc->nintvecs, ("Unmasking unconfigured interrupt"));
		opal_call(OPAL_SET_XIVE, irq, sc->intvecs[i].cpu << 2,
		    XICP_PRIORITY);
#endif
	}
}