/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2009 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/platformvar.h>
#include <machine/rtas.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

#include "platform_if.h"

#ifdef SMP
extern void *ap_pcpu;
#endif

#ifdef __powerpc64__
static uint8_t splpar_vpa[MAXCPU][640] __aligned(128); /* XXX: dpcpu */
#endif

static vm_offset_t realmaxaddr = VM_MAX_ADDRESS;

static int chrp_probe(platform_t);
static int chrp_attach(platform_t);
void chrp_mem_regions(platform_t, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz);
static vm_offset_t chrp_real_maxaddr(platform_t);
static u_long chrp_timebase_freq(platform_t, struct cpuref *cpuref);
static int chrp_smp_first_cpu(platform_t, struct cpuref *cpuref);
static int chrp_smp_next_cpu(platform_t, struct cpuref *cpuref);
static int chrp_smp_get_bsp(platform_t, struct cpuref *cpuref);
static void chrp_smp_ap_init(platform_t);
static int chrp_cpuref_init(void);
#ifdef SMP
static int chrp_smp_start_cpu(platform_t, struct pcpu *cpu);
static struct cpu_group *chrp_smp_topo(platform_t plat);
#endif
static void chrp_reset(platform_t);
#ifdef __powerpc64__
#include "phyp-hvcall.h"
static void phyp_cpu_idle(sbintime_t sbt);
#endif

static struct cpuref platform_cpuref[MAXCPU];
static int platform_cpuref_cnt;
static int platform_cpuref_valid;

static platform_method_t chrp_methods[] = {
        PLATFORMMETHOD(platform_probe, chrp_probe),
        PLATFORMMETHOD(platform_attach, chrp_attach),
        PLATFORMMETHOD(platform_mem_regions, chrp_mem_regions),
        PLATFORMMETHOD(platform_real_maxaddr, chrp_real_maxaddr),
        PLATFORMMETHOD(platform_timebase_freq, chrp_timebase_freq),

        PLATFORMMETHOD(platform_smp_ap_init, chrp_smp_ap_init),
        PLATFORMMETHOD(platform_smp_first_cpu, chrp_smp_first_cpu),
        PLATFORMMETHOD(platform_smp_next_cpu, chrp_smp_next_cpu),
        PLATFORMMETHOD(platform_smp_get_bsp, chrp_smp_get_bsp),
#ifdef SMP
        PLATFORMMETHOD(platform_smp_start_cpu, chrp_smp_start_cpu),
        PLATFORMMETHOD(platform_smp_topo, chrp_smp_topo),
#endif

        PLATFORMMETHOD(platform_reset, chrp_reset),

        { 0, 0 }
};

static platform_def_t chrp_platform = {
        "chrp",
        chrp_methods,
        0
};

PLATFORM_DEF(chrp_platform);

static int
chrp_probe(platform_t plat)
{
        if (OF_finddevice("/memory") != -1 || OF_finddevice("/memory@0") != -1)
                return (BUS_PROBE_GENERIC);

        return (ENXIO);
}

static int
chrp_attach(platform_t plat)
{
#ifdef __powerpc64__
        int i;

        /* XXX: check for /rtas/ibm,hypertas-functions? */
        if (!(mfmsr() & PSL_HV)) {
                struct mem_region *phys, *avail;
                int nphys, navail;

                mem_regions(&phys, &nphys, &avail, &navail);
                realmaxaddr = phys[0].mr_size;

                pmap_mmu_install("mmu_phyp", BUS_PROBE_SPECIFIC);
                cpu_idle_hook = phyp_cpu_idle;

                /* Set up important VPA fields */
                for (i = 0; i < MAXCPU; i++) {
                        bzero(splpar_vpa[i], sizeof(splpar_vpa[i]));
                        /* VPA size: 16-bit big-endian value at bytes 4-5 */
                        splpar_vpa[i][4] =
                            (uint8_t)((sizeof(splpar_vpa[i]) >> 8) & 0xff);
                        splpar_vpa[i][5] =
                            (uint8_t)(sizeof(splpar_vpa[i]) & 0xff);
                        splpar_vpa[i][0xba] = 1;        /* Maintain FPRs */
                        splpar_vpa[i][0xbb] = 1;        /* Maintain PMCs */
                        splpar_vpa[i][0xfc] = 0xff;     /* Maintain full SLB */
                        splpar_vpa[i][0xfd] = 0xff;
                        splpar_vpa[i][0xff] = 1;        /* Maintain Altivec */
                }
                mb();

                /* Set up hypervisor CPU state */
                chrp_smp_ap_init(plat);
        }
#endif
        chrp_cpuref_init();

        /* Some systems (e.g. QEMU) need Open Firmware to stand down */
        ofw_quiesce();

        return (0);
}

/*
 * Parse the "ibm,dynamic-reconfiguration-memory" node, if present, and
 * append its usable memory blocks to the physical and available region
 * lists.  Returns 1 if the node was found and parsed, 0 otherwise.
 */
static int
parse_drconf_memory(struct mem_region *ofmem, int *msz,
    struct mem_region *ofavail, int *asz)
{
        phandle_t phandle;
        vm_offset_t base;
        int i, idx, len, lasz, lmsz, res;
        uint32_t flags, lmb_size[2];
        uint32_t *dmem;

        lmsz = *msz;
        lasz = *asz;

        phandle = OF_finddevice("/ibm,dynamic-reconfiguration-memory");
        if (phandle == -1)
                /* No drconf node, return. */
                return (0);

        res = OF_getencprop(phandle, "ibm,lmb-size", lmb_size,
            sizeof(lmb_size));
        if (res == -1)
                return (0);
        printf("Logical Memory Block size: %d MB\n", lmb_size[1] >> 20);

        /*
         * Parse the /ibm,dynamic-memory property.  The first word gives
         * the number of entries.  The next two words hold the address of
         * the memory block, followed by one word each for the DRC index,
         * a reserved field, the list index and the flags.
         * (see PAPR C.6.6.2 ibm,dynamic-reconfiguration-memory)
         *
         *  #el  Addr   DRC-idx  res   list-idx  flags
         * -------------------------------------------------
         * | 4  |  8   |   4    |  4  |    4    |  4   |....
         * -------------------------------------------------
         */

        len = OF_getproplen(phandle, "ibm,dynamic-memory");
        if (len > 0) {
                /*
                 * We have to use a variable-length array on the stack
                 * since we have very limited stack space.
                 */
                cell_t arr[len/sizeof(cell_t)];

                res = OF_getencprop(phandle, "ibm,dynamic-memory", arr,
                    sizeof(arr));
                if (res == -1)
                        return (0);

                /* Number of elements */
                idx = arr[0];

                /* First address, in arr[1], arr[2] */
                dmem = &arr[1];

                for (i = 0; i < idx; i++) {
                        base = ((uint64_t)dmem[0] << 32) + dmem[1];
                        dmem += 4;
                        flags = dmem[1];
                        /* Use region only if available and not reserved. */
                        if ((flags & 0x8) && !(flags & 0x80)) {
                                ofmem[lmsz].mr_start = base;
                                ofmem[lmsz].mr_size = (vm_size_t)lmb_size[1];
                                ofavail[lasz].mr_start = base;
                                ofavail[lasz].mr_size = (vm_size_t)lmb_size[1];
                                lmsz++;
                                lasz++;
                        }
                        dmem += 2;
                }
        }

        *msz = lmsz;
        *asz = lasz;

        return (1);
}

void
chrp_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz)
{
        vm_offset_t maxphysaddr;
        int i;

        ofw_mem_regions(phys, physsz, avail, availsz);
        parse_drconf_memory(phys, physsz, avail, availsz);

        /*
         * On some firmwares (SLOF), some memory may be marked available that
         * doesn't actually exist. This manifests as an extension of the last
         * available segment past the end of physical memory, so truncate that
         * one.
         */
        maxphysaddr = 0;
        for (i = 0; i < *physsz; i++)
                if (phys[i].mr_start + phys[i].mr_size > maxphysaddr)
                        maxphysaddr = phys[i].mr_start + phys[i].mr_size;

        for (i = 0; i < *availsz; i++)
                if (avail[i].mr_start + avail[i].mr_size > maxphysaddr)
                        avail[i].mr_size = maxphysaddr - avail[i].mr_start;
}

static vm_offset_t
chrp_real_maxaddr(platform_t plat)
{
        return (realmaxaddr);
}

static u_long
chrp_timebase_freq(platform_t plat, struct cpuref *cpuref)
{
        phandle_t cpus, cpunode;
        int32_t ticks = -1;
        int res;
        char buf[8];

        cpus = OF_finddevice("/cpus");
        if (cpus == -1)
                panic("CPU tree not found on Open Firmware\n");

        for (cpunode = OF_child(cpus); cpunode != 0;
            cpunode = OF_peer(cpunode)) {
                res = OF_getprop(cpunode, "device_type", buf, sizeof(buf));
                if (res > 0 && strcmp(buf, "cpu") == 0)
                        break;
        }
        if (cpunode <= 0)
                panic("CPU node not found on Open Firmware\n");

        OF_getencprop(cpunode, "timebase-frequency", &ticks, sizeof(ticks));

        if (ticks <= 0)
                panic("Unable to determine timebase frequency!");

        return (ticks);
}

static int
chrp_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
{

        if (platform_cpuref_valid == 0)
                return (EINVAL);

        cpuref->cr_cpuid = 0;
        cpuref->cr_hwref = platform_cpuref[0].cr_hwref;

        return (0);
}

static int
chrp_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{
        int id;

        if (platform_cpuref_valid == 0)
                return (EINVAL);

        id = cpuref->cr_cpuid + 1;
        if (id >= platform_cpuref_cnt)
                return (ENOENT);

        cpuref->cr_cpuid = platform_cpuref[id].cr_cpuid;
        cpuref->cr_hwref = platform_cpuref[id].cr_hwref;

        return (0);
}

static int
chrp_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
{

        cpuref->cr_cpuid = platform_cpuref[0].cr_cpuid;
        cpuref->cr_hwref = platform_cpuref[0].cr_hwref;
        return (0);
}
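
/*
 * Enumerate the CPUs advertised by Open Firmware.  Each "cpu" node under
 * /cpus lists its hardware threads in "ibm,ppc-interrupt-server#s"; one
 * cpuref is recorded per thread, with the interrupt server number as the
 * hardware reference and a sequential index as the logical CPU id.
 */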
static int
chrp_cpuref_init(void)
{
        phandle_t cpu, dev;
        char buf[32];
        int a, res;
        cell_t interrupt_servers[32];
        uint64_t bsp;

        if (platform_cpuref_valid)
                return (0);

        dev = OF_peer(0);
        dev = OF_child(dev);
        while (dev != 0) {
                res = OF_getprop(dev, "name", buf, sizeof(buf));
                if (res > 0 && strcmp(buf, "cpus") == 0)
                        break;
                dev = OF_peer(dev);
        }

        bsp = 0;
        for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
                res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
                if (res > 0 && strcmp(buf, "cpu") == 0) {
                        res = OF_getproplen(cpu, "ibm,ppc-interrupt-server#s");
                        if (res > 0) {
                                OF_getencprop(cpu,
                                    "ibm,ppc-interrupt-server#s",
                                    interrupt_servers, res);

                                for (a = 0; a < res/sizeof(cell_t); a++) {
                                        platform_cpuref[platform_cpuref_cnt].cr_hwref =
                                            interrupt_servers[a];
                                        platform_cpuref[platform_cpuref_cnt].cr_cpuid =
                                            platform_cpuref_cnt;

                                        platform_cpuref_cnt++;
                                }
                        }
                }
        }

        platform_cpuref_valid = 1;

        return (0);
}

#ifdef SMP
static int
chrp_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
        cell_t start_cpu;
        int result, err, timeout;

        if (!rtas_exists()) {
                printf("RTAS uninitialized: unable to start AP %d\n",
                    pc->pc_cpuid);
                return (ENXIO);
        }

        start_cpu = rtas_token_lookup("start-cpu");
        if (start_cpu == -1) {
                printf("RTAS unknown method: unable to start AP %d\n",
                    pc->pc_cpuid);
                return (ENXIO);
        }

        ap_pcpu = pc;
        powerpc_sync();

        result = rtas_call_method(start_cpu, 3, 1, pc->pc_hwref, EXC_RST, pc,
            &err);
        if (result < 0 || err != 0) {
                printf("RTAS error (%d/%d): unable to start AP %d\n",
                    result, err, pc->pc_cpuid);
                return (ENXIO);
        }

        timeout = 10000;
        while (!pc->pc_awake && timeout--)
                DELAY(100);

        return ((pc->pc_awake) ? 0 : EBUSY);
}

static struct cpu_group *
chrp_smp_topo(platform_t plat)
{
        struct pcpu *pc, *last_pc;
        int i, ncores, ncpus;

        ncores = ncpus = 0;
        last_pc = NULL;
        for (i = 0; i <= mp_maxid; i++) {
                pc = pcpu_find(i);
                if (pc == NULL)
                        continue;
                if (last_pc == NULL || pc->pc_hwref != last_pc->pc_hwref)
                        ncores++;
                last_pc = pc;
                ncpus++;
        }

        if (ncpus % ncores != 0) {
                printf("WARNING: Irregular SMP topology. Performance may be "
                    "suboptimal (%d CPUS, %d cores)\n", ncpus, ncores);
                return (smp_topo_none());
        }

        /* Don't do anything fancier for non-threaded SMP */
        if (ncpus == ncores)
                return (smp_topo_none());

        return (smp_topo_1level(CG_SHARE_L1, ncpus / ncores, CG_FLAG_SMT));
}
#endif

static void
chrp_reset(platform_t platform)
{
        OF_reboot();
}

#ifdef __powerpc64__
static void
phyp_cpu_idle(sbintime_t sbt)
{
        register_t msr;

        msr = mfmsr();

        mtmsr(msr & ~PSL_EE);
        if (sched_runnable()) {
                mtmsr(msr);
                return;
        }

        phyp_hcall(H_CEDE);     /* Re-enables interrupts internally */
        mtmsr(msr);
}

static void
chrp_smp_ap_init(platform_t platform)
{
        if (!(mfmsr() & PSL_HV)) {
                /* Register VPA */
                phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(hwref),
                    splpar_vpa[PCPU_GET(hwref)]);

                /* Set interrupt priority */
                phyp_hcall(H_CPPR, 0xff);
        }
}
#else
static void
chrp_smp_ap_init(platform_t platform)
{
}
#endif