/*-
 * Copyright (c) 2015 Nathan Whitehorn
 * Copyright (c) 2017-2018 Semihalf
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/platformvar.h>
#include <machine/pmap.h>
#include <machine/rtas.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#include <powerpc/aim/mmu_oea64.h>

#include "platform_if.h"
#include "opal.h"

#ifdef SMP
extern void *ap_pcpu;
#endif

void (*powernv_smp_ap_extra_init)(void);

static int powernv_probe(platform_t);
static int powernv_attach(platform_t);
void powernv_mem_regions(platform_t, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz);
static void powernv_numa_mem_regions(platform_t plat, struct numa_mem_region *phys, int *physsz);
static u_long powernv_timebase_freq(platform_t, struct cpuref *cpuref);
static int powernv_smp_first_cpu(platform_t, struct cpuref *cpuref);
static int powernv_smp_next_cpu(platform_t, struct cpuref *cpuref);
static int powernv_smp_get_bsp(platform_t, struct cpuref *cpuref);
static void powernv_smp_ap_init(platform_t);
#ifdef SMP
static int powernv_smp_start_cpu(platform_t, struct pcpu *cpu);
static void powernv_smp_probe_threads(platform_t);
static struct cpu_group *powernv_smp_topo(platform_t plat);
#endif
static void powernv_reset(platform_t);
static void powernv_cpu_idle(sbintime_t sbt);
static int powernv_cpuref_init(void);

static platform_method_t powernv_methods[] = {
	PLATFORMMETHOD(platform_probe, powernv_probe),
	PLATFORMMETHOD(platform_attach, powernv_attach),
	PLATFORMMETHOD(platform_mem_regions, powernv_mem_regions),
	PLATFORMMETHOD(platform_numa_mem_regions, powernv_numa_mem_regions),
	PLATFORMMETHOD(platform_timebase_freq, powernv_timebase_freq),

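	/* CPU enumeration and SMP bring-up */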
	PLATFORMMETHOD(platform_smp_ap_init, powernv_smp_ap_init),
	PLATFORMMETHOD(platform_smp_first_cpu, powernv_smp_first_cpu),
	PLATFORMMETHOD(platform_smp_next_cpu, powernv_smp_next_cpu),
	PLATFORMMETHOD(platform_smp_get_bsp, powernv_smp_get_bsp),
#ifdef SMP
	PLATFORMMETHOD(platform_smp_start_cpu, powernv_smp_start_cpu),
	PLATFORMMETHOD(platform_smp_probe_threads, powernv_smp_probe_threads),
	PLATFORMMETHOD(platform_smp_topo, powernv_smp_topo),
#endif

	PLATFORMMETHOD(platform_reset, powernv_reset),

	{ 0, 0 }
};

static platform_def_t powernv_platform = {
	"powernv",
	powernv_methods,
	0
};

static struct cpuref platform_cpuref[MAXCPU];
static int platform_cpuref_cnt;
static int platform_cpuref_valid;

PLATFORM_DEF(powernv_platform);

static uint64_t powernv_boot_pir;

static int
powernv_probe(platform_t plat)
{
	if (opal_check() == 0)
		return (BUS_PROBE_SPECIFIC);

	return (ENXIO);
}

static int
powernv_attach(platform_t plat)
{
	uint32_t nptlp, shift = 0, slb_encoding = 0;
	int32_t lp_size, lp_encoding;
	uint64_t lpcr = LPCR_LPES;	/* assumed base value; not declared in the original */
	char buf[255];
	pcell_t prop;
	phandle_t cpu;
	int res, len, idx;
	register_t msr;

	/* Ping OPAL again just to make sure */
	opal_check();

#if BYTE_ORDER == LITTLE_ENDIAN
	opal_call(OPAL_REINIT_CPUS, 2 /* Little endian */);
#else
	opal_call(OPAL_REINIT_CPUS, 1 /* Big endian */);
#endif

	if (cpu_idle_hook == NULL)
		cpu_idle_hook = powernv_cpu_idle;

	powernv_boot_pir = mfspr(SPR_PIR);

	/* LPID must not be altered when PSL_DR or PSL_IR is set */
	msr = mfmsr();
	mtmsr(msr & ~(PSL_DR | PSL_IR));

	/* Direct interrupts to SRR instead of HSRR and reset LPCR otherwise */
	mtspr(SPR_LPID, 0);
	isync();

	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00)
		lpcr |= LPCR_HVICE;

	mtspr(SPR_LPCR, lpcr);
	isync();

	mtmsr(msr);

	powernv_cpuref_init();

	/* Set SLB count from device tree */
	cpu = OF_peer(0);
	cpu = OF_child(cpu);
	while (cpu != 0) {
		res = OF_getprop(cpu, "name", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpus") == 0)
			break;
		cpu = OF_peer(cpu);
	}
	if (cpu == 0)
		goto out;

	cpu = OF_child(cpu);
	while (cpu != 0) {
		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0)
			break;
		cpu = OF_peer(cpu);
	}
	if (cpu == 0)
		goto out;

	res = OF_getencprop(cpu, "ibm,slb-size", &prop, sizeof(prop));
	if (res > 0)
		n_slbs = prop;

	/*
	 * Scan the large page size property for PAPR compatible machines.
	 * See PAPR D.5 Changes to Section 5.1.4, 'CPU Node Properties'
	 * for the encoding of the property.
	 */

	len = OF_getproplen(cpu, "ibm,segment-page-sizes");
	if (len > 0) {
		/*
		 * We have to use a variable length array on the stack
		 * since we have very limited stack space.
		 */
		pcell_t arr[len/sizeof(cell_t)];
		res = OF_getencprop(cpu, "ibm,segment-page-sizes", arr,
		    sizeof(arr));
		len /= 4;
		idx = 0;
		while (len > 0) {
			shift = arr[idx];
			slb_encoding = arr[idx + 1];
			nptlp = arr[idx + 2];
			idx += 3;
			len -= 3;
			while (len > 0 && nptlp) {
				lp_size = arr[idx];
				lp_encoding = arr[idx+1];
				if (slb_encoding == SLBV_L && lp_encoding == 0)
					break;

				idx += 2;
				len -= 2;
				nptlp--;
			}
			if (nptlp && slb_encoding == SLBV_L && lp_encoding == 0)
				break;
		}

		if (len == 0)
			panic("Standard large pages (SLB[L] = 1, PTE[LP] = 0) "
			    "not supported by this system.");

		moea64_large_page_shift = shift;
		moea64_large_page_size = 1ULL << lp_size;
	}

out:
	return (0);
}

void
powernv_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz)
{

	ofw_mem_regions(phys, physsz, avail, availsz);
}

static void
powernv_numa_mem_regions(platform_t plat, struct numa_mem_region *phys, int *physsz)
{

	ofw_numa_mem_regions(phys, physsz);
}

static u_long
powernv_timebase_freq(platform_t plat, struct cpuref *cpuref)
{
	char buf[8];
	phandle_t cpu, dev, root;
	int res;
	int32_t ticks = -1;

	root = OF_peer(0);
	dev = OF_child(root);
	while (dev != 0) {
		res = OF_getprop(dev, "name", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpus") == 0)
			break;
		dev = OF_peer(dev);
	}

	for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0)
			break;
	}
	if (cpu == 0)
		return (512000000);

	OF_getencprop(cpu, "timebase-frequency", &ticks, sizeof(ticks));

	if (ticks <= 0)
		panic("Unable to determine timebase frequency!");

	return (ticks);
}

static int
powernv_cpuref_init(void)
{
	phandle_t cpu, dev;
	char buf[32];
	int a, res, tmp_cpuref_cnt;
	static struct cpuref tmp_cpuref[MAXCPU];
	cell_t interrupt_servers[32];
	uint64_t bsp;

	if (platform_cpuref_valid)
		return (0);

	dev = OF_peer(0);
	dev = OF_child(dev);
	while (dev != 0) {
		res = OF_getprop(dev, "name", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpus") == 0)
			break;
		dev = OF_peer(dev);
	}

	bsp = 0;
	tmp_cpuref_cnt = 0;
	for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0) {
			res = OF_getproplen(cpu, "ibm,ppc-interrupt-server#s");
			if (res > 0) {
				OF_getencprop(cpu, "ibm,ppc-interrupt-server#s",
				    interrupt_servers, res);

				for (a = 0; a < res/sizeof(cell_t); a++) {
					tmp_cpuref[tmp_cpuref_cnt].cr_hwref = interrupt_servers[a];
					tmp_cpuref[tmp_cpuref_cnt].cr_cpuid = tmp_cpuref_cnt;
					tmp_cpuref[tmp_cpuref_cnt].cr_domain = interrupt_servers[a] >> 11;
					if (interrupt_servers[a] == (uint32_t)powernv_boot_pir)
						bsp = tmp_cpuref_cnt;

					tmp_cpuref_cnt++;
				}
			}
		}
	}

	/* Map IDs, so BSP has CPUID 0 regardless of hwref */
	for (a = bsp; a < tmp_cpuref_cnt; a++) {
		platform_cpuref[platform_cpuref_cnt].cr_hwref = tmp_cpuref[a].cr_hwref;
		platform_cpuref[platform_cpuref_cnt].cr_cpuid = platform_cpuref_cnt;
		platform_cpuref[platform_cpuref_cnt].cr_domain =
		    tmp_cpuref[a].cr_domain;
		platform_cpuref_cnt++;
	}
	for (a = 0; a < bsp; a++) {
		platform_cpuref[platform_cpuref_cnt].cr_hwref = tmp_cpuref[a].cr_hwref;
		platform_cpuref[platform_cpuref_cnt].cr_cpuid = platform_cpuref_cnt;
		platform_cpuref[platform_cpuref_cnt].cr_domain = tmp_cpuref[a].cr_domain;
		platform_cpuref_cnt++;
	}

	platform_cpuref_valid = 1;

	return (0);
}

static int
powernv_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
{
	if (platform_cpuref_valid == 0)
		return (EINVAL);

	cpuref->cr_cpuid = 0;
	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;
	cpuref->cr_domain = platform_cpuref[0].cr_domain;

	return (0);
}

static int
powernv_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{
	int id;

	if (platform_cpuref_valid == 0)
		return (EINVAL);

	id = cpuref->cr_cpuid + 1;
	if (id >= platform_cpuref_cnt)
		return (ENOENT);

	cpuref->cr_cpuid = platform_cpuref[id].cr_cpuid;
	cpuref->cr_hwref = platform_cpuref[id].cr_hwref;
	cpuref->cr_domain = platform_cpuref[id].cr_domain;

	return (0);
}

static int
powernv_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
{

	cpuref->cr_cpuid = platform_cpuref[0].cr_cpuid;
	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;
	cpuref->cr_domain = platform_cpuref[0].cr_domain;
	return (0);
}

#ifdef SMP
static int
powernv_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
	int result;

	ap_pcpu = pc;
	powerpc_sync();

	result = opal_call(OPAL_START_CPU, pc->pc_hwref, EXC_RST);
	if (result != OPAL_SUCCESS) {
		printf("OPAL error (%d): unable to start AP %d\n",
		    result, (int)pc->pc_hwref);
		return (ENXIO);
	}

	return (0);
}

static void
powernv_smp_probe_threads(platform_t plat)
{
	char buf[8];
	phandle_t cpu, dev, root;
	int res, nthreads;

	root = OF_peer(0);

	dev = OF_child(root);
	while (dev != 0) {
		res = OF_getprop(dev, "name", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpus") == 0)
			break;
		dev = OF_peer(dev);
	}

	nthreads = 1;
	for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
		if (res <= 0 || strcmp(buf, "cpu") != 0)
			continue;

		res = OF_getproplen(cpu, "ibm,ppc-interrupt-server#s");

		if (res >= 0)
			nthreads = res / sizeof(cell_t);
		else
			nthreads = 1;
		break;
	}

	smp_threads_per_core = nthreads;
	if (mp_ncpus % nthreads == 0)
		mp_ncores = mp_ncpus / nthreads;
}

static struct cpu_group *
powernv_smp_topo(platform_t plat)
{
	if (mp_ncpus % smp_threads_per_core != 0) {
		printf("WARNING: Irregular SMP topology. "
		    "Performance may be suboptimal "
		    "(%d threads, %d on first core)\n",
		    mp_ncpus, smp_threads_per_core);
		return (smp_topo_none());
	}

	/* Don't do anything fancier for non-threaded SMP */
	if (smp_threads_per_core == 1)
		return (smp_topo_none());

	return (smp_topo_1level(CG_SHARE_L1, smp_threads_per_core,
	    CG_FLAG_SMT));
}

#endif

static void
powernv_reset(platform_t platform)
{

	opal_call(OPAL_CEC_REBOOT);
}

static void
powernv_smp_ap_init(platform_t platform)
{

	if (powernv_smp_ap_extra_init != NULL)
		powernv_smp_ap_extra_init();
}

static void
powernv_cpu_idle(sbintime_t sbt)
{
}