/*-
 * Copyright (c) 2008-2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/_inttypes.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/platformvar.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/vmparam.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <powerpc/mpc85xx/mpc85xx.h>

#include "platform_if.h"

#ifdef SMP
extern void *ap_pcpu;
extern vm_paddr_t kernload;	/* Kernel physical load address */
extern uint8_t __boot_page[];	/* Boot page body */
extern uint32_t bp_kernload;

struct cpu_release {
	uint32_t entry_h;
	uint32_t entry_l;
	uint32_t r3_h;
	uint32_t r3_l;
	uint32_t reserved;
	uint32_t pir;
};
#endif

extern uint32_t *bootinfo;
vm_offset_t ccsrbar_va;

static int cpu, maxcpu;

static int mpc85xx_probe(platform_t);
static void mpc85xx_mem_regions(platform_t, struct mem_region *phys,
    int *physsz, struct mem_region *avail, int *availsz);
static u_long mpc85xx_timebase_freq(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_first_cpu(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_next_cpu(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_get_bsp(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_start_cpu(platform_t, struct pcpu *cpu);
static void mpc85xx_idle(platform_t, int cpu);
static int mpc85xx_idle_wakeup(platform_t plat, int cpu);

static void mpc85xx_reset(platform_t);

static platform_method_t mpc85xx_methods[] = {
	PLATFORMMETHOD(platform_probe, mpc85xx_probe),
	PLATFORMMETHOD(platform_attach, mpc85xx_attach),
	PLATFORMMETHOD(platform_mem_regions, mpc85xx_mem_regions),
	PLATFORMMETHOD(platform_timebase_freq, mpc85xx_timebase_freq),

	PLATFORMMETHOD(platform_smp_first_cpu, mpc85xx_smp_first_cpu),
	PLATFORMMETHOD(platform_smp_next_cpu, mpc85xx_smp_next_cpu),
	PLATFORMMETHOD(platform_smp_get_bsp, mpc85xx_smp_get_bsp),
	PLATFORMMETHOD(platform_smp_start_cpu, mpc85xx_smp_start_cpu),

	PLATFORMMETHOD(platform_reset, mpc85xx_reset),
	PLATFORMMETHOD(platform_idle, mpc85xx_idle),
	PLATFORMMETHOD(platform_idle_wakeup, mpc85xx_idle_wakeup),

	PLATFORMMETHOD_END
};

DEFINE_CLASS_0(mpc85xx, mpc85xx_platform, mpc85xx_methods, 0);

PLATFORM_DEF(mpc85xx_platform);

static int
mpc85xx_probe(platform_t plat)
{
	u_int pvr = mfpvr() >> 16;

	if ((pvr & 0xfff0) == FSL_E500v1)
		return (BUS_PROBE_DEFAULT);

	return (ENXIO);
}

int
mpc85xx_attach(platform_t plat)
{
	phandle_t cpus, child, ccsr;
	const char *soc_name_guesses[] = {"/soc", "soc", NULL};
	const char **name;
	pcell_t ranges[6], acells, pacells, scells;
	uint32_t sr;
	uint64_t ccsrbar, ccsrsize;
	int i, law_max, tgt;

	if ((cpus = OF_finddevice("/cpus")) != -1) {
		for (maxcpu = 0, child = OF_child(cpus); child != 0;
		    child = OF_peer(child), maxcpu++)
			;
	} else
		maxcpu = 1;

	/*
	 * Locate CCSR region. Irritatingly, there is no way to find it
	 * unless you already know where it is. Try to infer its location
	 * from the device tree.
	 */

	ccsr = -1;
	for (name = soc_name_guesses; *name != NULL && ccsr == -1; name++)
		ccsr = OF_finddevice(*name);
	if (ccsr == -1) {
		char type[64];

		/* That didn't work. Search for devices of type "soc". */
		child = OF_child(OF_peer(0));
		for (OF_child(child); child != 0; child = OF_peer(child)) {
			if (OF_getprop(child, "device_type", type,
			    sizeof(type)) <= 0)
				continue;

			if (strcmp(type, "soc") == 0) {
				ccsr = child;
				break;
			}
		}
	}

	if (ccsr == -1)
		panic("Could not locate CCSR window!");

	OF_getprop(ccsr, "#size-cells", &scells, sizeof(scells));
	OF_getprop(ccsr, "#address-cells", &acells, sizeof(acells));
	OF_searchprop(OF_parent(ccsr), "#address-cells", &pacells,
	    sizeof(pacells));
	OF_getprop(ccsr, "ranges", ranges, sizeof(ranges));
	ccsrbar = ccsrsize = 0;
	for (i = acells; i < acells + pacells; i++) {
		ccsrbar <<= 32;
		ccsrbar |= ranges[i];
	}
	for (i = acells + pacells; i < acells + pacells + scells; i++) {
		ccsrsize <<= 32;
		ccsrsize |= ranges[i];
	}
	ccsrbar_va = pmap_early_io_map(ccsrbar, ccsrsize);

	mpc85xx_fix_errata(ccsrbar_va);
	mpc85xx_enable_l3_cache();

	/*
	 * Clear local access windows. Skip DRAM entries, so we don't shoot
	 * ourselves in the foot.
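	 *
	 * The target interface of each window is decoded from LAWSR
	 * bits 24:20 below; windows pointing at one of the DDR targets
	 * (RAM1, RAM2 or interleaved RAM) are left enabled, since they
	 * map the memory we are currently running from.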
	 */
	law_max = law_getmax();
	for (i = 0; i < law_max; i++) {
		sr = ccsr_read4(OCP85XX_LAWSR(i));
		if ((sr & OCP85XX_ENA_MASK) == 0)
			continue;
		tgt = (sr & 0x01f00000) >> 20;
		if (tgt == OCP85XX_TGTIF_RAM1 || tgt == OCP85XX_TGTIF_RAM2 ||
		    tgt == OCP85XX_TGTIF_RAM_INTL)
			continue;

		ccsr_write4(OCP85XX_LAWSR(i), sr & OCP85XX_DIS_MASK);
	}

	return (0);
}

void
mpc85xx_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz)
{

	ofw_mem_regions(phys, physsz, avail, availsz);
}

static u_long
mpc85xx_timebase_freq(platform_t plat, struct cpuref *cpuref)
{
	u_long ticks;
	phandle_t cpus, child;
	pcell_t freq;

	if (bootinfo != NULL) {
		if (bootinfo[0] == 1) {
			/* Backward compatibility. See 8-STABLE. */
			ticks = bootinfo[3] >> 3;
		} else {
			/* Compatibility with Juniper's loader. */
			ticks = bootinfo[5] >> 3;
		}
	} else
		ticks = 0;

	if ((cpus = OF_finddevice("/cpus")) == -1)
		goto out;

	if ((child = OF_child(cpus)) == 0)
		goto out;

	switch (OF_getproplen(child, "timebase-frequency")) {
	case 4:
	{
		uint32_t tbase;

		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	case 8:
	{
		uint64_t tbase;

		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	default:
		break;
	}

	freq = 0;
	if (OF_getprop(child, "bus-frequency", (void *)&freq,
	    sizeof(freq)) <= 0)
		goto out;

	if (freq == 0)
		goto out;

	/*
	 * Time Base and Decrementer are updated every 8 CCB bus clocks.
	 * HID0[SEL_TBCLK] = 0
	 */
	if (mpc85xx_is_qoriq())
		ticks = freq / 32;
	else
		ticks = freq / 8;

out:
	if (ticks <= 0)
		panic("Unable to determine timebase frequency!");

	return (ticks);
}

static int
mpc85xx_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
{

	cpu = 0;
	cpuref->cr_cpuid = cpu;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_first_cpu: cpuid %d\n", cpuref->cr_cpuid);
	cpu++;

	return (0);
}

static int
mpc85xx_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{

	if (cpu >= maxcpu)
		return (ENOENT);

	cpuref->cr_cpuid = cpu++;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_next_cpu: cpuid %d\n", cpuref->cr_cpuid);

	return (0);
}

static int
mpc85xx_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
{

	cpuref->cr_cpuid = mfspr(SPR_PIR);
	cpuref->cr_hwref = cpuref->cr_cpuid;

	return (0);
}

#ifdef SMP
static int
mpc85xx_smp_start_cpu_epapr(platform_t plat, struct pcpu *pc)
{
	vm_paddr_t rel_pa, bptr;
	volatile struct cpu_release *rel;
	vm_offset_t rel_va, rel_page;
	phandle_t node;
	int i;

	/* If we're calling this, the node already exists. */
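	/*
	 * Per the ePAPR spin-table convention, the loader parks each
	 * secondary core spinning on the release structure published in
	 * its "cpu-release-addr" property.  The core is released by
	 * writing its PIR and the physical entry point into that
	 * structure, low word last (that is what the core polls on),
	 * with syncs and cache flushes so the spinning core observes
	 * the stores.  The entry point used here is the kernel's boot
	 * page.
	 */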
	node = OF_finddevice("/cpus");
	for (i = 0, node = OF_child(node); i < pc->pc_cpuid;
	    i++, node = OF_peer(node))
		;
	if (OF_getencprop(node, "cpu-release-addr", (pcell_t *)&rel_pa,
	    sizeof(rel_pa)) == -1) {
		return (ENOENT);
	}

	rel_page = kva_alloc(PAGE_SIZE);
	if (rel_page == 0)
		return (ENOMEM);

	critical_enter();
	rel_va = rel_page + (rel_pa & PAGE_MASK);
	pmap_kenter(rel_page, rel_pa & ~PAGE_MASK);
	rel = (struct cpu_release *)rel_va;
	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *, rel),
	    sizeof(*rel));
	rel->pir = pc->pc_cpuid; __asm __volatile("sync");
	rel->entry_h = (bptr >> 32);
	rel->entry_l = bptr; __asm __volatile("sync");
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *, rel),
	    sizeof(*rel));
	if (bootverbose)
		printf("Waking up CPU %d via CPU release page %p\n",
		    pc->pc_cpuid, rel);
	critical_exit();
	pmap_kremove(rel_page);
	kva_free(rel_page, PAGE_SIZE);

	return (0);
}
#endif

static int
mpc85xx_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
#ifdef SMP
	vm_paddr_t bptr;
	uint32_t reg;
	int timeout;
	uintptr_t brr;
	int cpuid;
	int epapr_boot = 0;
	uint32_t tgt;

	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_COREDISR);
		cpuid = pc->pc_cpuid;

		if ((reg & (1 << cpuid)) != 0) {
			printf("%s: CPU %d is disabled!\n", __func__,
			    pc->pc_cpuid);
			return (-1);
		}

		brr = OCP85XX_BRR;
	} else {
		brr = OCP85XX_EEBPCR;
		cpuid = pc->pc_cpuid + 24;
	}
	bp_kernload = kernload;
	/*
	 * bp_kernload is in the boot page.  Sync the cache because ePAPR
	 * booting has the other core(s) already running.
	 */
	cpu_flush_dcache(&bp_kernload, sizeof(bp_kernload));

	ap_pcpu = pc;
	__asm __volatile("msync; isync");

	/* First try the ePAPR way. */
	if (mpc85xx_smp_start_cpu_epapr(plat, pc) == 0) {
		epapr_boot = 1;
		goto spin_wait;
	}

	reg = ccsr_read4(brr);
	if ((reg & (1 << cpuid)) != 0) {
		printf("SMP: CPU %d already out of hold-off state!\n",
		    pc->pc_cpuid);
		return (ENXIO);
	}

	/* Flush caches to have our changes hit DRAM. */
	cpu_flush_dcache(__boot_page, 4096);

	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
	KASSERT((bptr & 0xfff) == 0,
	    ("%s: boot page is not aligned (%#jx)", __func__,
	    (uintmax_t)bptr));
	if (mpc85xx_is_qoriq()) {
		/*
		 * Read the DDR controller configuration to select the proper
		 * BPTR target ID.
		 *
		 * On P5020, bit 29 of DDR1_CS0_CONFIG enables DDR controller
		 * interleaving.  If this bit is set, we have to use
		 * OCP85XX_TGTIF_RAM_INTL as the BPTR target ID.  On other
		 * QorIQ DPAA SoCs, this bit is reserved and always 0.
		 */
		reg = ccsr_read4(OCP85XX_DDR1_CS0_CONFIG);
		if (reg & (1 << 29))
			tgt = OCP85XX_TGTIF_RAM_INTL;
		else
			tgt = OCP85XX_TGTIF_RAM1;

		/*
		 * Set BSTR to the physical address of the boot page.
		 */
		ccsr_write4(OCP85XX_BSTRH, bptr >> 32);
		ccsr_write4(OCP85XX_BSTRL, bptr);
		ccsr_write4(OCP85XX_BSTAR, OCP85XX_ENA_MASK |
		    (tgt << OCP85XX_TRGT_SHIFT_QORIQ) | (ffsl(PAGE_SIZE) - 2));

		/* Read back OCP85XX_BSTAR to synchronize the write. */
		ccsr_read4(OCP85XX_BSTAR);

		/*
		 * Enable and configure the time base on the new CPU.
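		 *
		 * Clearing the core's bit in CTBCKSELR selects the platform
		 * clock divided by 32 as the time base source (matching the
		 * freq / 32 value used by mpc85xx_timebase_freq() above);
		 * setting its bit in CTBENR then enables the time base.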
		 */

		/* Set TB clock source to platform clock / 32. */
		reg = ccsr_read4(CCSR_CTBCKSELR);
		ccsr_write4(CCSR_CTBCKSELR, reg & ~(1 << pc->pc_cpuid));

		/* Enable TB. */
		reg = ccsr_read4(CCSR_CTBENR);
		ccsr_write4(CCSR_CTBENR, reg | (1 << pc->pc_cpuid));
	} else {
		/*
		 * Set BPTR to the physical address of the boot page.
		 */
		bptr = (bptr >> 12) | 0x80000000u;
		ccsr_write4(OCP85XX_BPTR, bptr);
		__asm __volatile("isync; msync");
	}

	/*
	 * Release AP from hold-off state.
	 */
	reg = ccsr_read4(brr);
	ccsr_write4(brr, reg | (1 << cpuid));
	__asm __volatile("isync; msync");

spin_wait:
	timeout = 500;
	while (!pc->pc_awake && timeout--)
		DELAY(1000);	/* wait 1ms */

	/*
	 * Disable boot page translation so that the 4K page at the default
	 * address (= 0xfffff000) isn't permanently remapped and thus not
	 * usable otherwise.
	 */
	if (!epapr_boot) {
		if (mpc85xx_is_qoriq())
			ccsr_write4(OCP85XX_BSTAR, 0);
		else
			ccsr_write4(OCP85XX_BPTR, 0);
		__asm __volatile("isync; msync");
	}

	if (!pc->pc_awake)
		panic("SMP: CPU %d didn't wake up.\n", pc->pc_cpuid);
	return ((pc->pc_awake) ? 0 : EBUSY);
#else
	/* No SMP support */
	return (ENXIO);
#endif
}

static void
mpc85xx_reset(platform_t plat)
{

	/*
	 * Try the dedicated reset register first.
	 * If the SoC doesn't have one, we'll fall
	 * back to using the debug control register.
	 */
	ccsr_write4(OCP85XX_RSTCR, 2);

	/* Clear DBCR0, disables debug interrupts and events. */
	mtspr(SPR_DBCR0, 0);
	__asm __volatile("isync");

	/* Enable Debug Interrupts in MSR. */
	mtmsr(mfmsr() | PSL_DE);

	/* Enable debug interrupts and issue reset. */
	mtspr(SPR_DBCR0, mfspr(SPR_DBCR0) | DBCR0_IDM | DBCR0_RST_SYSTEM);

	printf("Reset failed...\n");
	while (1)
		;
}

static void
mpc85xx_idle(platform_t plat, int cpu)
{
	uint32_t reg;

	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_RCPM_CDOZCR);
		ccsr_write4(OCP85XX_RCPM_CDOZCR, reg | (1 << cpu));
		ccsr_read4(OCP85XX_RCPM_CDOZCR);
	} else {
		reg = mfmsr();
		/* Freescale E500 core RM section 6.4.1. */
		__asm __volatile("msync; mtmsr %0; isync" ::
		    "r" (reg | PSL_WE));
	}
}

static int
mpc85xx_idle_wakeup(platform_t plat, int cpu)
{
	uint32_t reg;

	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_RCPM_CDOZCR);
		ccsr_write4(OCP85XX_RCPM_CDOZCR, reg & ~(1 << cpu));
		ccsr_read4(OCP85XX_RCPM_CDOZCR);

		return (1);
	}

	return (0);
}