xref: /freebsd/sys/powerpc/mpc85xx/platform_mpc85xx.c (revision d96700a6da2afa88607fbd7405ade439424d10d9)
/*-
 * Copyright (c) 2008-2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/_inttypes.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/platformvar.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/vmparam.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <powerpc/mpc85xx/mpc85xx.h>

#include "platform_if.h"

#ifdef SMP
extern void *ap_pcpu;
extern vm_paddr_t kernload;		/* Kernel physical load address */
extern uint8_t __boot_page[];		/* Boot page body */
extern uint32_t bp_kernload;

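/*
 * ePAPR spin-table entry layout; a secondary CPU node's "cpu-release-addr"
 * property points at one of these in memory (see
 * mpc85xx_smp_start_cpu_epapr() below).
 */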
struct cpu_release {
	uint32_t entry_h;
	uint32_t entry_l;
	uint32_t r3_h;
	uint32_t r3_l;
	uint32_t reserved;
	uint32_t pir;
};
#endif

extern uint32_t *bootinfo;
vm_offset_t ccsrbar_va;

static int cpu, maxcpu;

static int mpc85xx_probe(platform_t);
static void mpc85xx_mem_regions(platform_t, struct mem_region *phys,
    int *physsz, struct mem_region *avail, int *availsz);
static u_long mpc85xx_timebase_freq(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_first_cpu(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_next_cpu(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_get_bsp(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_start_cpu(platform_t, struct pcpu *cpu);
static void mpc85xx_idle(platform_t, int cpu);
static int mpc85xx_idle_wakeup(platform_t plat, int cpu);

static void mpc85xx_reset(platform_t);

static platform_method_t mpc85xx_methods[] = {
	PLATFORMMETHOD(platform_probe,		mpc85xx_probe),
	PLATFORMMETHOD(platform_attach,		mpc85xx_attach),
	PLATFORMMETHOD(platform_mem_regions,	mpc85xx_mem_regions),
	PLATFORMMETHOD(platform_timebase_freq,	mpc85xx_timebase_freq),

	PLATFORMMETHOD(platform_smp_first_cpu,	mpc85xx_smp_first_cpu),
	PLATFORMMETHOD(platform_smp_next_cpu,	mpc85xx_smp_next_cpu),
	PLATFORMMETHOD(platform_smp_get_bsp,	mpc85xx_smp_get_bsp),
	PLATFORMMETHOD(platform_smp_start_cpu,	mpc85xx_smp_start_cpu),

	PLATFORMMETHOD(platform_reset,		mpc85xx_reset),
	PLATFORMMETHOD(platform_idle,		mpc85xx_idle),
	PLATFORMMETHOD(platform_idle_wakeup,	mpc85xx_idle_wakeup),

	PLATFORMMETHOD_END
};

DEFINE_CLASS_0(mpc85xx, mpc85xx_platform, mpc85xx_methods, 0);

PLATFORM_DEF(mpc85xx_platform);

static int
mpc85xx_probe(platform_t plat)
{
	u_int pvr = (mfpvr() >> 16) & 0xFFFF;

	switch (pvr) {
		case FSL_E500v1:
		case FSL_E500v2:
		case FSL_E500mc:
		case FSL_E5500:
		case FSL_E6500:
			return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

int
mpc85xx_attach(platform_t plat)
{
	phandle_t cpus, child, ccsr;
	const char *soc_name_guesses[] = {"/soc", "soc", NULL};
	const char **name;
	pcell_t ranges[6], acells, pacells, scells;
	uint64_t ccsrbar, ccsrsize;
	int i;

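	/*
	 * Count the CPU nodes under /cpus to size maxcpu; assume a single
	 * CPU if the node is missing.
	 */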
	if ((cpus = OF_finddevice("/cpus")) != -1) {
		for (maxcpu = 0, child = OF_child(cpus); child != 0;
		    child = OF_peer(child), maxcpu++)
			;
	} else
		maxcpu = 1;

	/*
	 * Locate CCSR region. Irritatingly, there is no way to find it
	 * unless you already know where it is. Try to infer its location
	 * from the device tree.
	 */

	ccsr = -1;
	for (name = soc_name_guesses; *name != NULL && ccsr == -1; name++)
		ccsr = OF_finddevice(*name);
	if (ccsr == -1) {
		char type[64];

		/* That didn't work. Search for devices of type "soc" */
		child = OF_child(OF_peer(0));
		for (OF_child(child); child != 0; child = OF_peer(child)) {
			if (OF_getprop(child, "device_type", type, sizeof(type))
			    <= 0)
				continue;

			if (strcmp(type, "soc") == 0) {
				ccsr = child;
				break;
			}
		}
	}

	if (ccsr == -1)
		panic("Could not locate CCSR window!");

	OF_getprop(ccsr, "#size-cells", &scells, sizeof(scells));
	OF_getprop(ccsr, "#address-cells", &acells, sizeof(acells));
	OF_searchprop(OF_parent(ccsr), "#address-cells", &pacells,
	    sizeof(pacells));
	OF_getprop(ccsr, "ranges", ranges, sizeof(ranges));
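	/*
	 * "ranges" is laid out as <child-addr parent-addr size>; skip the
	 * child address cells and assemble the CCSR physical base and size
	 * from the parent address and size cells.
	 */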
	ccsrbar = ccsrsize = 0;
	for (i = acells; i < acells + pacells; i++) {
		ccsrbar <<= 32;
		ccsrbar |= ranges[i];
	}
	for (i = acells + pacells; i < acells + pacells + scells; i++) {
		ccsrsize <<= 32;
		ccsrsize |= ranges[i];
	}
	ccsrbar_va = pmap_early_io_map(ccsrbar, ccsrsize);

	mpc85xx_fix_errata(ccsrbar_va);
	mpc85xx_enable_l3_cache();

	return (0);
}

void
mpc85xx_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz)
{

	ofw_mem_regions(phys, physsz, avail, availsz);
}

static u_long
mpc85xx_timebase_freq(platform_t plat, struct cpuref *cpuref)
{
	u_long ticks;
	phandle_t cpus, child;
	pcell_t freq;

	if (bootinfo != NULL) {
		if (bootinfo[0] == 1) {
			/* Backward compatibility. See 8-STABLE. */
			ticks = bootinfo[3] >> 3;
		} else {
			/* Compatibility with Juniper's loader. */
			ticks = bootinfo[5] >> 3;
		}
	} else
		ticks = 0;

	if ((cpus = OF_finddevice("/cpus")) == -1)
		goto out;

	if ((child = OF_child(cpus)) == 0)
		goto out;

	switch (OF_getproplen(child, "timebase-frequency")) {
	case 4:
	{
		uint32_t tbase;
		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	case 8:
	{
		uint64_t tbase;
		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	default:
		break;
	}

	freq = 0;
	if (OF_getprop(child, "bus-frequency", (void *)&freq,
	    sizeof(freq)) <= 0)
		goto out;

	if (freq == 0)
		goto out;

	/*
	 * Derive the timebase from the CCB (platform) bus clock.  On QorIQ
	 * SoCs the Time Base and Decrementer tick once every 32 platform
	 * clocks; on other parts they tick every 8 CCB bus clocks
	 * (HID0[SEL_TBCLK] = 0).
	 */
	if (mpc85xx_is_qoriq())
		ticks = freq / 32;
	else
		ticks = freq / 8;

out:
	if (ticks <= 0)
		panic("Unable to determine timebase frequency!");

	return (ticks);
}

static int
mpc85xx_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
{

	cpu = 0;
	cpuref->cr_cpuid = cpu;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_first_cpu: cpuid %d\n", cpuref->cr_cpuid);
	cpu++;

	return (0);
}

static int
mpc85xx_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{

	if (cpu >= maxcpu)
		return (ENOENT);

	cpuref->cr_cpuid = cpu++;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_next_cpu: cpuid %d\n", cpuref->cr_cpuid);

	return (0);
}

static int
mpc85xx_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
{

	cpuref->cr_cpuid = mfspr(SPR_PIR);
	cpuref->cr_hwref = cpuref->cr_cpuid;

	return (0);
}

#ifdef SMP
static int
mpc85xx_smp_start_cpu_epapr(platform_t plat, struct pcpu *pc)
{
	vm_paddr_t rel_pa, bptr;
	volatile struct cpu_release *rel;
	vm_offset_t rel_va, rel_page;
	phandle_t node;
	int i;

	/* If we're calling this, the node already exists. */
	node = OF_finddevice("/cpus");
	for (i = 0, node = OF_child(node); i < pc->pc_cpuid;
	    i++, node = OF_peer(node))
		;
	if (OF_getencprop(node, "cpu-release-addr", (pcell_t *)&rel_pa,
	    sizeof(rel_pa)) == -1) {
		return (ENOENT);
	}

	rel_page = kva_alloc(PAGE_SIZE);
	if (rel_page == 0)
		return (ENOMEM);

	critical_enter();
	rel_va = rel_page + (rel_pa & PAGE_MASK);
	pmap_kenter(rel_page, rel_pa & ~PAGE_MASK);
	rel = (struct cpu_release *)rel_va;
	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
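	/*
	 * ePAPR spin-table release: publish our PIR, then the 64-bit entry
	 * point of the boot page.  The secondary core spins on the entry
	 * address, so order the stores with sync and push them out of the
	 * data cache.
	 */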
	rel->pir = pc->pc_cpuid; __asm __volatile("sync");
	rel->entry_h = (bptr >> 32);
	rel->entry_l = bptr; __asm __volatile("sync");
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
	if (bootverbose)
		printf("Waking up CPU %d via CPU release page %p\n",
		    pc->pc_cpuid, rel);
	critical_exit();
	pmap_kremove(rel_page);
	kva_free(rel_page, PAGE_SIZE);

	return (0);
}
#endif

static int
mpc85xx_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
#ifdef SMP
	vm_paddr_t bptr;
	uint32_t reg;
	int timeout;
	uintptr_t brr;
	int cpuid;
	int epapr_boot = 0;
	uint32_t tgt;

	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_COREDISR);
		cpuid = pc->pc_cpuid;

		if ((reg & (1 << cpuid)) != 0) {
			printf("%s: CPU %d is disabled!\n", __func__,
			    pc->pc_cpuid);
			return (-1);
		}

		brr = OCP85XX_BRR;
	} else {
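		/*
		 * Older (non-QorIQ) parts gate the secondary core through
		 * EEBPCR, whose per-core hold-off bits start at bit 24.
		 */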
		brr = OCP85XX_EEBPCR;
		cpuid = pc->pc_cpuid + 24;
	}
	bp_kernload = kernload;
	/*
	 * bp_kernload is in the boot page.  Sync the cache because ePAPR
	 * booting has the other core(s) already running.
	 */
	cpu_flush_dcache(&bp_kernload, sizeof(bp_kernload));

	ap_pcpu = pc;
	__asm __volatile("msync; isync");

	/* First try the ePAPR way. */
	if (mpc85xx_smp_start_cpu_epapr(plat, pc) == 0) {
		epapr_boot = 1;
		goto spin_wait;
	}

	reg = ccsr_read4(brr);
	if ((reg & (1 << cpuid)) != 0) {
		printf("SMP: CPU %d already out of hold-off state!\n",
		    pc->pc_cpuid);
		return (ENXIO);
	}

	/* Flush caches to have our changes hit DRAM. */
	cpu_flush_dcache(__boot_page, 4096);

	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
	KASSERT((bptr & 0xfff) == 0,
	    ("%s: boot page is not aligned (%#jx)", __func__, (uintmax_t)bptr));
	if (mpc85xx_is_qoriq()) {
		/*
		 * Read the DDR controller configuration to select the proper
		 * BPTR target ID.
		 *
		 * On the P5020, bit 29 of DDR1_CS0_CONFIG enables DDR
		 * controller interleaving.  If this bit is set, we have to
		 * use OCP85XX_TGTIF_RAM_INTL as the BPTR target ID.  On other
		 * QorIQ DPAA SoCs, this bit is reserved and always 0.
		 */

		reg = ccsr_read4(OCP85XX_DDR1_CS0_CONFIG);
		if (reg & (1 << 29))
			tgt = OCP85XX_TGTIF_RAM_INTL;
		else
			tgt = OCP85XX_TGTIF_RAM1;

		/*
		 * Set BSTR to the physical address of the boot page
		 */
		ccsr_write4(OCP85XX_BSTRH, bptr >> 32);
		ccsr_write4(OCP85XX_BSTRL, bptr);
		ccsr_write4(OCP85XX_BSTAR, OCP85XX_ENA_MASK |
		    (tgt << OCP85XX_TRGT_SHIFT_QORIQ) | (ffsl(PAGE_SIZE) - 2));

		/* Read back OCP85XX_BSTAR to synchronize write */
		ccsr_read4(OCP85XX_BSTAR);

		/*
		 * Enable and configure time base on new CPU.
		 */

		/* Set TB clock source to platform clock / 32 */
		reg = ccsr_read4(CCSR_CTBCKSELR);
		ccsr_write4(CCSR_CTBCKSELR, reg & ~(1 << pc->pc_cpuid));

		/* Enable TB */
		reg = ccsr_read4(CCSR_CTBENR);
		ccsr_write4(CCSR_CTBENR, reg | (1 << pc->pc_cpuid));
	} else {
		/*
		 * Set BPTR to the physical address of the boot page
		 */
		bptr = (bptr >> 12) | 0x80000000u;
		ccsr_write4(OCP85XX_BPTR, bptr);
		__asm __volatile("isync; msync");
	}

	/*
	 * Release AP from hold-off state
	 */
	reg = ccsr_read4(brr);
	ccsr_write4(brr, reg | (1 << cpuid));
	__asm __volatile("isync; msync");

spin_wait:
	timeout = 500;
	while (!pc->pc_awake && timeout--)
		DELAY(1000);	/* wait 1ms */

	/*
	 * Disable boot page translation so that the 4K page at the default
	 * address (= 0xfffff000) isn't left permanently remapped and thus
	 * unusable for anything else.
	 */
	if (!epapr_boot) {
		if (mpc85xx_is_qoriq())
			ccsr_write4(OCP85XX_BSTAR, 0);
		else
			ccsr_write4(OCP85XX_BPTR, 0);
		__asm __volatile("isync; msync");
	}

	if (!pc->pc_awake)
		panic("SMP: CPU %d didn't wake up.\n", pc->pc_cpuid);
	return ((pc->pc_awake) ? 0 : EBUSY);
#else
	/* No SMP support */
	return (ENXIO);
#endif
}

static void
mpc85xx_reset(platform_t plat)
{

	/*
	 * Try the dedicated reset register first.
	 * If the SoC doesn't have one, we'll fall
	 * back to using the debug control register.
	 */
	ccsr_write4(OCP85XX_RSTCR, 2);

	/* Clear DBCR0, disabling debug interrupts and events. */
	mtspr(SPR_DBCR0, 0);
	__asm __volatile("isync");

	/* Enable Debug Interrupts in MSR. */
	mtmsr(mfmsr() | PSL_DE);

	/* Enable debug interrupts and issue reset. */
	mtspr(SPR_DBCR0, mfspr(SPR_DBCR0) | DBCR0_IDM | DBCR0_RST_SYSTEM);

	printf("Reset failed...\n");
	while (1)
		;
}

static void
mpc85xx_idle(platform_t plat, int cpu)
{
	uint32_t reg;

	if (mpc85xx_is_qoriq()) {
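		/*
		 * QorIQ parts doze the core through the RCPM core doze
		 * control register instead of setting MSR[WE].
		 */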
		reg = ccsr_read4(OCP85XX_RCPM_CDOZCR);
		ccsr_write4(OCP85XX_RCPM_CDOZCR, reg | (1 << cpu));
		ccsr_read4(OCP85XX_RCPM_CDOZCR);
	} else {
		reg = mfmsr();
		/* Freescale E500 core RM section 6.4.1. */
		__asm __volatile("msync; mtmsr %0; isync" ::
		    "r" (reg | PSL_WE));
	}
}

static int
mpc85xx_idle_wakeup(platform_t plat, int cpu)
{
	uint32_t reg;

	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_RCPM_CDOZCR);
		ccsr_write4(OCP85XX_RCPM_CDOZCR, reg & ~(1 << cpu));
		ccsr_read4(OCP85XX_RCPM_CDOZCR);

		return (1);
	}

	return (0);
}
557