xref: /freebsd/sys/powerpc/mpc85xx/platform_mpc85xx.c (revision eaa797943eeac5614edfdc8f6309f332343c3dd2)
/*-
 * Copyright (c) 2008-2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/_inttypes.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/platformvar.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/vmparam.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <powerpc/mpc85xx/mpc85xx.h>

#include "platform_if.h"

#ifdef SMP
extern void *ap_pcpu;
extern vm_paddr_t kernload;		/* Kernel physical load address */
extern uint8_t __boot_page[];		/* Boot page body */
extern uint32_t bp_kernload;

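/*
 * Per-CPU release structure, as laid out by the ePAPR spin-table release
 * method: a 64-bit entry address, a 64-bit r3 value, a reserved word and
 * the CPU's PIR.  A secondary core spins on this structure (located via
 * its "cpu-release-addr" property) until the entry address is filled in
 * by mpc85xx_smp_start_cpu_epapr() below.
 */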
struct cpu_release {
	uint32_t entry_h;
	uint32_t entry_l;
	uint32_t r3_h;
	uint32_t r3_l;
	uint32_t reserved;
	uint32_t pir;
};
#endif

extern uint32_t *bootinfo;
vm_offset_t ccsrbar_va;

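/*
 * CPU enumeration state: "cpu" is the iterator used by the smp_first_cpu/
 * smp_next_cpu methods, "maxcpu" is the number of CPU nodes found in the
 * device tree at attach time.
 */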
static int cpu, maxcpu;

static int mpc85xx_probe(platform_t);
static void mpc85xx_mem_regions(platform_t, struct mem_region *phys,
    int *physsz, struct mem_region *avail, int *availsz);
static u_long mpc85xx_timebase_freq(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_first_cpu(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_next_cpu(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_get_bsp(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_start_cpu(platform_t, struct pcpu *cpu);
static void mpc85xx_idle(platform_t, int cpu);
static int mpc85xx_idle_wakeup(platform_t plat, int cpu);

static void mpc85xx_reset(platform_t);

static platform_method_t mpc85xx_methods[] = {
	PLATFORMMETHOD(platform_probe,		mpc85xx_probe),
	PLATFORMMETHOD(platform_attach,		mpc85xx_attach),
	PLATFORMMETHOD(platform_mem_regions,	mpc85xx_mem_regions),
	PLATFORMMETHOD(platform_timebase_freq,	mpc85xx_timebase_freq),

	PLATFORMMETHOD(platform_smp_first_cpu,	mpc85xx_smp_first_cpu),
	PLATFORMMETHOD(platform_smp_next_cpu,	mpc85xx_smp_next_cpu),
	PLATFORMMETHOD(platform_smp_get_bsp,	mpc85xx_smp_get_bsp),
	PLATFORMMETHOD(platform_smp_start_cpu,	mpc85xx_smp_start_cpu),

	PLATFORMMETHOD(platform_reset,		mpc85xx_reset),
	PLATFORMMETHOD(platform_idle,		mpc85xx_idle),
	PLATFORMMETHOD(platform_idle_wakeup,	mpc85xx_idle_wakeup),

	PLATFORMMETHOD_END
};

DEFINE_CLASS_0(mpc85xx, mpc85xx_platform, mpc85xx_methods, 0);

PLATFORM_DEF(mpc85xx_platform);

static int
mpc85xx_probe(platform_t plat)
{
	u_int pvr = (mfpvr() >> 16) & 0xFFFF;

	switch (pvr) {
	case FSL_E500v1:
	case FSL_E500v2:
	case FSL_E500mc:
	case FSL_E5500:
	case FSL_E6500:
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

int
mpc85xx_attach(platform_t plat)
{
	phandle_t cpus, child, ccsr;
	const char *soc_name_guesses[] = {"/soc", "soc", NULL};
	const char **name;
	pcell_t ranges[6], acells, pacells, scells;
	uint64_t ccsrbar, ccsrsize;
	int i;

	if ((cpus = OF_finddevice("/cpus")) != -1) {
		for (maxcpu = 0, child = OF_child(cpus); child != 0;
		    child = OF_peer(child), maxcpu++)
			;
	} else
		maxcpu = 1;

	/*
	 * Locate CCSR region. Irritatingly, there is no way to find it
	 * unless you already know where it is. Try to infer its location
	 * from the device tree.
	 */

	ccsr = -1;
	for (name = soc_name_guesses; *name != NULL && ccsr == -1; name++)
		ccsr = OF_finddevice(*name);
	if (ccsr == -1) {
		char type[64];

		/* That didn't work. Search for devices of type "soc" */
		child = OF_child(OF_peer(0));
		for (OF_child(child); child != 0; child = OF_peer(child)) {
			if (OF_getprop(child, "device_type", type, sizeof(type))
			    <= 0)
				continue;

			if (strcmp(type, "soc") == 0) {
				ccsr = child;
				break;
			}
		}
	}

	if (ccsr == -1)
		panic("Could not locate CCSR window!");

	OF_getprop(ccsr, "#size-cells", &scells, sizeof(scells));
	OF_getprop(ccsr, "#address-cells", &acells, sizeof(acells));
	OF_searchprop(OF_parent(ccsr), "#address-cells", &pacells,
	    sizeof(pacells));
	OF_getprop(ccsr, "ranges", ranges, sizeof(ranges));
	ccsrbar = ccsrsize = 0;
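	/*
	 * "ranges" is a list of (child address, parent address, size)
	 * tuples.  Skip the child address cells, then assemble the CCSR
	 * physical base from the parent address cells and the window size
	 * from the size cells, 32 bits at a time.
	 */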
	for (i = acells; i < acells + pacells; i++) {
		ccsrbar <<= 32;
		ccsrbar |= ranges[i];
	}
	for (i = acells + pacells; i < acells + pacells + scells; i++) {
		ccsrsize <<= 32;
		ccsrsize |= ranges[i];
	}
	ccsrbar_va = pmap_early_io_map(ccsrbar, ccsrsize);

#if 0
	mpc85xx_fix_errata(ccsrbar_va);
#endif
	mpc85xx_enable_l3_cache();

	return (0);
}

static void
mpc85xx_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz)
{

	ofw_mem_regions(phys, physsz, avail, availsz);
}

static u_long
mpc85xx_timebase_freq(platform_t plat, struct cpuref *cpuref)
{
	u_long ticks;
	phandle_t cpus, child;
	pcell_t freq;

	if (bootinfo != NULL) {
		if (bootinfo[0] == 1) {
			/* Backward compatibility. See 8-STABLE. */
			ticks = bootinfo[3] >> 3;
		} else {
			/* Compatibility with Juniper's loader. */
			ticks = bootinfo[5] >> 3;
		}
	} else
		ticks = 0;

	if ((cpus = OF_finddevice("/cpus")) == -1)
		goto out;

	if ((child = OF_child(cpus)) == 0)
		goto out;

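	/*
	 * The "timebase-frequency" property may be encoded as a single
	 * 32-bit cell or as a 64-bit quantity; use it directly when it is
	 * present in either form.
	 */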
	switch (OF_getproplen(child, "timebase-frequency")) {
	case 4:
	{
		uint32_t tbase;
		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	case 8:
	{
		uint64_t tbase;
		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	default:
		break;
	}

	freq = 0;
	if (OF_getprop(child, "bus-frequency", (void *)&freq,
	    sizeof(freq)) <= 0)
		goto out;

	if (freq == 0)
		goto out;

	/*
	 * Otherwise derive the time base from the bus (CCB/platform) clock.
	 * On e500v1/v2 the Time Base and Decrementer advance every 8 CCB
	 * clocks (HID0[SEL_TBCLK] = 0); on QorIQ (e500mc and later) parts
	 * they advance every 32 platform clocks.
	 */
	if (mpc85xx_is_qoriq())
		ticks = freq / 32;
	else
		ticks = freq / 8;

out:
	if (ticks <= 0)
		panic("Unable to determine timebase frequency!");

	return (ticks);
}

static int
mpc85xx_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
{

	cpu = 0;
	cpuref->cr_cpuid = cpu;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_first_cpu: cpuid %d\n", cpuref->cr_cpuid);
	cpu++;

	return (0);
}

static int
mpc85xx_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{

	if (cpu >= maxcpu)
		return (ENOENT);

	cpuref->cr_cpuid = cpu++;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_next_cpu: cpuid %d\n", cpuref->cr_cpuid);

	return (0);
}

static int
mpc85xx_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
{

	cpuref->cr_cpuid = mfspr(SPR_PIR);
	cpuref->cr_hwref = cpuref->cr_cpuid;

	return (0);
}

#ifdef SMP
static int
mpc85xx_smp_start_cpu_epapr(platform_t plat, struct pcpu *pc)
{
	vm_paddr_t rel_pa, bptr;
	volatile struct cpu_release *rel;
	vm_offset_t rel_va, rel_page;
	phandle_t node;
	int i;

	/* If we're calling this, the node already exists. */
	node = OF_finddevice("/cpus");
	for (i = 0, node = OF_child(node); i < pc->pc_cpuid;
	    i++, node = OF_peer(node))
		;
	if (OF_getencprop(node, "cpu-release-addr", (pcell_t *)&rel_pa,
	    sizeof(rel_pa)) == -1) {
		return (ENOENT);
	}

	rel_page = kva_alloc(PAGE_SIZE);
	if (rel_page == 0)
		return (ENOMEM);

	critical_enter();
	rel_va = rel_page + (rel_pa & PAGE_MASK);
	pmap_kenter(rel_page, rel_pa & ~PAGE_MASK);
	rel = (struct cpu_release *)rel_va;
	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
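	/*
	 * Publish the boot page address through the release structure.  The
	 * waiting core polls the entry address, so write the PIR first and
	 * make the low entry word the last (synchronized) store; flush the
	 * cache line before and after so the spinning core sees the update.
	 */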
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *, rel), sizeof(*rel));
	rel->pir = pc->pc_cpuid; __asm __volatile("sync");
	rel->entry_h = (bptr >> 32);
	rel->entry_l = bptr; __asm __volatile("sync");
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *, rel), sizeof(*rel));
	if (bootverbose)
		printf("Waking up CPU %d via CPU release page %p\n",
		    pc->pc_cpuid, rel);
	critical_exit();
	pmap_kremove(rel_page);
	kva_free(rel_page, PAGE_SIZE);

	return (0);
}
#endif

static int
mpc85xx_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
#ifdef SMP
	vm_paddr_t bptr;
	uint32_t reg;
	int timeout;
	uintptr_t brr;
	int cpuid;
	int epapr_boot = 0;
	uint32_t tgt;

	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_COREDISR);
		cpuid = pc->pc_cpuid;

		if ((reg & (1 << cpuid)) != 0) {
			printf("%s: CPU %d is disabled!\n", __func__,
			    pc->pc_cpuid);
			return (-1);
		}

		brr = OCP85XX_BRR;
	} else {
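		/*
		 * On e500v1/v2 parts the boot hold-off enable bits for the
		 * cores live in the upper byte of EEBPCR, hence the offset
		 * of 24 added to the CPU id.
		 */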
		brr = OCP85XX_EEBPCR;
		cpuid = pc->pc_cpuid + 24;
	}
	bp_kernload = kernload;
	/*
	 * bp_kernload is in the boot page.  Sync the cache because ePAPR
	 * booting has the other core(s) already running.
	 */
	cpu_flush_dcache(&bp_kernload, sizeof(bp_kernload));

	ap_pcpu = pc;
	__asm __volatile("msync; isync");

	/* First try the ePAPR way. */
	if (mpc85xx_smp_start_cpu_epapr(plat, pc) == 0) {
		epapr_boot = 1;
		goto spin_wait;
	}

	reg = ccsr_read4(brr);
	if ((reg & (1 << cpuid)) != 0) {
		printf("SMP: CPU %d already out of hold-off state!\n",
		    pc->pc_cpuid);
		return (ENXIO);
	}

	/* Flush caches to have our changes hit DRAM. */
	cpu_flush_dcache(__boot_page, 4096);

	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
	KASSERT((bptr & 0xfff) == 0,
	    ("%s: boot page is not aligned (%#jx)", __func__, (uintmax_t)bptr));
	if (mpc85xx_is_qoriq()) {
		/*
		 * Read the DDR controller configuration to select the proper
		 * BPTR target ID.
		 *
		 * On the P5020, bit 29 of DDR1_CS0_CONFIG enables DDR
		 * controller interleaving.  If this bit is set, we have to
		 * use OCP85XX_TGTIF_RAM_INTL as the BPTR target ID.  On
		 * other QorIQ DPAA SoCs, this bit is reserved and always 0.
		 */

		reg = ccsr_read4(OCP85XX_DDR1_CS0_CONFIG);
		if (reg & (1 << 29))
			tgt = OCP85XX_TGTIF_RAM_INTL;
		else
			tgt = OCP85XX_TGTIF_RAM1;

		/*
		 * Point the boot translation window (BSTRH/BSTRL/BSTAR) at
		 * the physical address of the boot page.
		 */
		ccsr_write4(OCP85XX_BSTRH, bptr >> 32);
		ccsr_write4(OCP85XX_BSTRL, bptr);
		ccsr_write4(OCP85XX_BSTAR, OCP85XX_ENA_MASK |
		    (tgt << OCP85XX_TRGT_SHIFT_QORIQ) | (ffsl(PAGE_SIZE) - 2));

		/* Read back OCP85XX_BSTAR to synchronize the write. */
		ccsr_read4(OCP85XX_BSTAR);

		/*
		 * Enable and configure the time base on the new CPU.
		 */

		/* Set the TB clock source to platform clock / 32. */
		reg = ccsr_read4(CCSR_CTBCKSELR);
		ccsr_write4(CCSR_CTBCKSELR, reg & ~(1 << pc->pc_cpuid));

		/* Enable the TB. */
		reg = ccsr_read4(CCSR_CTBENR);
		ccsr_write4(CCSR_CTBENR, reg | (1 << pc->pc_cpuid));
	} else {
		/*
		 * Set BPTR to the physical address of the boot page: the MSB
		 * enables boot page translation and the remaining bits hold
		 * the page number of the 4KB-aligned boot page.
		 */
		bptr = (bptr >> 12) | 0x80000000u;
		ccsr_write4(OCP85XX_BPTR, bptr);
		__asm __volatile("isync; msync");
	}

	/*
	 * Release the AP from its hold-off state.
	 */
	reg = ccsr_read4(brr);
	ccsr_write4(brr, reg | (1 << cpuid));
	__asm __volatile("isync; msync");

spin_wait:
	timeout = 500;
	while (!pc->pc_awake && timeout--)
		DELAY(1000);	/* wait 1ms */

	/*
	 * Disable boot page translation so that the 4K page at the default
	 * address (= 0xfffff000) isn't permanently remapped and thus left
	 * unusable for other purposes.
	 */
	if (!epapr_boot) {
		if (mpc85xx_is_qoriq())
			ccsr_write4(OCP85XX_BSTAR, 0);
		else
			ccsr_write4(OCP85XX_BPTR, 0);
		__asm __volatile("isync; msync");
	}

	if (!pc->pc_awake)
		panic("SMP: CPU %d didn't wake up.\n", pc->pc_cpuid);
	return ((pc->pc_awake) ? 0 : EBUSY);
#else
	/* No SMP support */
	return (ENXIO);
#endif
}

static void
mpc85xx_reset(platform_t plat)
{

	/*
	 * Try the dedicated reset register first.
	 * If the SoC doesn't have one, we'll fall
	 * back to using the debug control register.
	 */
	ccsr_write4(OCP85XX_RSTCR, 2);

	/* Clear DBCR0, disabling debug interrupts and events. */
	mtspr(SPR_DBCR0, 0);
	__asm __volatile("isync");

	/* Enable Debug Interrupts in MSR. */
	mtmsr(mfmsr() | PSL_DE);

	/* Enable debug interrupts and issue reset. */
	mtspr(SPR_DBCR0, mfspr(SPR_DBCR0) | DBCR0_IDM | DBCR0_RST_SYSTEM);

	printf("Reset failed...\n");
	while (1)
		;
}

static void
mpc85xx_idle(platform_t plat, int cpu)
{
	uint32_t reg;

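	/*
	 * Two idle schemes: QorIQ parts request core doze through the RCPM
	 * core doze control register, while older e500 cores enter the wait
	 * state by setting MSR[WE].
	 */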
	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_RCPM_CDOZCR);
		ccsr_write4(OCP85XX_RCPM_CDOZCR, reg | (1 << cpu));
		ccsr_read4(OCP85XX_RCPM_CDOZCR);
	} else {
		reg = mfmsr();
		/* Freescale E500 core RM section 6.4.1. */
		__asm __volatile("msync; mtmsr %0; isync" ::
		    "r" (reg | PSL_WE));
	}
}

static int
mpc85xx_idle_wakeup(platform_t plat, int cpu)
{
	uint32_t reg;

	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_RCPM_CDOZCR);
		ccsr_write4(OCP85XX_RCPM_CDOZCR, reg & ~(1 << cpu));
		ccsr_read4(OCP85XX_RCPM_CDOZCR);

		return (1);
	}

	return (0);
}
559