/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2009 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/platformvar.h>
#include <machine/rtas.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

#include "platform_if.h"

#ifdef SMP
extern void *ap_pcpu;
#endif

#ifdef __powerpc64__
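/*
 * Per-CPU Virtual Processor Area (VPA) buffers, registered with the
 * hypervisor via H_REGISTER_VPA in chrp_smp_ap_init() when running as a
 * guest partition (MSR[HV] clear).
 */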
static uint8_t splpar_vpa[MAXCPU][640] __aligned(128); /* XXX: dpcpu */
#endif

static vm_offset_t realmaxaddr = VM_MAX_ADDRESS;

static int chrp_probe(platform_t);
static int chrp_attach(platform_t);
void chrp_mem_regions(platform_t, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz);
static vm_offset_t chrp_real_maxaddr(platform_t);
static u_long chrp_timebase_freq(platform_t, struct cpuref *cpuref);
static int chrp_smp_first_cpu(platform_t, struct cpuref *cpuref);
static int chrp_smp_next_cpu(platform_t, struct cpuref *cpuref);
static int chrp_smp_get_bsp(platform_t, struct cpuref *cpuref);
static void chrp_smp_ap_init(platform_t);
static int chrp_cpuref_init(void);
#ifdef SMP
static int chrp_smp_start_cpu(platform_t, struct pcpu *cpu);
static void chrp_smp_probe_threads(platform_t plat);
static struct cpu_group *chrp_smp_topo(platform_t plat);
#endif
static void chrp_reset(platform_t);
#ifdef __powerpc64__
#include "phyp-hvcall.h"
static void phyp_cpu_idle(sbintime_t sbt);
#endif

static struct cpuref platform_cpuref[MAXCPU];
static int platform_cpuref_cnt;
static int platform_cpuref_valid;

static platform_method_t chrp_methods[] = {
	PLATFORMMETHOD(platform_probe,		chrp_probe),
	PLATFORMMETHOD(platform_attach,		chrp_attach),
	PLATFORMMETHOD(platform_mem_regions,	chrp_mem_regions),
	PLATFORMMETHOD(platform_real_maxaddr,	chrp_real_maxaddr),
	PLATFORMMETHOD(platform_timebase_freq,	chrp_timebase_freq),

	PLATFORMMETHOD(platform_smp_ap_init,	chrp_smp_ap_init),
	PLATFORMMETHOD(platform_smp_first_cpu,	chrp_smp_first_cpu),
	PLATFORMMETHOD(platform_smp_next_cpu,	chrp_smp_next_cpu),
	PLATFORMMETHOD(platform_smp_get_bsp,	chrp_smp_get_bsp),
#ifdef SMP
	PLATFORMMETHOD(platform_smp_start_cpu,	chrp_smp_start_cpu),
	PLATFORMMETHOD(platform_smp_probe_threads,	chrp_smp_probe_threads),
	PLATFORMMETHOD(platform_smp_topo,	chrp_smp_topo),
#endif

	PLATFORMMETHOD(platform_reset,		chrp_reset),
	{ 0, 0 }
};

static platform_def_t chrp_platform = {
	"chrp",
	chrp_methods,
	0
};

PLATFORM_DEF(chrp_platform);

static int
chrp_probe(platform_t plat)
{
	if (OF_finddevice("/memory") != -1 || OF_finddevice("/memory@0") != -1)
		return (BUS_PROBE_GENERIC);

	return (ENXIO);
}

static int
chrp_attach(platform_t plat)
{
	int quiesce;
#ifdef __powerpc64__
	int i;
#if BYTE_ORDER == LITTLE_ENDIAN
	int result;
#endif

	/* XXX: check for /rtas/ibm,hypertas-functions? */
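	/*
	 * With MSR[HV] clear we are running as a guest partition under a
	 * hypervisor (e.g. PowerVM or KVM), so use hypervisor-mediated
	 * facilities: the phyp pmap (unless using radix), cede-based
	 * idling and a per-CPU VPA.
	 */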
	if (!(mfmsr() & PSL_HV)) {
		struct mem_region *phys, *avail;
		int nphys, navail;
		vm_offset_t off;

		mem_regions(&phys, &nphys, &avail, &navail);

		realmaxaddr = 0;
		for (i = 0; i < nphys; i++) {
			off = phys[i].mr_start + phys[i].mr_size;
			realmaxaddr = MAX(off, realmaxaddr);
		}

		if (!radix_mmu)
			pmap_mmu_install("mmu_phyp", BUS_PROBE_SPECIFIC);
		cpu_idle_hook = phyp_cpu_idle;

		/* Set up important VPA fields */
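		/*
		 * The byte offsets below follow the PAPR-defined VPA layout;
		 * the flag bytes tell the hypervisor which register state
		 * (FPRs, PMCs, SLB, Altivec) it must maintain for each
		 * virtual processor.
		 */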
		for (i = 0; i < MAXCPU; i++) {
			/* First two: VPA size */
			splpar_vpa[i][4] =
			    (uint8_t)((sizeof(splpar_vpa[i]) >> 8) & 0xff);
			splpar_vpa[i][5] =
			    (uint8_t)(sizeof(splpar_vpa[i]) & 0xff);
			splpar_vpa[i][0xba] = 1;	/* Maintain FPRs */
			splpar_vpa[i][0xbb] = 1;	/* Maintain PMCs */
			splpar_vpa[i][0xfc] = 0xff;	/* Maintain full SLB */
			splpar_vpa[i][0xfd] = 0xff;
			splpar_vpa[i][0xff] = 1;	/* Maintain Altivec */
		}
		mb();

		/* Register the boot CPU's VPA and set its interrupt priority */
		chrp_smp_ap_init(plat);

#if BYTE_ORDER == LITTLE_ENDIAN
		/*
		 * Ask the hypervisor to update the LPAR ILE bit.
		 *
		 * This involves all processors reentering the hypervisor
		 * so the change appears simultaneously in all processors.
		 * This can take a long time.
		 */
		for(;;) {
			result = phyp_hcall(H_SET_MODE, 1UL,
			    H_SET_MODE_RSRC_ILE, 0, 0);
			if (result == H_SUCCESS)
				break;
			DELAY(1000);
		}
#endif

	}
#endif
	chrp_cpuref_init();

	/* Some systems (e.g. QEMU) need Open Firmware to stand down */
	quiesce = 1;
	TUNABLE_INT_FETCH("debug.quiesce_ofw", &quiesce);
	if (quiesce)
		ofw_quiesce();

	return (0);
}

static int
parse_drconf_memory(struct mem_region *ofmem, int *msz,
		    struct mem_region *ofavail, int *asz)
{
	phandle_t phandle;
	vm_offset_t base;
	int i, idx, len, lasz, lmsz, res;
	uint32_t flags, lmb_size[2];
	uint32_t *dmem;

	lmsz = *msz;
	lasz = *asz;

	phandle = OF_finddevice("/ibm,dynamic-reconfiguration-memory");
	if (phandle == -1)
		/* No drconf node, return. */
		return (0);

	res = OF_getencprop(phandle, "ibm,lmb-size", lmb_size,
	    sizeof(lmb_size));
	if (res == -1)
		return (0);
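	/*
	 * ibm,lmb-size is read as two cells; only the low word is used
	 * below, so LMB sizes are assumed to fit in 32 bits.
	 */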
	printf("Logical Memory Block size: %d MB\n", lmb_size[1] >> 20);

	/* Parse the /ibm,dynamic-memory.
	   The first position gives the # of entries. The next two words
	   reflect the address of the memory block. The next four words are
	   the DRC index, reserved, list index and flags.
	   (see PAPR C.6.6.2 ibm,dynamic-reconfiguration-memory)

	    #el  Addr   DRC-idx  res   list-idx  flags
	   -------------------------------------------------
	   | 4 |   8   |   4   |   4   |   4   |   4   |....
	   -------------------------------------------------
	*/

	len = OF_getproplen(phandle, "ibm,dynamic-memory");
	if (len > 0) {
		/* Use a variable-length array sized to the property rather
		   than a fixed maximum-sized buffer, since kernel stack
		   space is very limited.
		*/
		cell_t arr[len/sizeof(cell_t)];

		res = OF_getencprop(phandle, "ibm,dynamic-memory", arr,
		    sizeof(arr));
		if (res == -1)
			return (0);

		/* Number of elements */
		idx = arr[0];

		/* First address, in arr[1], arr[2] */
		dmem = &arr[1];

		for (i = 0; i < idx; i++) {
			base = ((uint64_t)dmem[0] << 32) + dmem[1];
			dmem += 4;
			flags = dmem[1];
			/* Use region only if available and not reserved. */
			if ((flags & 0x8) && !(flags & 0x80)) {
				ofmem[lmsz].mr_start = base;
				ofmem[lmsz].mr_size = (vm_size_t)lmb_size[1];
				ofavail[lasz].mr_start = base;
				ofavail[lasz].mr_size = (vm_size_t)lmb_size[1];
				lmsz++;
				lasz++;
			}
			dmem += 2;
		}
	}

	*msz = lmsz;
	*asz = lasz;

	return (1);
}

void
chrp_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz)
{
	vm_offset_t maxphysaddr;
	int i;

	ofw_mem_regions(phys, physsz, avail, availsz);
	parse_drconf_memory(phys, physsz, avail, availsz);

	/*
	 * On some firmwares (SLOF), some memory may be marked available that
	 * doesn't actually exist. This manifests as an extension of the last
	 * available segment past the end of physical memory, so truncate that
	 * one.
	 */
	maxphysaddr = 0;
	for (i = 0; i < *physsz; i++)
		if (phys[i].mr_start + phys[i].mr_size > maxphysaddr)
			maxphysaddr = phys[i].mr_start + phys[i].mr_size;

	for (i = 0; i < *availsz; i++)
		if (avail[i].mr_start + avail[i].mr_size > maxphysaddr)
			avail[i].mr_size = maxphysaddr - avail[i].mr_start;
}

static vm_offset_t
chrp_real_maxaddr(platform_t plat)
{
	return (realmaxaddr);
}

static u_long
chrp_timebase_freq(platform_t plat, struct cpuref *cpuref)
{
	phandle_t cpus, cpunode;
	int32_t ticks = -1;
	int res;
	char buf[8];

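	/*
	 * All CPUs are assumed to share a timebase frequency, so the value
	 * from the first "cpu" node under /cpus is used and the cpuref
	 * argument is ignored.
	 */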
	cpus = OF_finddevice("/cpus");
	if (cpus == -1)
		panic("CPU tree not found on Open Firmware\n");

	for (cpunode = OF_child(cpus); cpunode != 0; cpunode = OF_peer(cpunode)) {
		res = OF_getprop(cpunode, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0)
			break;
	}
	if (cpunode <= 0)
		panic("CPU node not found on Open Firmware\n");

	OF_getencprop(cpunode, "timebase-frequency", &ticks, sizeof(ticks));

	if (ticks <= 0)
		panic("Unable to determine timebase frequency!");

	return (ticks);
}

static int
chrp_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
{

	if (platform_cpuref_valid == 0)
		return (EINVAL);

	cpuref->cr_cpuid = 0;
	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;

	return (0);
}

static int
chrp_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{
	int id;

	if (platform_cpuref_valid == 0)
		return (EINVAL);

	id = cpuref->cr_cpuid + 1;
	if (id >= platform_cpuref_cnt)
		return (ENOENT);

	cpuref->cr_cpuid = platform_cpuref[id].cr_cpuid;
	cpuref->cr_hwref = platform_cpuref[id].cr_hwref;

	return (0);
}

static int
chrp_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
{

	cpuref->cr_cpuid = platform_cpuref[0].cr_cpuid;
	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;
	return (0);
}

static void
get_cpu_reg(phandle_t cpu, cell_t *reg)
{
	int res;

	res = OF_getproplen(cpu, "reg");
	if (res != sizeof(cell_t))
		panic("Unexpected length for CPU property reg on Open Firmware\n");
	OF_getencprop(cpu, "reg", reg, res);
}

static int
chrp_cpuref_init(void)
{
	phandle_t cpu, dev, chosen, pbsp;
	ihandle_t ibsp;
	char buf[32];
	int a, bsp, res, res2, tmp_cpuref_cnt;
	static struct cpuref tmp_cpuref[MAXCPU];
	cell_t interrupt_servers[32], addr_cells, size_cells, reg, bsp_reg;

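	/*
	 * Each OF "cpu" node carries one ibm,ppc-interrupt-server#s entry
	 * per hardware thread; those values become the cr_hwref handles
	 * later used as the hardware CPU id when starting APs via RTAS.
	 */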
	if (platform_cpuref_valid)
		return (0);

	dev = OF_peer(0);
	dev = OF_child(dev);
	while (dev != 0) {
		res = OF_getprop(dev, "name", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpus") == 0)
			break;
		dev = OF_peer(dev);
	}

	/* Make sure the cpus node's reg properties use 1 address cell and 0 size cells */
	res = OF_getproplen(dev, "#address-cells");
	res2 = OF_getproplen(dev, "#size-cells");
	if (res != res2 || res != sizeof(cell_t))
		panic("CPU properties #address-cells and #size-cells not found on Open Firmware\n");
	OF_getencprop(dev, "#address-cells", &addr_cells, sizeof(addr_cells));
	OF_getencprop(dev, "#size-cells", &size_cells, sizeof(size_cells));
	if (addr_cells != 1 || size_cells != 0)
		panic("Unexpected values for CPU properties #address-cells and #size-cells on Open Firmware\n");

	/* Look for boot CPU in /chosen/cpu and /chosen/fdtbootcpu */

	chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		panic("Device /chosen not found on Open Firmware\n");

	bsp_reg = -1;

	/* /chosen/cpu */
	if (OF_getproplen(chosen, "cpu") == sizeof(ihandle_t)) {
		OF_getprop(chosen, "cpu", &ibsp, sizeof(ibsp));
		pbsp = OF_instance_to_package(be32toh(ibsp));
		if (pbsp != -1)
			get_cpu_reg(pbsp, &bsp_reg);
	}

	/* /chosen/fdtbootcpu */
	if (bsp_reg == -1) {
		if (OF_getproplen(chosen, "fdtbootcpu") == sizeof(cell_t))
			OF_getprop(chosen, "fdtbootcpu", &bsp_reg, sizeof(bsp_reg));
	}

	if (bsp_reg == -1)
		panic("Boot CPU not found on Open Firmware\n");

	bsp = -1;
	tmp_cpuref_cnt = 0;
	for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0) {
			res = OF_getproplen(cpu, "ibm,ppc-interrupt-server#s");
			if (res > 0) {
				OF_getencprop(cpu, "ibm,ppc-interrupt-server#s",
				    interrupt_servers, res);

				get_cpu_reg(cpu, &reg);
				if (reg == bsp_reg)
					bsp = tmp_cpuref_cnt;

				for (a = 0; a < res/sizeof(cell_t); a++) {
					tmp_cpuref[tmp_cpuref_cnt].cr_hwref = interrupt_servers[a];
					tmp_cpuref[tmp_cpuref_cnt].cr_cpuid = tmp_cpuref_cnt;
					tmp_cpuref_cnt++;
				}
			}
		}
	}

	if (bsp == -1)
		panic("Boot CPU not found\n");

	/* Map IDs, so BSP has CPUID 0 regardless of hwref */
	for (a = bsp; a < tmp_cpuref_cnt; a++) {
		platform_cpuref[platform_cpuref_cnt].cr_hwref = tmp_cpuref[a].cr_hwref;
		platform_cpuref[platform_cpuref_cnt].cr_cpuid = platform_cpuref_cnt;
		platform_cpuref_cnt++;
	}
	for (a = 0; a < bsp; a++) {
		platform_cpuref[platform_cpuref_cnt].cr_hwref = tmp_cpuref[a].cr_hwref;
		platform_cpuref[platform_cpuref_cnt].cr_cpuid = platform_cpuref_cnt;
		platform_cpuref_cnt++;
	}

	platform_cpuref_valid = 1;

	return (0);
}

#ifdef SMP
static int
chrp_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
	cell_t start_cpu;
	int result, err, timeout;

	if (!rtas_exists()) {
		printf("RTAS uninitialized: unable to start AP %d\n",
		    pc->pc_cpuid);
		return (ENXIO);
	}

	start_cpu = rtas_token_lookup("start-cpu");
	if (start_cpu == -1) {
		printf("RTAS unknown method: unable to start AP %d\n",
		    pc->pc_cpuid);
		return (ENXIO);
	}

	ap_pcpu = pc;
	powerpc_sync();

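	/*
	 * RTAS "start-cpu" takes three inputs: the CPU's hardware id (its
	 * interrupt-server number), the address at which it begins executing
	 * (the system reset vector here) and the value to load into r3.
	 */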
	result = rtas_call_method(start_cpu, 3, 1, pc->pc_hwref, EXC_RST, pc,
	    &err);
	if (result < 0 || err != 0) {
		printf("RTAS error (%d/%d): unable to start AP %d\n",
		    result, err, pc->pc_cpuid);
		return (ENXIO);
	}

	timeout = 10000;
	while (!pc->pc_awake && timeout--)
		DELAY(100);

	return ((pc->pc_awake) ? 0 : EBUSY);
}

static void
chrp_smp_probe_threads(platform_t plat)
{
	struct pcpu *pc, *last_pc;
	int i, ncores;

	ncores = 0;
	last_pc = NULL;
	for (i = 0; i <= mp_maxid; i++) {
		pc = pcpu_find(i);
		if (pc == NULL)
			continue;
		if (last_pc == NULL || pc->pc_hwref != last_pc->pc_hwref)
			ncores++;
		last_pc = pc;
	}

	mp_ncores = ncores;
	if (mp_ncpus % ncores == 0)
		smp_threads_per_core = mp_ncpus / ncores;
}

static struct cpu_group *
chrp_smp_topo(platform_t plat)
{

	if (mp_ncpus % mp_ncores != 0) {
		printf("WARNING: Irregular SMP topology. Performance may be "
		     "suboptimal (%d CPUS, %d cores)\n", mp_ncpus, mp_ncores);
		return (smp_topo_none());
	}

	/* Don't do anything fancier for non-threaded SMP */
	if (mp_ncpus == mp_ncores)
		return (smp_topo_none());

	return (smp_topo_1level(CG_SHARE_L1, smp_threads_per_core,
	    CG_FLAG_SMT));
}
#endif

static void
chrp_reset(platform_t platform)
{
	OF_reboot();
}

#ifdef __powerpc64__
static void
phyp_cpu_idle(sbintime_t sbt)
{
	register_t msr;

	msr = mfmsr();

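	/*
	 * Disable external interrupts before the final runnable check so a
	 * wakeup cannot slip in between the check and the cede; H_CEDE
	 * yields this virtual processor to the hypervisor until the next
	 * interrupt or prod.
	 */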
	mtmsr(msr & ~PSL_EE);
	if (sched_runnable()) {
		mtmsr(msr);
		return;
	}

	phyp_hcall(H_CEDE); /* Re-enables interrupts internally */
	mtmsr(msr);
}

static void
chrp_smp_ap_init(platform_t platform)
{
	if (!(mfmsr() & PSL_HV)) {
		/* Register VPA */
		phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(hwref),
		    splpar_vpa[PCPU_GET(hwref)]);

		/* Set interrupt priority */
		phyp_hcall(H_CPPR, 0xff);
	}
}
#else
static void
chrp_smp_ap_init(platform_t platform)
{
}
#endif