xref: /freebsd/sys/powerpc/powerpc/platform.c (revision 685dc743dc3b5645e34836464128e1c0558b404b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 Peter Grehan
 * Copyright (c) 2009 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
/*
 * Dispatch platform calls to the appropriate platform implementation
 * through a previously registered kernel object.
 */

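/*
 * A platform implementation provides a kobj method table and registers it
 * on the platform_set linker set.  A minimal sketch follows; the names are
 * illustrative only, not an actual platform:
 *
 *	static platform_method_t example_methods[] = {
 *		PLATFORMMETHOD(platform_probe,		example_probe),
 *		PLATFORMMETHOD(platform_attach,		example_attach),
 *		PLATFORMMETHOD(platform_mem_regions,	example_mem_regions),
 *		PLATFORMMETHOD_END
 *	};
 *
 *	static platform_def_t example_platform = {
 *		"example",
 *		example_methods,
 *		0,
 *	};
 *	PLATFORM_DEF(example_platform);
 *
 * platform_probe_and_attach() below probes each registered definition and
 * the platform_*() wrappers in this file then dispatch through the winning
 * kobj.
 */
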
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/ofw_machdep.h>
#include <machine/platform.h>
#include <machine/platformvar.h>
#include <machine/smp.h>
#include <machine/vmparam.h>

#include "platform_if.h"

static platform_def_t	*plat_def_impl;
static platform_t	plat_obj;
static struct kobj_ops	plat_kernel_kops;
static struct platform_kobj	plat_kernel_obj;

static char plat_name[64] = "";
SYSCTL_STRING(_hw, OID_AUTO, platform, CTLFLAG_RDTUN,
    plat_name, 0, "Platform currently in use");

static struct mem_affinity mem_info[VM_PHYSSEG_MAX + 1];
static int vm_locality_table[MAXMEMDOM * MAXMEMDOM];
static struct mem_region pregions[PHYS_AVAIL_SZ];
static struct numa_mem_region numa_pregions[PHYS_AVAIL_SZ];
static struct mem_region aregions[PHYS_AVAIL_SZ];
static int nnumapregions, npregions, naregions;

/*
 * Memory region utilities: determine if two regions overlap,
 * and merge two overlapping regions into one
 */
static int
memr_overlap(struct mem_region *r1, struct mem_region *r2)
{
	if ((r1->mr_start + r1->mr_size) < r2->mr_start ||
	    (r2->mr_start + r2->mr_size) < r1->mr_start)
		return (FALSE);

	return (TRUE);
}

static void
memr_merge(struct mem_region *from, struct mem_region *to)
{
	vm_offset_t end;
	end = uqmax(to->mr_start + to->mr_size, from->mr_start + from->mr_size);
	to->mr_start = uqmin(from->mr_start, to->mr_start);
	to->mr_size = end - to->mr_start;
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int
mr_cmp(const void *a, const void *b)
{
	const struct mem_region *regiona, *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

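/*
 * Query the platform for its NUMA memory regions, export them to the
 * caller, and register the resulting domains and a locality table with
 * the VM physical allocator.
 */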
void
numa_mem_regions(struct numa_mem_region **phys, int *physsz)
{
	struct mem_affinity *mi;
	int i, j, maxdom, ndomain, offset;

	nnumapregions = 0;
	PLATFORM_NUMA_MEM_REGIONS(plat_obj, numa_pregions, &nnumapregions);

	if (physsz != NULL)
		*physsz = nnumapregions;
	if (phys != NULL)
		*phys = numa_pregions;
	if (physsz == NULL || phys == NULL) {
		printf("unset value\n");
		return;
	}
	maxdom = 0;
	for (i = 0; i < nnumapregions; i++)
		if (numa_pregions[i].mr_domain > maxdom)
			maxdom = numa_pregions[i].mr_domain;

	mi = mem_info;
	for (i = 0; i < nnumapregions; i++, mi++) {
		mi->start = numa_pregions[i].mr_start;
		mi->end = numa_pregions[i].mr_start + numa_pregions[i].mr_size;
		mi->domain = numa_pregions[i].mr_domain;
	}
	offset = 0;
	vm_locality_table[offset] = 10;
	ndomain = maxdom + 1;
	if (ndomain > 1) {
		for (i = 0; i < ndomain; i++) {
			for (j = 0; j < ndomain; j++) {
				/*
				 * Not sure what these values should actually be
				 */
				if (i == j)
					vm_locality_table[offset] = 10;
				else
					vm_locality_table[offset] = 21;
				offset++;
			}
		}
	}
	vm_phys_register_domains(ndomain, mem_info, vm_locality_table);
}

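/*
 * Return the platform's physical and available memory region lists.
 * On first use, fetch them from the platform, sort both lists by start
 * address, and merge any overlapping available regions.
 */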
void
mem_regions(struct mem_region **phys, int *physsz, struct mem_region **avail,
    int *availsz)
{
	int i, j, still_merging;

	if (npregions == 0) {
		PLATFORM_MEM_REGIONS(plat_obj, pregions, &npregions,
		    aregions, &naregions);
		qsort(pregions, npregions, sizeof(*pregions), mr_cmp);
		qsort(aregions, naregions, sizeof(*aregions), mr_cmp);

		/* Remove overlapping available regions */
		do {
			still_merging = FALSE;
			for (i = 0; i < naregions; i++) {
				if (aregions[i].mr_size == 0)
					continue;
				for (j = i+1; j < naregions; j++) {
					if (aregions[j].mr_size == 0)
						continue;
					if (!memr_overlap(&aregions[j],
					    &aregions[i]))
						continue;

					memr_merge(&aregions[j], &aregions[i]);
					/* mark inactive */
					aregions[j].mr_size = 0;
					still_merging = TRUE;
				}
			}
		} while (still_merging == TRUE);

		/* Collapse zero-length available regions */
		for (i = 0; i < naregions; i++) {
			if (aregions[i].mr_size == 0) {
				memcpy(&aregions[i], &aregions[i+1],
				    (naregions - i - 1)*sizeof(*aregions));
				naregions--;
				i--;
			}
		}
	}

	if (phys != NULL)
		*phys = pregions;
	if (avail != NULL)
		*avail = aregions;
	if (physsz != NULL)
		*physsz = npregions;
	if (availsz != NULL)
		*availsz = naregions;
}

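/*
 * Check whether [addr, addr + len) lies entirely within a known physical
 * memory region.  Returns 0 on success and EFAULT otherwise.
 */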
int
mem_valid(vm_offset_t addr, int len)
{
	int i;

	if (npregions == 0) {
		struct mem_region *p, *a;
		int na, np;
		mem_regions(&p, &np, &a, &na);
	}

	for (i = 0; i < npregions; i++)
		if ((addr >= pregions[i].mr_start)
		   && (addr + len <= pregions[i].mr_start + pregions[i].mr_size))
			return (0);

	return (EFAULT);
}

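/* Highest physical address usable in real (MMU-off) mode. */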
vm_offset_t
platform_real_maxaddr(void)
{
	return (PLATFORM_REAL_MAXADDR(plat_obj));
}

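/* Name of the platform implementation selected at boot. */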
const char *
installed_platform(void)
{
	return (plat_def_impl->name);
}

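/* Timebase frequency, in ticks per second, for the given CPU. */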
u_long
platform_timebase_freq(struct cpuref *cpu)
{
	return (PLATFORM_TIMEBASE_FREQ(plat_obj, cpu));
}

/*
 * Put the current CPU to sleep, as the last step of suspend.
 */
void
platform_sleep(void)
{
	PLATFORM_SLEEP(plat_obj);
}

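/*
 * Thin wrappers around the platform's SMP methods: CPU enumeration,
 * BSP lookup, AP startup, per-AP initialization and thread probing.
 */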
int
platform_smp_first_cpu(struct cpuref *cpu)
{
	return (PLATFORM_SMP_FIRST_CPU(plat_obj, cpu));
}

int
platform_smp_next_cpu(struct cpuref *cpu)
{
	return (PLATFORM_SMP_NEXT_CPU(plat_obj, cpu));
}

int
platform_smp_get_bsp(struct cpuref *cpu)
{
	return (PLATFORM_SMP_GET_BSP(plat_obj, cpu));
}

int
platform_smp_start_cpu(struct pcpu *cpu)
{
	return (PLATFORM_SMP_START_CPU(plat_obj, cpu));
}

void
platform_smp_ap_init(void)
{
	PLATFORM_SMP_AP_INIT(plat_obj);
}

void
platform_smp_probe_threads(void)
{
	PLATFORM_SMP_PROBE_THREADS(plat_obj);
}

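/* Build the scheduler's CPU topology from the platform description. */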
#ifdef SMP
struct cpu_group *
cpu_topo(void)
{
	return (PLATFORM_SMP_TOPO(plat_obj));
}
#endif

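/* Map a device tree node to the NUMA domain it belongs to. */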
int
platform_node_numa_domain(phandle_t node)
{
	return (PLATFORM_NODE_NUMA_DOMAIN(plat_obj, node));
}

/*
 * Reset back to firmware.
 */
void
cpu_reset(void)
{
	PLATFORM_RESET(plat_obj);
}

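/* Synchronize the timebase across CPUs during SMP startup. */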
void
platform_smp_timebase_sync(u_long tb, int ap)
{

	PLATFORM_SMP_TIMEBASE_SYNC(plat_obj, tb, ap);
}

/*
 * Platform install routines. Highest priority wins, using the same
 * algorithm as bus attachment.
 */
SET_DECLARE(platform_set, platform_def_t);

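/*
 * Probe every platform definition registered on platform_set and attach
 * the winner.  A module named by the hw.platform loader tunable is
 * preferred; otherwise the best-priority successful probe is used.
 */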
void
platform_probe_and_attach(void)
{
	platform_def_t	**platpp, *platp;
	int		prio, best_prio;

	plat_obj = &plat_kernel_obj;
	best_prio = 0;

	/*
	 * Try to locate the best platform kobj
	 */
	SET_FOREACH(platpp, platform_set) {
		platp = *platpp;

		/*
		 * Take care of compiling the selected class, and
		 * then statically initialise the platform object
		 */
		kobj_class_compile_static(platp, &plat_kernel_kops);
		kobj_init_static((kobj_t)plat_obj, platp);

		prio = PLATFORM_PROBE(plat_obj);

		/* Check for errors */
		if (prio > 0)
			continue;

		/*
		 * Check if this module was specifically requested through
		 * the loader tunable we provide.
		 */
		if (strcmp(platp->name, plat_name) == 0) {
			plat_def_impl = platp;
			break;
		}

		/* Otherwise, see if it is better than our current best */
		if (plat_def_impl == NULL || prio > best_prio) {
			best_prio = prio;
			plat_def_impl = platp;
		}

		/*
		 * We can't free the KOBJ, since it is static. Reset the ops
		 * member of this class so that we can come back later.
		 */
		platp->ops = NULL;
	}

	if (plat_def_impl == NULL)
		panic("No platform module found!");

	/*
	 * Recompile to make sure we ended with the
	 * correct one, and then attach.
	 */

	kobj_class_compile_static(plat_def_impl, &plat_kernel_kops);
	kobj_init_static((kobj_t)plat_obj, plat_def_impl);

	strlcpy(plat_name, plat_def_impl->name, sizeof(plat_name));

	PLATFORM_ATTACH(plat_obj);
}