/* xref: /titanic_51/usr/src/uts/sun4v/os/fillsysinfo.c (revision 98157a7002f4f2cf7978f3084ca5577f0a1d72b2) */
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/errno.h>
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/cpu.h>
32 #include <sys/cpuvar.h>
33 #include <sys/clock.h>
34 #include <sys/promif.h>
35 #include <sys/promimpl.h>
36 #include <sys/systm.h>
37 #include <sys/machsystm.h>
38 #include <sys/debug.h>
39 #include <sys/sunddi.h>
40 #include <sys/modctl.h>
41 #include <sys/cpu_module.h>
42 #include <sys/kobj.h>
43 #include <sys/cmp.h>
44 #include <sys/async.h>
45 #include <vm/page.h>
46 #include <vm/hat_sfmmu.h>
47 #include <sys/sysmacros.h>
48 #include <sys/mach_descrip.h>
49 #include <sys/mdesc.h>
50 #include <sys/archsystm.h>
51 #include <sys/error.h>
52 #include <sys/mmu.h>
53 #include <sys/bitmap.h>
54 #include <sys/intreg.h>
55 
/* Per-CPU configuration data, indexed by cpuid; filled in from the MD. */
struct cpu_node cpunodes[NCPU];

/*
 * Sizes (in entries) of the per-CPU mondo and error queues, derived
 * from the machine description by get_q_sizes().
 */
uint64_t cpu_q_entries;		/* cpu mondo queue entries */
uint64_t dev_q_entries;		/* device mondo queue entries */
uint64_t cpu_rq_entries;	/* resumable error queue entries */
uint64_t cpu_nrq_entries;	/* non-resumable error queue entries */
uint64_t ncpu_guest_max;	/* max cpus for this guest (MD "max-cpus") */

void fill_cpu(md_t *, mde_cookie_t);

/* Helpers that extract individual properties from the machine description. */
static uint64_t get_mmu_ctx_bits(md_t *, mde_cookie_t);
static uint64_t get_mmu_tsbs(md_t *, mde_cookie_t);
static uint64_t	get_mmu_shcontexts(md_t *, mde_cookie_t);
static uint64_t get_cpu_pagesizes(md_t *, mde_cookie_t);
static char *construct_isalist(md_t *, mde_cookie_t, char **);
static void init_md_broken(md_t *, mde_cookie_t *);
static int get_l2_cache_info(md_t *, mde_cookie_t, uint64_t *, uint64_t *,
    uint64_t *);
static void get_q_sizes(md_t *, mde_cookie_t);
static void get_va_bits(md_t *, mde_cookie_t);
static size_t get_ra_limit(md_t *);
static int get_l2_cache_node_count(md_t *);

uint64_t	system_clock_freq;
uint_t		niommu_tsbs = 0;

/* Number of L2 cache nodes found in the MD; see get_l2_cache_node_count(). */
static int n_l2_caches = 0;

/* prevent compilation with VAC defined */
#ifdef VAC
#error "The sun4v architecture does not support VAC"
#endif

/* sun4v has no virtual address cache; fold VAC geometry down to one page. */
#define	S_VAC_SIZE	MMU_PAGESIZE
#define	S_VAC_SHIFT	MMU_PAGESHIFT

int		vac_size = S_VAC_SIZE;
uint_t		vac_mask = MMU_PAGEMASK & (S_VAC_SIZE - 1);
int		vac_shift = S_VAC_SHIFT;
uintptr_t	shm_alignment = S_VAC_SIZE;
96 
/*
 * No well-known devices need mapping on sun4v; this is an empty stub
 * kept so common code can call it unconditionally.
 * Declared (void): the original empty parameter list "()" is an
 * obsolescent K&R declaration that disables argument checking.
 */
void
map_wellknown_devices(void)
{
}
101 
102 void
103 fill_cpu(md_t *mdp, mde_cookie_t cpuc)
104 {
105 	struct cpu_node *cpunode;
106 	uint64_t cpuid;
107 	uint64_t clk_freq;
108 	char *namebuf;
109 	char *namebufp;
110 	int namelen;
111 	uint64_t associativity = 0, linesize = 0, size = 0;
112 
113 	if (md_get_prop_val(mdp, cpuc, "id", &cpuid)) {
114 		return;
115 	}
116 
117 	/* All out-of-range cpus will be stopped later. */
118 	if (cpuid >= NCPU) {
119 		cmn_err(CE_CONT, "fill_cpu: out of range cpuid %ld - "
120 		    "cpu excluded from configuration\n", cpuid);
121 
122 		return;
123 	}
124 
125 	cpunode = &cpunodes[cpuid];
126 	cpunode->cpuid = (int)cpuid;
127 	cpunode->device_id = cpuid;
128 
129 	if (sizeof (cpunode->fru_fmri) > strlen(CPU_FRU_FMRI))
130 		(void) strcpy(cpunode->fru_fmri, CPU_FRU_FMRI);
131 
132 	if (md_get_prop_data(mdp, cpuc,
133 	    "compatible", (uint8_t **)&namebuf, &namelen)) {
134 		cmn_err(CE_PANIC, "fill_cpu: Cannot read compatible "
135 		    "property");
136 	}
137 	namebufp = namebuf;
138 	if (strncmp(namebufp, "SUNW,", 5) == 0)
139 		namebufp += 5;
140 	if (strlen(namebufp) > sizeof (cpunode->name))
141 		cmn_err(CE_PANIC, "Compatible property too big to "
142 		    "fit into the cpunode name buffer");
143 	(void) strcpy(cpunode->name, namebufp);
144 
145 	if (md_get_prop_val(mdp, cpuc,
146 	    "clock-frequency", &clk_freq)) {
147 			clk_freq = 0;
148 	}
149 	cpunode->clock_freq = clk_freq;
150 
151 	ASSERT(cpunode->clock_freq != 0);
152 	/*
153 	 * Compute scaling factor based on rate of %tick. This is used
154 	 * to convert from ticks derived from %tick to nanoseconds. See
155 	 * comment in sun4u/sys/clock.h for details.
156 	 */
157 	cpunode->tick_nsec_scale = (uint_t)(((uint64_t)NANOSEC <<
158 	    (32 - TICK_NSEC_SHIFT)) / cpunode->clock_freq);
159 
160 	/*
161 	 * The nodeid is not used in sun4v at all. Setting it
162 	 * to positive value to make starting of slave CPUs
163 	 * code happy.
164 	 */
165 	cpunode->nodeid = cpuid + 1;
166 
167 	/*
168 	 * Obtain the L2 cache information from MD.
169 	 * If "Cache" node exists, then set L2 cache properties
170 	 * as read from MD.
171 	 * If node does not exists, then set the L2 cache properties
172 	 * in individual CPU module.
173 	 */
174 	if ((!get_l2_cache_info(mdp, cpuc,
175 	    &associativity, &size, &linesize)) ||
176 	    associativity == 0 || size == 0 || linesize == 0) {
177 		cpu_fiximp(cpunode);
178 	} else {
179 		/*
180 		 * Do not expect L2 cache properties to be bigger
181 		 * than 32-bit quantity.
182 		 */
183 		cpunode->ecache_associativity = (int)associativity;
184 		cpunode->ecache_size = (int)size;
185 		cpunode->ecache_linesize = (int)linesize;
186 	}
187 
188 	cpunode->ecache_setsize =
189 	    cpunode->ecache_size / cpunode->ecache_associativity;
190 
191 	/*
192 	 * Initialize the mapping for exec unit, chip and core.
193 	 */
194 	cpunode->exec_unit_mapping = NO_EU_MAPPING_FOUND;
195 	cpunode->l2_cache_mapping = NO_MAPPING_FOUND;
196 	cpunode->core_mapping = NO_CORE_MAPPING_FOUND;
197 
198 	if (ecache_setsize == 0)
199 		ecache_setsize = cpunode->ecache_setsize;
200 	if (ecache_alignsize == 0)
201 		ecache_alignsize = cpunode->ecache_linesize;
202 
203 }
204 
205 void
206 empty_cpu(int cpuid)
207 {
208 	bzero(&cpunodes[cpuid], sizeof (struct cpu_node));
209 }
210 
211 /*
212  * Use L2 cache node to derive the chip mapping.
213  */
214 void
215 setup_chip_mappings(md_t *mdp)
216 {
217 	uint64_t ncache, ncpu;
218 	mde_cookie_t *node, *cachelist;
219 	int i, j;
220 	processorid_t cpuid;
221 	int idx = 0;
222 
223 	ncache = md_alloc_scan_dag(mdp, md_root_node(mdp), "cache",
224 	    "fwd", &cachelist);
225 
226 	/*
227 	 * The "cache" node is optional in MD, therefore ncaches can be 0.
228 	 */
229 	if (ncache < 1) {
230 		return;
231 	}
232 
233 	for (i = 0; i < ncache; i++) {
234 		uint64_t cache_level;
235 		uint64_t lcpuid;
236 
237 		if (md_get_prop_val(mdp, cachelist[i], "level", &cache_level))
238 			continue;
239 
240 		if (cache_level != 2)
241 			continue;
242 
243 		/*
244 		 * Found a l2 cache node. Find out the cpu nodes it
245 		 * points to.
246 		 */
247 		ncpu = md_alloc_scan_dag(mdp, cachelist[i], "cpu",
248 		    "back", &node);
249 
250 		if (ncpu < 1)
251 			continue;
252 
253 		for (j = 0; j < ncpu; j++) {
254 			if (md_get_prop_val(mdp, node[j], "id", &lcpuid))
255 				continue;
256 			if (lcpuid >= NCPU)
257 				continue;
258 			cpuid = (processorid_t)lcpuid;
259 			cpunodes[cpuid].l2_cache_mapping = idx;
260 		}
261 		md_free_scan_dag(mdp, &node);
262 
263 		idx++;
264 	}
265 
266 	md_free_scan_dag(mdp, &cachelist);
267 }
268 
269 void
270 setup_exec_unit_mappings(md_t *mdp)
271 {
272 	uint64_t num, num_eunits;
273 	mde_cookie_t cpus_node;
274 	mde_cookie_t *node, *eunit;
275 	int idx, i, j;
276 	processorid_t cpuid;
277 	char *eunit_name = broken_md_flag ? "exec_unit" : "exec-unit";
278 	enum eu_type { INTEGER, FPU } etype;
279 
280 	/*
281 	 * Find the cpu integer exec units - and
282 	 * setup the mappings appropriately.
283 	 */
284 	num = md_alloc_scan_dag(mdp, md_root_node(mdp), "cpus", "fwd", &node);
285 	if (num < 1)
286 		cmn_err(CE_PANIC, "No cpus node in machine description");
287 	if (num > 1)
288 		cmn_err(CE_PANIC, "More than 1 cpus node in machine"
289 		    " description");
290 
291 	cpus_node = node[0];
292 	md_free_scan_dag(mdp, &node);
293 
294 	num_eunits = md_alloc_scan_dag(mdp, cpus_node, eunit_name,
295 	    "fwd", &eunit);
296 	if (num_eunits > 0) {
297 		char *int_str = broken_md_flag ? "int" : "integer";
298 		char *fpu_str = "fp";
299 
300 		/* Spin through and find all the integer exec units */
301 		for (i = 0; i < num_eunits; i++) {
302 			char *p;
303 			char *val;
304 			int vallen;
305 			uint64_t lcpuid;
306 
307 			/* ignore nodes with no type */
308 			if (md_get_prop_data(mdp, eunit[i], "type",
309 			    (uint8_t **)&val, &vallen))
310 				continue;
311 
312 			for (p = val; *p != '\0'; p += strlen(p) + 1) {
313 				if (strcmp(p, int_str) == 0) {
314 					etype = INTEGER;
315 					goto found;
316 				}
317 				if (strcmp(p, fpu_str) == 0) {
318 					etype = FPU;
319 					goto found;
320 				}
321 			}
322 
323 			continue;
324 found:
325 			idx = NCPU + i;
326 			/*
327 			 * find the cpus attached to this EU and
328 			 * update their mapping indices
329 			 */
330 			num = md_alloc_scan_dag(mdp, eunit[i], "cpu",
331 			    "back", &node);
332 
333 			if (num < 1)
334 				cmn_err(CE_PANIC, "exec-unit node in MD"
335 				    " not attached to a cpu node");
336 
337 			for (j = 0; j < num; j++) {
338 				if (md_get_prop_val(mdp, node[j], "id",
339 				    &lcpuid))
340 					continue;
341 				if (lcpuid >= NCPU)
342 					continue;
343 				cpuid = (processorid_t)lcpuid;
344 				switch (etype) {
345 				case INTEGER:
346 					cpunodes[cpuid].exec_unit_mapping = idx;
347 					break;
348 				case FPU:
349 					cpunodes[cpuid].fpu_mapping = idx;
350 					break;
351 				}
352 			}
353 			md_free_scan_dag(mdp, &node);
354 		}
355 
356 
357 		md_free_scan_dag(mdp, &eunit);
358 	}
359 }
360 
361 /*
362  * All the common setup of sun4v CPU modules is done by this routine.
363  */
364 void
365 cpu_setup_common(char **cpu_module_isa_set)
366 {
367 	extern int mmu_exported_pagesize_mask;
368 	int nocpus, i;
369 	size_t ra_limit;
370 	mde_cookie_t *cpulist;
371 	md_t *mdp;
372 
373 	if ((mdp = md_get_handle()) == NULL)
374 		cmn_err(CE_PANIC, "Unable to initialize machine description");
375 
376 	nocpus = md_alloc_scan_dag(mdp,
377 	    md_root_node(mdp), "cpu", "fwd", &cpulist);
378 	if (nocpus < 1) {
379 		cmn_err(CE_PANIC, "cpu_common_setup: cpulist allocation "
380 		    "failed or incorrect number of CPUs in MD");
381 	}
382 
383 	init_md_broken(mdp, cpulist);
384 
385 	if (use_page_coloring) {
386 		do_pg_coloring = 1;
387 	}
388 
389 	/*
390 	 * Get the valid mmu page sizes mask, Q sizes and isalist/r
391 	 * from the MD for the first available CPU in cpulist.
392 	 *
393 	 * Do not expect the MMU page sizes mask to be more than 32-bit.
394 	 */
395 	mmu_exported_pagesize_mask = (int)get_cpu_pagesizes(mdp, cpulist[0]);
396 
397 	/*
398 	 * Get the number of contexts and tsbs supported.
399 	 */
400 	if (get_mmu_shcontexts(mdp, cpulist[0]) >= MIN_NSHCONTEXTS &&
401 	    get_mmu_tsbs(mdp, cpulist[0]) >= MIN_NTSBS) {
402 		shctx_on = 1;
403 	}
404 
405 	for (i = 0; i < nocpus; i++)
406 		fill_cpu(mdp, cpulist[i]);
407 
408 	/* setup l2 cache count. */
409 	n_l2_caches = get_l2_cache_node_count(mdp);
410 
411 	setup_chip_mappings(mdp);
412 	setup_exec_unit_mappings(mdp);
413 
414 	/*
415 	 * If MD is broken then append the passed ISA set,
416 	 * otherwise trust the MD.
417 	 */
418 
419 	if (broken_md_flag)
420 		isa_list = construct_isalist(mdp, cpulist[0],
421 		    cpu_module_isa_set);
422 	else
423 		isa_list = construct_isalist(mdp, cpulist[0], NULL);
424 
425 	get_q_sizes(mdp, cpulist[0]);
426 
427 	get_va_bits(mdp, cpulist[0]);
428 
429 	/*
430 	 * ra_limit is the highest real address in the machine.
431 	 */
432 	ra_limit = get_ra_limit(mdp);
433 
434 	md_free_scan_dag(mdp, &cpulist);
435 
436 	(void) md_fini_handle(mdp);
437 
438 	/*
439 	 * Block stores invalidate all pages of the d$ so pagecopy
440 	 * et. al. do not need virtual translations with virtual
441 	 * coloring taken into consideration.
442 	 */
443 	pp_consistent_coloring = 0;
444 
445 	/*
446 	 * The kpm mapping window.
447 	 * kpm_size:
448 	 *	The size of a single kpm range.
449 	 *	The overall size will be: kpm_size * vac_colors.
450 	 * kpm_vbase:
451 	 *	The virtual start address of the kpm range within the kernel
452 	 *	virtual address space. kpm_vbase has to be kpm_size aligned.
453 	 */
454 
455 	/*
456 	 * Make kpm_vbase, kpm_size aligned to kpm_size_shift.
457 	 * To do this find the nearest power of 2 size that the
458 	 * actual ra_limit fits within.
459 	 * If it is an even power of two use that, otherwise use the
460 	 * next power of two larger than ra_limit.
461 	 */
462 
463 	ASSERT(ra_limit != 0);
464 
465 	kpm_size_shift = (ra_limit & (ra_limit - 1)) != 0 ?
466 	    highbit(ra_limit) : highbit(ra_limit) - 1;
467 
468 	/*
469 	 * No virtual caches on sun4v so size matches size shift
470 	 */
471 	kpm_size = 1ul << kpm_size_shift;
472 
473 	if (va_bits < VA_ADDRESS_SPACE_BITS) {
474 		/*
475 		 * In case of VA hole
476 		 * kpm_base = hole_end + 1TB
477 		 * Starting 1TB beyond where VA hole ends because on Niagara
478 		 * processor software must not use pages within 4GB of the
479 		 * VA hole as instruction pages to avoid problems with
480 		 * prefetching into the VA hole.
481 		 */
482 		kpm_vbase = (caddr_t)((0ull - (1ull << (va_bits - 1))) +
483 		    (1ull << 40));
484 	} else {		/* Number of VA bits 64 ... no VA hole */
485 		kpm_vbase = (caddr_t)0x8000000000000000ull;	/* 8 EB */
486 	}
487 
488 	/*
489 	 * The traptrace code uses either %tick or %stick for
490 	 * timestamping.  The sun4v require use of %stick.
491 	 */
492 	traptrace_use_stick = 1;
493 }
494 
495 /*
496  * Get the nctxs from MD. If absent panic.
497  */
498 static uint64_t
499 get_mmu_ctx_bits(md_t *mdp, mde_cookie_t cpu_node_cookie)
500 {
501 	uint64_t ctx_bits;
502 
503 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-#context-bits",
504 	    &ctx_bits))
505 		ctx_bits = 0;
506 
507 	if (ctx_bits < MIN_NCTXS_BITS || ctx_bits > MAX_NCTXS_BITS)
508 		cmn_err(CE_PANIC, "Incorrect %ld number of contexts bits "
509 		    "returned by MD", ctx_bits);
510 
511 	return (ctx_bits);
512 }
513 
514 /*
515  * Get the number of tsbs from MD. If absent the default value is 0.
516  */
517 static uint64_t
518 get_mmu_tsbs(md_t *mdp, mde_cookie_t cpu_node_cookie)
519 {
520 	uint64_t number_tsbs;
521 
522 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-max-#tsbs",
523 	    &number_tsbs))
524 		number_tsbs = 0;
525 
526 	return (number_tsbs);
527 }
528 
529 /*
530  * Get the number of shared contexts from MD. If absent the default value is 0.
531  *
532  */
533 static uint64_t
534 get_mmu_shcontexts(md_t *mdp, mde_cookie_t cpu_node_cookie)
535 {
536 	uint64_t number_contexts;
537 
538 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-#shared-contexts",
539 	    &number_contexts))
540 		number_contexts = 0;
541 
542 	return (number_contexts);
543 }
544 
545 /*
546  * Initalize supported page sizes information.
547  * Set to 0, if the page sizes mask information is absent in MD.
548  */
549 static uint64_t
550 get_cpu_pagesizes(md_t *mdp, mde_cookie_t cpu_node_cookie)
551 {
552 	uint64_t mmu_page_size_list;
553 
554 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-page-size-list",
555 	    &mmu_page_size_list))
556 		mmu_page_size_list = 0;
557 
558 	if (mmu_page_size_list == 0 || mmu_page_size_list > MAX_PAGESIZE_MASK)
559 		cmn_err(CE_PANIC, "Incorrect 0x%lx pagesize mask returned"
560 		    "by MD", mmu_page_size_list);
561 
562 	return (mmu_page_size_list);
563 }
564 
565 /*
566  * This routine gets the isalist information from MD and appends
567  * the CPU module ISA set if required.
568  */
569 static char *
570 construct_isalist(md_t *mdp, mde_cookie_t cpu_node_cookie,
571     char **cpu_module_isa_set)
572 {
573 	extern int at_flags;
574 	char *md_isalist;
575 	int md_isalen;
576 	char *isabuf;
577 	int isalen;
578 	char **isa_set;
579 	char *p, *q;
580 	int cpu_module_isalen = 0, found = 0;
581 
582 	(void) md_get_prop_data(mdp, cpu_node_cookie,
583 	    "isalist", (uint8_t **)&isabuf, &isalen);
584 
585 	/*
586 	 * We support binaries for all the cpus that have shipped so far.
587 	 * The kernel emulates instructions that are not supported by hardware.
588 	 */
589 	at_flags = EF_SPARC_SUN_US3 | EF_SPARC_32PLUS | EF_SPARC_SUN_US1;
590 
591 	/*
592 	 * Construct the space separated isa_list.
593 	 */
594 	if (cpu_module_isa_set != NULL) {
595 		for (isa_set = cpu_module_isa_set; *isa_set != NULL;
596 		    isa_set++) {
597 			cpu_module_isalen += strlen(*isa_set);
598 			cpu_module_isalen++;	/* for space character */
599 		}
600 	}
601 
602 	/*
603 	 * Allocate the buffer of MD isa buffer length + CPU module
604 	 * isa buffer length.
605 	 */
606 	md_isalen = isalen + cpu_module_isalen + 2;
607 	md_isalist = (char *)prom_alloc((caddr_t)0, md_isalen, 0);
608 	if (md_isalist == NULL)
609 		cmn_err(CE_PANIC, "construct_isalist: Allocation failed for "
610 		    "md_isalist");
611 
612 	md_isalist[0] = '\0'; /* create an empty string to start */
613 	for (p = isabuf, q = p + isalen; p < q; p += strlen(p) + 1) {
614 		(void) strlcat(md_isalist, p, md_isalen);
615 		(void) strcat(md_isalist, " ");
616 	}
617 
618 	/*
619 	 * Check if the isa_set is present in isalist returned by MD.
620 	 * If yes, then no need to append it, if no then append it to
621 	 * isalist returned by MD.
622 	 */
623 	if (cpu_module_isa_set != NULL) {
624 		for (isa_set = cpu_module_isa_set; *isa_set != NULL;
625 		    isa_set++) {
626 			found = 0;
627 			for (p = isabuf, q = p + isalen; p < q;
628 			    p += strlen(p) + 1) {
629 				if (strcmp(p, *isa_set) == 0) {
630 					found = 1;
631 					break;
632 				}
633 			}
634 			if (!found) {
635 				(void) strlcat(md_isalist, *isa_set, md_isalen);
636 				(void) strcat(md_isalist, " ");
637 			}
638 		}
639 	}
640 
641 	/* Get rid of any trailing white spaces */
642 	md_isalist[strlen(md_isalist) - 1] = '\0';
643 
644 	return (md_isalist);
645 }
646 
647 uint64_t
648 get_ra_limit(md_t *mdp)
649 {
650 	mde_cookie_t *mem_list;
651 	mde_cookie_t *mblock_list;
652 	int i;
653 	int memnodes;
654 	int nmblock;
655 	uint64_t base;
656 	uint64_t size;
657 	uint64_t ra_limit = 0, new_limit = 0;
658 
659 	memnodes = md_alloc_scan_dag(mdp,
660 	    md_root_node(mdp), "memory", "fwd", &mem_list);
661 
662 	ASSERT(memnodes == 1);
663 
664 	nmblock = md_alloc_scan_dag(mdp,
665 	    mem_list[0], "mblock", "fwd", &mblock_list);
666 	if (nmblock < 1)
667 		cmn_err(CE_PANIC, "cannot find mblock nodes in MD");
668 
669 	for (i = 0; i < nmblock; i++) {
670 		if (md_get_prop_val(mdp, mblock_list[i], "base", &base))
671 			cmn_err(CE_PANIC, "base property missing from MD"
672 			    " mblock node");
673 		if (md_get_prop_val(mdp, mblock_list[i], "size", &size))
674 			cmn_err(CE_PANIC, "size property missing from MD"
675 			    " mblock node");
676 
677 		ASSERT(size != 0);
678 
679 		new_limit = base + size;
680 
681 		if (base > new_limit)
682 			cmn_err(CE_PANIC, "mblock in MD wrapped around");
683 
684 		if (new_limit > ra_limit)
685 			ra_limit = new_limit;
686 	}
687 
688 	ASSERT(ra_limit != 0);
689 
690 	if (ra_limit > MAX_REAL_ADDRESS) {
691 		cmn_err(CE_WARN, "Highest real address in MD too large"
692 		    " clipping to %llx\n", MAX_REAL_ADDRESS);
693 		ra_limit = MAX_REAL_ADDRESS;
694 	}
695 
696 	md_free_scan_dag(mdp, &mblock_list);
697 
698 	md_free_scan_dag(mdp, &mem_list);
699 
700 	return (ra_limit);
701 }
702 
703 /*
704  * This routine sets the globals for CPU and DEV mondo queue entries and
705  * resumable and non-resumable error queue entries.
706  *
707  * First, look up the number of bits available to pass an entry number.
708  * This can vary by platform and may result in allocating an unreasonably
709  * (or impossibly) large amount of memory for the corresponding table,
710  * so we clamp it by 'max_entries'.  Finally, since the q size is used when
711  * calling contig_mem_alloc(), which expects a power of 2, clamp the q size
712  * down to a power of 2.  If the prop is missing, use 'default_entries'.
713  */
714 static uint64_t
715 get_single_q_size(md_t *mdp, mde_cookie_t cpu_node_cookie,
716     char *qnamep, uint64_t default_entries, uint64_t max_entries)
717 {
718 	uint64_t entries;
719 
720 	if (default_entries > max_entries)
721 		cmn_err(CE_CONT, "!get_single_q_size: dflt %ld > "
722 		    "max %ld for %s\n", default_entries, max_entries, qnamep);
723 
724 	if (md_get_prop_val(mdp, cpu_node_cookie, qnamep, &entries)) {
725 		if (!broken_md_flag)
726 			cmn_err(CE_PANIC, "Missing %s property in MD cpu node",
727 			    qnamep);
728 		entries = default_entries;
729 	} else {
730 		entries = 1 << entries;
731 	}
732 
733 	entries = MIN(entries, max_entries);
734 	/* If not a power of 2, truncate to a power of 2. */
735 	if ((entries & (entries - 1)) != 0) {
736 		entries = 1 << (highbit(entries) - 1);
737 	}
738 
739 	return (entries);
740 }
741 
742 /* Scaling constant used to compute size of cpu mondo queue */
743 #define	CPU_MONDO_Q_MULTIPLIER	8
744 
745 static void
746 get_q_sizes(md_t *mdp, mde_cookie_t cpu_node_cookie)
747 {
748 	uint64_t max_qsize;
749 	mde_cookie_t *platlist;
750 	int nrnode;
751 
752 	/*
753 	 * Compute the maximum number of entries for the cpu mondo queue.
754 	 * Use the appropriate property in the platform node, if it is
755 	 * available.  Else, base it on NCPU.
756 	 */
757 	nrnode = md_alloc_scan_dag(mdp,
758 	    md_root_node(mdp), "platform", "fwd", &platlist);
759 
760 	ASSERT(nrnode == 1);
761 
762 	ncpu_guest_max = NCPU;
763 	(void) md_get_prop_val(mdp, platlist[0], "max-cpus", &ncpu_guest_max);
764 	max_qsize = ncpu_guest_max * CPU_MONDO_Q_MULTIPLIER;
765 
766 	md_free_scan_dag(mdp, &platlist);
767 
768 	cpu_q_entries = get_single_q_size(mdp, cpu_node_cookie,
769 	    "q-cpu-mondo-#bits", DEFAULT_CPU_Q_ENTRIES, max_qsize);
770 
771 	dev_q_entries = get_single_q_size(mdp, cpu_node_cookie,
772 	    "q-dev-mondo-#bits", DEFAULT_DEV_Q_ENTRIES, MAXIVNUM);
773 
774 	cpu_rq_entries = get_single_q_size(mdp, cpu_node_cookie,
775 	    "q-resumable-#bits", CPU_RQ_ENTRIES, MAX_CPU_RQ_ENTRIES);
776 
777 	cpu_nrq_entries = get_single_q_size(mdp, cpu_node_cookie,
778 	    "q-nonresumable-#bits", CPU_NRQ_ENTRIES, MAX_CPU_NRQ_ENTRIES);
779 }
780 
781 
782 static void
783 get_va_bits(md_t *mdp, mde_cookie_t cpu_node_cookie)
784 {
785 	uint64_t value = VA_ADDRESS_SPACE_BITS;
786 
787 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-#va-bits", &value))
788 		cmn_err(CE_PANIC, "mmu-#va-bits property  not found in MD");
789 
790 
791 	if (value == 0 || value > VA_ADDRESS_SPACE_BITS)
792 		cmn_err(CE_PANIC, "Incorrect number of va bits in MD");
793 
794 	/* Do not expect number of VA bits to be more than 32-bit quantity */
795 
796 	va_bits = (int)value;
797 
798 	/*
799 	 * Correct the value for VA bits on UltraSPARC-T1 based systems
800 	 * in case of broken MD.
801 	 */
802 	if (broken_md_flag)
803 		va_bits = DEFAULT_VA_ADDRESS_SPACE_BITS;
804 }
805 
/*
 * Return the number of L2 cache nodes counted from the MD by
 * cpu_setup_common() (see get_l2_cache_node_count()).
 */
int
l2_cache_node_count(void)
{
	return (n_l2_caches);
}
811 
812 /*
813  * count the number of l2 caches.
814  */
815 int
816 get_l2_cache_node_count(md_t *mdp)
817 {
818 	int i;
819 	mde_cookie_t *cachenodes;
820 	uint64_t level;
821 	int n_cachenodes = md_alloc_scan_dag(mdp, md_root_node(mdp),
822 	    "cache", "fwd", &cachenodes);
823 	int l2_caches = 0;
824 
825 	for (i = 0; i < n_cachenodes; i++) {
826 		if (md_get_prop_val(mdp, cachenodes[i], "level", &level) != 0) {
827 			level = 0;
828 		}
829 		if (level == 2) {
830 			l2_caches++;
831 		}
832 	}
833 	md_free_scan_dag(mdp, &cachenodes);
834 	return (l2_caches);
835 }
836 
837 /*
838  * This routine returns the L2 cache information such as -- associativity,
839  * size and linesize.
840  */
841 static int
842 get_l2_cache_info(md_t *mdp, mde_cookie_t cpu_node_cookie,
843 	    uint64_t *associativity, uint64_t *size, uint64_t *linesize)
844 {
845 	mde_cookie_t *cachelist;
846 	int ncaches, i;
847 	uint64_t cache_level = 0;
848 
849 	ncaches = md_alloc_scan_dag(mdp, cpu_node_cookie, "cache",
850 	    "fwd", &cachelist);
851 	/*
852 	 * The "cache" node is optional in MD, therefore ncaches can be 0.
853 	 */
854 	if (ncaches < 1) {
855 		return (0);
856 	}
857 
858 	for (i = 0; i < ncaches; i++) {
859 		uint64_t local_assoc;
860 		uint64_t local_size;
861 		uint64_t local_lsize;
862 
863 		if (md_get_prop_val(mdp, cachelist[i], "level", &cache_level))
864 			continue;
865 
866 		if (cache_level != 2) continue;
867 
868 		/* If properties are missing from this cache ignore it */
869 
870 		if ((md_get_prop_val(mdp, cachelist[i],
871 		    "associativity", &local_assoc))) {
872 			continue;
873 		}
874 
875 		if ((md_get_prop_val(mdp, cachelist[i],
876 		    "size", &local_size))) {
877 			continue;
878 		}
879 
880 		if ((md_get_prop_val(mdp, cachelist[i],
881 		    "line-size", &local_lsize))) {
882 			continue;
883 		}
884 
885 		*associativity = local_assoc;
886 		*size = local_size;
887 		*linesize = local_lsize;
888 		break;
889 	}
890 
891 	md_free_scan_dag(mdp, &cachelist);
892 
893 	return ((cache_level == 2) ? 1 : 0);
894 }
895 
896 
897 /*
898  * Set the broken_md_flag to 1 if the MD doesn't have
899  * the domaining-enabled property in the platform node and the
900  * platform uses the UltraSPARC-T1 cpu. This flag is used to
901  * workaround some of the incorrect MD properties.
902  */
903 static void
904 init_md_broken(md_t *mdp, mde_cookie_t *cpulist)
905 {
906 	int nrnode;
907 	mde_cookie_t *platlist, rootnode;
908 	uint64_t val = 0;
909 	char *namebuf;
910 	int namelen;
911 
912 	rootnode = md_root_node(mdp);
913 	ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE);
914 	ASSERT(cpulist);
915 
916 	nrnode = md_alloc_scan_dag(mdp, rootnode, "platform", "fwd",
917 	    &platlist);
918 
919 	if (nrnode < 1)
920 		cmn_err(CE_PANIC, "init_md_broken: platform node missing");
921 
922 	if (md_get_prop_data(mdp, cpulist[0],
923 	    "compatible", (uint8_t **)&namebuf, &namelen)) {
924 		cmn_err(CE_PANIC, "init_md_broken: "
925 		    "Cannot read 'compatible' property of 'cpu' node");
926 	}
927 
928 	if (md_get_prop_val(mdp, platlist[0],
929 	    "domaining-enabled", &val) == -1 &&
930 	    strcmp(namebuf, "SUNW,UltraSPARC-T1") == 0)
931 		broken_md_flag = 1;
932 
933 	md_free_scan_dag(mdp, &platlist);
934 }
935