xref: /titanic_41/usr/src/uts/sun4v/os/fillsysinfo.c (revision 9dd828891378a0a6a509ab601b4c5c20ca5562ec)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/errno.h>
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/cpu.h>
32 #include <sys/cpuvar.h>
33 #include <sys/clock.h>
34 #include <sys/promif.h>
35 #include <sys/promimpl.h>
36 #include <sys/systm.h>
37 #include <sys/machsystm.h>
38 #include <sys/debug.h>
39 #include <sys/sunddi.h>
40 #include <sys/modctl.h>
41 #include <sys/cpu_module.h>
42 #include <sys/kobj.h>
43 #include <sys/cmp.h>
44 #include <sys/async.h>
45 #include <vm/page.h>
46 #include <vm/hat_sfmmu.h>
47 #include <sys/sysmacros.h>
48 #include <sys/mach_descrip.h>
49 #include <sys/mdesc.h>
50 #include <sys/archsystm.h>
51 #include <sys/error.h>
52 #include <sys/mmu.h>
53 #include <sys/bitmap.h>
54 
55 int ncpunode;
56 struct cpu_node cpunodes[NCPU];
57 
58 uint64_t cpu_q_entries;
59 uint64_t dev_q_entries;
60 uint64_t cpu_rq_entries;
61 uint64_t cpu_nrq_entries;
62 
63 void fill_cpu(md_t *, mde_cookie_t);
64 
65 static uint64_t get_mmu_ctx_bits(md_t *, mde_cookie_t);
66 static uint64_t get_cpu_pagesizes(md_t *, mde_cookie_t);
67 static char *construct_isalist(md_t *, mde_cookie_t, char **);
68 static void set_at_flags(char *, int, char **);
69 static void init_md_broken(md_t *);
70 static int get_l2_cache_info(md_t *, mde_cookie_t, uint64_t *, uint64_t *,
71     uint64_t *);
72 static id_t get_exec_unit_mapping(md_t *, mde_cookie_t, mde_cookie_t *);
73 static int find_exec_unit_id(mde_cookie_t, mde_cookie_t *);
74 static void get_q_sizes(md_t *, mde_cookie_t);
75 static void get_va_bits(md_t *, mde_cookie_t);
76 static size_t get_ra_limit(md_t *);
77 
78 uint64_t	system_clock_freq;
79 int		niobus = 0;
80 uint_t		niommu_tsbs = 0;
81 
82 /* prevent compilation with VAC defined */
83 #ifdef VAC
84 #error "The sun4v architecture does not support VAC"
85 #endif
86 
87 #define	S_VAC_SIZE	MMU_PAGESIZE
88 #define	S_VAC_SHIFT	MMU_PAGESHIFT
89 
90 int		vac_size = S_VAC_SIZE;
91 uint_t		vac_mask = MMU_PAGEMASK & (S_VAC_SIZE - 1);
92 int		vac_shift = S_VAC_SHIFT;
93 uintptr_t	shm_alignment = S_VAC_SIZE;
94 
/*
 * Stub on sun4v: there are no prom-mapped "well known" devices to set
 * up here.  NOTE(review): presumably retained so shared sun4 startup
 * code can call it unconditionally -- confirm against callers.
 */
void
map_wellknown_devices()
{
}
99 
100 /*
101  * For backward compatibility we need to verify that we can handle
102  * running on platforms which shipped with missing MD properties.
103  */
104 #define	ONTARIO_PLATNAME1	"SUNW,Sun-Fire-T200"
105 #define	ONTARIO_PLATNAME2	"SUNW,Sun-Fire-T2000"
106 #define	ONTARIO_PLATNAME3	"SUNW,SPARC-Enterprise-T2000"
107 #define	ERIE_PLATNAME1		"SUNW,Sun-Fire-T100"
108 #define	ERIE_PLATNAME2		"SUNW,Sun-Fire-T1000"
109 #define	ERIE_PLATNAME3		"SUNW,SPARC-Enterprise-T1000"
110 
111 void
112 fill_cpu(md_t *mdp, mde_cookie_t cpuc)
113 {
114 	struct cpu_node *cpunode;
115 	uint64_t cpuid;
116 	uint64_t clk_freq;
117 	char *namebuf;
118 	char *namebufp;
119 	int namelen;
120 	uint64_t associativity = 0, linesize = 0, size = 0;
121 	int status;
122 
123 	if (md_get_prop_val(mdp, cpuc, "id", &cpuid)) {
124 		return;
125 	}
126 
127 	if (cpuid >= NCPU) {
128 		cmn_err(CE_CONT, "fill_cpu: out of range cpuid %ld - "
129 		    "cpu excluded from configuration", cpuid);
130 
131 		mutex_enter(&cpu_lock);
132 
133 		/*
134 		 * Since the CPU cannot be used, make sure it
135 		 * is in a safe place. If the firmware does not
136 		 * support CPU stop, this is known to be true.
137 		 * If it fails to stop for any other reason, the
138 		 * system is in an inconsistent state and cannot
139 		 * be allowed to continue.
140 		 */
141 		status = stopcpu_bycpuid(cpuid);
142 
143 		if ((status != 0) && (status != ENOTSUP)) {
144 			cmn_err(CE_PANIC, "failed to stop cpu %lu (%d)",
145 			    cpuid, status);
146 		}
147 
148 		mutex_exit(&cpu_lock);
149 		return;
150 	}
151 
152 	cpunode = &cpunodes[cpuid];
153 	cpunode->cpuid = (int)cpuid;
154 	cpunode->device_id = cpuid;
155 
156 	if (sizeof (cpunode->fru_fmri) > strlen(CPU_FRU_FMRI))
157 		(void) strcpy(cpunode->fru_fmri, CPU_FRU_FMRI);
158 
159 	if (md_get_prop_data(mdp, cpuc,
160 	    "compatible", (uint8_t **)&namebuf, &namelen)) {
161 		cmn_err(CE_PANIC, "fill_cpu: Cannot read compatible "
162 		    "property");
163 	}
164 	namebufp = namebuf;
165 	if (strncmp(namebufp, "SUNW,", 5) == 0)
166 		namebufp += 5;
167 	if (strlen(namebufp) > sizeof (cpunode->name))
168 		cmn_err(CE_PANIC, "Compatible property too big to "
169 		    "fit into the cpunode name buffer");
170 	(void) strcpy(cpunode->name, namebufp);
171 
172 	if (md_get_prop_val(mdp, cpuc,
173 	    "clock-frequency", &clk_freq)) {
174 			clk_freq = 0;
175 	}
176 	cpunode->clock_freq = clk_freq;
177 
178 	ASSERT(cpunode->clock_freq != 0);
179 	/*
180 	 * Compute scaling factor based on rate of %tick. This is used
181 	 * to convert from ticks derived from %tick to nanoseconds. See
182 	 * comment in sun4u/sys/clock.h for details.
183 	 */
184 	cpunode->tick_nsec_scale = (uint_t)(((uint64_t)NANOSEC <<
185 	    (32 - TICK_NSEC_SHIFT)) / cpunode->clock_freq);
186 
187 	/*
188 	 * The nodeid is not used in sun4v at all. Setting it
189 	 * to positive value to make starting of slave CPUs
190 	 * code happy.
191 	 */
192 	cpunode->nodeid = cpuid + 1;
193 
194 	/*
195 	 * Obtain the L2 cache information from MD.
196 	 * If "Cache" node exists, then set L2 cache properties
197 	 * as read from MD.
198 	 * If node does not exists, then set the L2 cache properties
199 	 * in individual CPU module.
200 	 */
201 	if ((!get_l2_cache_info(mdp, cpuc,
202 	    &associativity, &size, &linesize)) ||
203 	    associativity == 0 || size == 0 || linesize == 0) {
204 		cpu_fiximp(cpunode);
205 	} else {
206 		/*
207 		 * Do not expect L2 cache properties to be bigger
208 		 * than 32-bit quantity.
209 		 */
210 		cpunode->ecache_associativity = (int)associativity;
211 		cpunode->ecache_size = (int)size;
212 		cpunode->ecache_linesize = (int)linesize;
213 	}
214 
215 	cpunode->ecache_setsize =
216 	    cpunode->ecache_size / cpunode->ecache_associativity;
217 
218 		/*
219 		 * Start off by assigning the cpu id as the default
220 		 * mapping index.
221 		 */
222 
223 	cpunode->exec_unit_mapping = NO_EU_MAPPING_FOUND;
224 
225 	if (ecache_setsize == 0)
226 		ecache_setsize = cpunode->ecache_setsize;
227 	if (ecache_alignsize == 0)
228 		ecache_alignsize = cpunode->ecache_linesize;
229 
230 	ncpunode++;
231 }
232 
233 void
234 empty_cpu(int cpuid)
235 {
236 	bzero(&cpunodes[cpuid], sizeof (struct cpu_node));
237 	ncpunode--;
238 }
239 
240 void
241 setup_exec_unit_mappings(md_t *mdp)
242 {
243 	uint64_t num, num_eunits;
244 	mde_cookie_t cpus_node;
245 	mde_cookie_t *node, *eunit;
246 	int idx, i, j;
247 	processorid_t cpuid;
248 	char *eunit_name = broken_md_flag ? "exec_unit" : "exec-unit";
249 
250 	/*
251 	 * Find the cpu integer exec units - and
252 	 * setup the mappings appropriately.
253 	 */
254 	num = md_alloc_scan_dag(mdp, md_root_node(mdp), "cpus", "fwd", &node);
255 	if (num < 1)
256 		cmn_err(CE_PANIC, "No cpus node in machine desccription");
257 	if (num > 1)
258 		cmn_err(CE_PANIC, "More than 1 cpus node in machine"
259 		    " description");
260 
261 	cpus_node = node[0];
262 	md_free_scan_dag(mdp, &node);
263 
264 	num_eunits = md_alloc_scan_dag(mdp, cpus_node, eunit_name,
265 	    "fwd", &eunit);
266 	if (num_eunits > 0) {
267 		char *match_type = broken_md_flag ? "int" : "integer";
268 
269 		/* Spin through and find all the integer exec units */
270 		for (i = 0; i < num_eunits; i++) {
271 			char *p;
272 			char *val;
273 			int vallen;
274 			uint64_t lcpuid;
275 
276 				/* ignore nodes with no type */
277 			if (md_get_prop_data(mdp, eunit[i], "type",
278 				(uint8_t **)&val, &vallen)) continue;
279 
280 			for (p = val; *p != '\0'; p += strlen(p) + 1) {
281 				if (strcmp(p, match_type) == 0)
282 					goto found;
283 			}
284 
285 			continue;
286 found:
287 			idx = NCPU + i;
288 			/*
289 			 * find the cpus attached to this EU and
290 			 * update their mapping indices
291 			 */
292 			num = md_alloc_scan_dag(mdp, eunit[i], "cpu",
293 			    "back", &node);
294 
295 			if (num < 1)
296 				cmn_err(CE_PANIC, "exec-unit node in MD"
297 				    " not attached to a cpu node");
298 
299 			for (j = 0; j < num; j++) {
300 				if (md_get_prop_val(mdp, node[j], "id",
301 				    &lcpuid))
302 					continue;
303 				if (lcpuid >= NCPU)
304 					continue;
305 				cpuid = (processorid_t)lcpuid;
306 				cpunodes[cpuid].exec_unit_mapping = idx;
307 			}
308 			md_free_scan_dag(mdp, &node);
309 		}
310 
311 
312 		md_free_scan_dag(mdp, &eunit);
313 	}
314 }
315 
/*
 * All the common setup of sun4v CPU modules is done by this routine.
 *
 * Reads the machine description (MD) to populate cpunodes[], the
 * exported MMU page size mask, the ISA list, the mondo/error queue
 * sizes, the VA bit count and the kpm mapping window, then releases
 * the MD handle.  Panics when the MD is unavailable or has no cpu
 * nodes.
 */
void
cpu_setup_common(char **cpu_module_isa_set)
{
	extern int disable_delay_tlb_flush, delay_tlb_flush;
	extern int mmu_exported_pagesize_mask;
	int nocpus, i;
	size_t ra_limit;
	mde_cookie_t *cpulist;
	md_t *mdp;

	if ((mdp = md_get_handle()) == NULL)
		cmn_err(CE_PANIC, "Unable to initialize machine description");

	/* Detect pre-domaining firmware with known-bad MD properties. */
	init_md_broken(mdp);

	nocpus = md_alloc_scan_dag(mdp,
	    md_root_node(mdp), "cpu", "fwd", &cpulist);
	if (nocpus < 1) {
		cmn_err(CE_PANIC, "cpu_common_setup: cpulist allocation "
		    "failed or incorrect number of CPUs in MD");
	}

	if (use_page_coloring) {
		do_pg_coloring = 1;
		if (use_virtual_coloring) {
			/*
			 * XXX Sun4v cpus don't have virtual caches
			 */
			do_virtual_coloring = 1;
		}
	}

	/*
	 * Get the valid mmu page sizes mask, Q sizes and isalist/r
	 * from the MD for the first available CPU in cpulist.
	 *
	 * Do not expect the MMU page sizes mask to be more than 32-bit.
	 */
	mmu_exported_pagesize_mask = (int)get_cpu_pagesizes(mdp, cpulist[0]);

	for (i = 0; i < nocpus; i++)
		fill_cpu(mdp, cpulist[i]);

	setup_exec_unit_mappings(mdp);

	/*
	 * If MD is broken then append the passed ISA set,
	 * otherwise trust the MD.
	 */

	if (broken_md_flag)
		isa_list = construct_isalist(mdp, cpulist[0],
		    cpu_module_isa_set);
	else
		isa_list = construct_isalist(mdp, cpulist[0], NULL);

	get_q_sizes(mdp, cpulist[0]);

	get_va_bits(mdp, cpulist[0]);

	/*
	 * ra_limit is the highest real address in the machine.
	 */
	ra_limit = get_ra_limit(mdp);

	/* Done with the MD; release the cpu list and the handle. */
	md_free_scan_dag(mdp, &cpulist);

	(void) md_fini_handle(mdp);

	/*
	 * Block stores invalidate all pages of the d$ so pagecopy
	 * et. al. do not need virtual translations with virtual
	 * coloring taken into consideration.
	 */
	pp_consistent_coloring = 0;

	/*
	 * The kpm mapping window.
	 * kpm_size:
	 *	The size of a single kpm range.
	 *	The overall size will be: kpm_size * vac_colors.
	 * kpm_vbase:
	 *	The virtual start address of the kpm range within the kernel
	 *	virtual address space. kpm_vbase has to be kpm_size aligned.
	 */

	/*
	 * Make kpm_vbase, kpm_size aligned to kpm_size_shift.
	 * To do this find the nearest power of 2 size that the
	 * actual ra_limit fits within.
	 * If it is an even power of two use that, otherwise use the
	 * next power of two larger than ra_limit.
	 */

	ASSERT(ra_limit != 0);

	/* (ra_limit & (ra_limit - 1)) == 0 iff ra_limit is a power of 2 */
	kpm_size_shift = (ra_limit & (ra_limit - 1)) != 0 ?
		highbit(ra_limit) : highbit(ra_limit) - 1;

	/*
	 * No virtual caches on sun4v so size matches size shift
	 */
	kpm_size = 1ul << kpm_size_shift;

	if (va_bits < VA_ADDRESS_SPACE_BITS) {
		/*
		 * In case of VA hole
		 * kpm_base = hole_end + 1TB
		 * Starting 1TB beyond where VA hole ends because on Niagara
		 * processor software must not use pages within 4GB of the
		 * VA hole as instruction pages to avoid problems with
		 * prefetching into the VA hole.
		 */
		kpm_vbase = (caddr_t)((0ull - (1ull << (va_bits - 1))) +
		    (1ull << 40));
	} else {		/* Number of VA bits 64 ... no VA hole */
		kpm_vbase = (caddr_t)0x8000000000000000ull;	/* 8 EB */
	}

	/*
	 * The traptrace code uses either %tick or %stick for
	 * timestamping.  The sun4v require use of %stick.
	 */
	traptrace_use_stick = 1;

	/*
	 * sun4v provides demap_all
	 */
	if (!disable_delay_tlb_flush)
		delay_tlb_flush = 1;
}
450 
451 /*
452  * Get the nctxs from MD. If absent panic.
453  */
454 static uint64_t
455 get_mmu_ctx_bits(md_t *mdp, mde_cookie_t cpu_node_cookie)
456 {
457 	uint64_t ctx_bits;
458 
459 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-#context-bits",
460 	    &ctx_bits))
461 		ctx_bits = 0;
462 
463 	if (ctx_bits < MIN_NCTXS_BITS || ctx_bits > MAX_NCTXS_BITS)
464 		cmn_err(CE_PANIC, "Incorrect %ld number of contexts bits "
465 		    "returned by MD", ctx_bits);
466 
467 	return (ctx_bits);
468 }
469 
470 /*
471  * Initalize supported page sizes information.
472  * Set to 0, if the page sizes mask information is absent in MD.
473  */
474 static uint64_t
475 get_cpu_pagesizes(md_t *mdp, mde_cookie_t cpu_node_cookie)
476 {
477 	uint64_t mmu_page_size_list;
478 
479 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-page-size-list",
480 	    &mmu_page_size_list))
481 		mmu_page_size_list = 0;
482 
483 	if (mmu_page_size_list == 0 || mmu_page_size_list > MAX_PAGESIZE_MASK)
484 		cmn_err(CE_PANIC, "Incorrect 0x%lx pagesize mask returned"
485 		    "by MD", mmu_page_size_list);
486 
487 	return (mmu_page_size_list);
488 }
489 
490 /*
491  * This routine gets the isalist information from MD and appends
492  * the CPU module ISA set if required.
493  */
494 static char *
495 construct_isalist(md_t *mdp, mde_cookie_t cpu_node_cookie,
496     char **cpu_module_isa_set)
497 {
498 	extern int at_flags;
499 	char *md_isalist;
500 	int md_isalen;
501 	char *isabuf;
502 	int isalen;
503 	char **isa_set;
504 	char *p, *q;
505 	int cpu_module_isalen = 0, found = 0;
506 
507 	(void) md_get_prop_data(mdp, cpu_node_cookie,
508 	    "isalist", (uint8_t **)&isabuf, &isalen);
509 
510 	/*
511 	 * We support binaries for all the cpus that have shipped so far.
512 	 * The kernel emulates instructions that are not supported by hardware.
513 	 */
514 	at_flags = EF_SPARC_SUN_US3 | EF_SPARC_32PLUS | EF_SPARC_SUN_US1;
515 
516 	/*
517 	 * Construct the space separated isa_list.
518 	 */
519 	if (cpu_module_isa_set != NULL) {
520 		for (isa_set = cpu_module_isa_set; *isa_set != NULL;
521 		    isa_set++) {
522 			cpu_module_isalen += strlen(*isa_set);
523 			cpu_module_isalen++;	/* for space character */
524 		}
525 	}
526 
527 	/*
528 	 * Allocate the buffer of MD isa buffer length + CPU module
529 	 * isa buffer length.
530 	 */
531 	md_isalen = isalen + cpu_module_isalen + 2;
532 	md_isalist = (char *)prom_alloc((caddr_t)0, md_isalen, 0);
533 	if (md_isalist == NULL)
534 		cmn_err(CE_PANIC, "construct_isalist: Allocation failed for "
535 		    "md_isalist");
536 
537 	md_isalist[0] = '\0'; /* create an empty string to start */
538 	for (p = isabuf, q = p + isalen; p < q; p += strlen(p) + 1) {
539 		(void) strlcat(md_isalist, p, md_isalen);
540 		(void) strcat(md_isalist, " ");
541 	}
542 
543 	/*
544 	 * Check if the isa_set is present in isalist returned by MD.
545 	 * If yes, then no need to append it, if no then append it to
546 	 * isalist returned by MD.
547 	 */
548 	if (cpu_module_isa_set != NULL) {
549 		for (isa_set = cpu_module_isa_set; *isa_set != NULL;
550 		    isa_set++) {
551 			found = 0;
552 			for (p = isabuf, q = p + isalen; p < q;
553 			    p += strlen(p) + 1) {
554 				if (strcmp(p, *isa_set) == 0) {
555 					found = 1;
556 					break;
557 				}
558 			}
559 			if (!found) {
560 				(void) strlcat(md_isalist, *isa_set, md_isalen);
561 				(void) strcat(md_isalist, " ");
562 			}
563 		}
564 	}
565 
566 	/* Get rid of any trailing white spaces */
567 	md_isalist[strlen(md_isalist) - 1] = '\0';
568 
569 	return (md_isalist);
570 }
571 
572 uint64_t
573 get_ra_limit(md_t *mdp)
574 {
575 	mde_cookie_t *mem_list;
576 	mde_cookie_t *mblock_list;
577 	int i;
578 	int memnodes;
579 	int nmblock;
580 	uint64_t base;
581 	uint64_t size;
582 	uint64_t ra_limit = 0, new_limit = 0;
583 
584 	memnodes = md_alloc_scan_dag(mdp,
585 	    md_root_node(mdp), "memory", "fwd", &mem_list);
586 
587 	ASSERT(memnodes == 1);
588 
589 	nmblock = md_alloc_scan_dag(mdp,
590 	    mem_list[0], "mblock", "fwd", &mblock_list);
591 	if (nmblock < 1)
592 		cmn_err(CE_PANIC, "cannot find mblock nodes in MD");
593 
594 	for (i = 0; i < nmblock; i++) {
595 		if (md_get_prop_val(mdp, mblock_list[i], "base", &base))
596 			cmn_err(CE_PANIC, "base property missing from MD"
597 			    " mblock node");
598 		if (md_get_prop_val(mdp, mblock_list[i], "size", &size))
599 			cmn_err(CE_PANIC, "size property missing from MD"
600 			    " mblock node");
601 
602 		ASSERT(size != 0);
603 
604 		new_limit = base + size;
605 
606 		if (base > new_limit)
607 			cmn_err(CE_PANIC, "mblock in MD wrapped around");
608 
609 		if (new_limit > ra_limit)
610 		    ra_limit = new_limit;
611 	}
612 
613 	ASSERT(ra_limit != 0);
614 
615 	if (ra_limit > MAX_REAL_ADDRESS) {
616 		cmn_err(CE_WARN, "Highest real address in MD too large"
617 		    " clipping to %llx\n", MAX_REAL_ADDRESS);
618 		ra_limit = MAX_REAL_ADDRESS;
619 	}
620 
621 	md_free_scan_dag(mdp, &mblock_list);
622 
623 	md_free_scan_dag(mdp, &mem_list);
624 
625 	return (ra_limit);
626 }
627 
628 /*
629  * This routine sets the globals for CPU and DEV mondo queue entries and
630  * resumable and non-resumable error queue entries.
631  */
632 static uint64_t
633 get_single_q_size(md_t *mdp, mde_cookie_t cpu_node_cookie,
634     char *qnamep, uint64_t default_entries)
635 {
636 	uint64_t entries;
637 
638 	if (md_get_prop_val(mdp, cpu_node_cookie, qnamep, &entries)) {
639 		if (!broken_md_flag)
640 			cmn_err(CE_PANIC, "Missing %s property in MD cpu node",
641 				qnamep);
642 		entries = default_entries;
643 	} else {
644 		entries = 1 << entries;
645 	}
646 	return (entries);
647 }
648 
649 
650 static void
651 get_q_sizes(md_t *mdp, mde_cookie_t cpu_node_cookie)
652 {
653 	cpu_q_entries = get_single_q_size(mdp, cpu_node_cookie,
654 	    "q-cpu-mondo-#bits", DEFAULT_CPU_Q_ENTRIES);
655 
656 	dev_q_entries = get_single_q_size(mdp, cpu_node_cookie,
657 	    "q-dev-mondo-#bits", DEFAULT_DEV_Q_ENTRIES);
658 
659 	cpu_rq_entries = get_single_q_size(mdp, cpu_node_cookie,
660 	    "q-resumable-#bits", CPU_RQ_ENTRIES);
661 
662 	cpu_nrq_entries = get_single_q_size(mdp, cpu_node_cookie,
663 		"q-nonresumable-#bits", CPU_NRQ_ENTRIES);
664 }
665 
666 
667 static void
668 get_va_bits(md_t *mdp, mde_cookie_t cpu_node_cookie)
669 {
670 	uint64_t value = VA_ADDRESS_SPACE_BITS;
671 
672 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-#va-bits", &value))
673 		cmn_err(CE_PANIC, "mmu-#va-bits property  not found in MD");
674 
675 
676 	if (value == 0 || value > VA_ADDRESS_SPACE_BITS)
677 		cmn_err(CE_PANIC, "Incorrect number of va bits in MD");
678 
679 	/* Do not expect number of VA bits to be more than 32-bit quantity */
680 
681 	va_bits = (int)value;
682 
683 	/*
684 	 * Correct the value for VA bits on UltraSPARC-T1 based systems
685 	 * in case of broken MD.
686 	 */
687 	if (broken_md_flag)
688 		va_bits = DEFAULT_VA_ADDRESS_SPACE_BITS;
689 }
690 
691 /*
692  * This routine returns the L2 cache information such as -- associativity,
693  * size and linesize.
694  */
695 static int
696 get_l2_cache_info(md_t *mdp, mde_cookie_t cpu_node_cookie,
697 	    uint64_t *associativity, uint64_t *size, uint64_t *linesize)
698 {
699 	mde_cookie_t *cachelist;
700 	int ncaches, i;
701 	uint64_t max_level;
702 
703 	ncaches = md_alloc_scan_dag(mdp, cpu_node_cookie, "cache",
704 	    "fwd", &cachelist);
705 	/*
706 	 * The "cache" node is optional in MD, therefore ncaches can be 0.
707 	 */
708 	if (ncaches < 1) {
709 		return (0);
710 	}
711 
712 	max_level = 0;
713 	for (i = 0; i < ncaches; i++) {
714 		uint64_t cache_level;
715 		uint64_t local_assoc;
716 		uint64_t local_size;
717 		uint64_t local_lsize;
718 
719 		if (md_get_prop_val(mdp, cachelist[i], "level", &cache_level))
720 			continue;
721 
722 		if (cache_level <= max_level) continue;
723 
724 		/* If properties are missing from this cache ignore it */
725 
726 		if ((md_get_prop_val(mdp, cachelist[i],
727 		    "associativity", &local_assoc))) {
728 			continue;
729 		}
730 
731 		if ((md_get_prop_val(mdp, cachelist[i],
732 		    "size", &local_size))) {
733 			continue;
734 		}
735 
736 		if ((md_get_prop_val(mdp, cachelist[i],
737 		    "line-size", &local_lsize))) {
738 			continue;
739 		}
740 
741 		max_level = cache_level;
742 		*associativity = local_assoc;
743 		*size = local_size;
744 		*linesize = local_lsize;
745 	}
746 
747 	md_free_scan_dag(mdp, &cachelist);
748 
749 	return ((max_level > 0) ? 1 : 0);
750 }
751 
752 /*
753  * The broken_md_flag is set to 1, if the MD doesn't have
754  * the domaining-enabled property in the platform node and the platforms
755  * are Ontario and Erie. This flag is used to workaround some of the
756  * incorrect MD properties.
757  */
758 static void
759 init_md_broken(md_t *mdp)
760 {
761 	int nrnode;
762 	mde_cookie_t *platlist, rootnode;
763 	char *vbuf;
764 	uint64_t val = 0;
765 
766 	rootnode = md_root_node(mdp);
767 	ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE);
768 
769 	nrnode = md_alloc_scan_dag(mdp, md_root_node(mdp), "platform", "fwd",
770 	    &platlist);
771 
772 	ASSERT(nrnode == 1);
773 
774 	if (md_get_prop_str(mdp, platlist[0], "name", &vbuf) != 0)
775 		panic("platform name not found in machine description");
776 
777 	/*
778 	 * If domaining-enable prop doesn't exist and the platform name is
779 	 * Ontario or Erie the md is broken.
780 	 */
781 
782 	if (md_get_prop_val(mdp, platlist[0], "domaining-enabled", &val) != 0 &&
783 	((strcmp(vbuf, ONTARIO_PLATNAME1) == 0) ||
784 	(strcmp(vbuf, ONTARIO_PLATNAME2) == 0) ||
785 	(strcmp(vbuf, ONTARIO_PLATNAME3) == 0) ||
786 	(strcmp(vbuf, ERIE_PLATNAME1) == 0) ||
787 	(strcmp(vbuf, ERIE_PLATNAME2) == 0) ||
788 	(strcmp(vbuf, ERIE_PLATNAME3) == 0)))
789 		broken_md_flag = 1;
790 
791 	md_free_scan_dag(mdp, &platlist);
792 }
793