xref: /titanic_41/usr/src/uts/sun4v/os/fillsysinfo.c (revision 8275a87e46b79352e8c1a918b91373159c477438)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/errno.h>
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/cpu.h>
32 #include <sys/cpuvar.h>
33 #include <sys/clock.h>
34 #include <sys/promif.h>
35 #include <sys/promimpl.h>
36 #include <sys/systm.h>
37 #include <sys/machsystm.h>
38 #include <sys/debug.h>
39 #include <sys/sunddi.h>
40 #include <sys/modctl.h>
41 #include <sys/cpu_module.h>
42 #include <sys/kobj.h>
43 #include <sys/cmp.h>
44 #include <sys/async.h>
45 #include <vm/page.h>
46 #include <vm/hat_sfmmu.h>
47 #include <sys/sysmacros.h>
48 #include <sys/mach_descrip.h>
49 #include <sys/mdesc.h>
50 #include <sys/archsystm.h>
51 #include <sys/error.h>
52 #include <sys/mmu.h>
53 #include <sys/bitmap.h>
54 #include <sys/intreg.h>
55 
/* Per-cpu configuration data, indexed by cpuid; populated by fill_cpu(). */
struct cpu_node cpunodes[NCPU];

/* Mondo/error queue sizes, computed from the MD by get_q_sizes(). */
uint64_t cpu_q_entries;		/* cpu mondo queue entries */
uint64_t dev_q_entries;		/* dev mondo queue entries */
uint64_t cpu_rq_entries;	/* resumable error queue entries */
uint64_t cpu_nrq_entries;	/* non-resumable error queue entries */
uint64_t ncpu_guest_max;	/* platform "max-cpus"; defaults to NCPU */

void fill_cpu(md_t *, mde_cookie_t);

/* Helpers that extract per-cpu and platform properties from the MD. */
static uint64_t get_mmu_ctx_bits(md_t *, mde_cookie_t);
static uint64_t get_mmu_tsbs(md_t *, mde_cookie_t);
static uint64_t	get_mmu_shcontexts(md_t *, mde_cookie_t);
static uint64_t get_cpu_pagesizes(md_t *, mde_cookie_t);
static char *construct_isalist(md_t *, mde_cookie_t, char **);
static void init_md_broken(md_t *, mde_cookie_t *);
static int get_l2_cache_info(md_t *, mde_cookie_t, uint64_t *, uint64_t *,
    uint64_t *);
static void get_q_sizes(md_t *, mde_cookie_t);
static void get_va_bits(md_t *, mde_cookie_t);
static size_t get_ra_limit(md_t *);

/* NOTE(review): not assigned in this file; presumably set by prom/startup
 * code elsewhere -- confirm before relying on it here. */
uint64_t	system_clock_freq;
uint_t		niommu_tsbs = 0;

/* prevent compilation with VAC defined */
#ifdef VAC
#error "The sun4v architecture does not support VAC"
#endif

/*
 * With no virtual cache, the VAC "geometry" exported to shared sun4 code
 * degenerates to a single MMU page.
 */
#define	S_VAC_SIZE	MMU_PAGESIZE
#define	S_VAC_SHIFT	MMU_PAGESHIFT

int		vac_size = S_VAC_SIZE;
uint_t		vac_mask = MMU_PAGEMASK & (S_VAC_SIZE - 1);
int		vac_shift = S_VAC_SHIFT;
uintptr_t	shm_alignment = S_VAC_SIZE;
93 
/*
 * Empty stub on sun4v: there are no well-known devices to map here.
 * NOTE(review): presumably kept because shared sun4 startup code calls
 * this routine -- confirm against the callers before removing.
 */
void
map_wellknown_devices()
{
}
98 
99 void
100 fill_cpu(md_t *mdp, mde_cookie_t cpuc)
101 {
102 	struct cpu_node *cpunode;
103 	uint64_t cpuid;
104 	uint64_t clk_freq;
105 	char *namebuf;
106 	char *namebufp;
107 	int namelen;
108 	uint64_t associativity = 0, linesize = 0, size = 0;
109 
110 	if (md_get_prop_val(mdp, cpuc, "id", &cpuid)) {
111 		return;
112 	}
113 
114 	/* All out-of-range cpus will be stopped later. */
115 	if (cpuid >= NCPU) {
116 		cmn_err(CE_CONT, "fill_cpu: out of range cpuid %ld - "
117 		    "cpu excluded from configuration\n", cpuid);
118 
119 		return;
120 	}
121 
122 	cpunode = &cpunodes[cpuid];
123 	cpunode->cpuid = (int)cpuid;
124 	cpunode->device_id = cpuid;
125 
126 	if (sizeof (cpunode->fru_fmri) > strlen(CPU_FRU_FMRI))
127 		(void) strcpy(cpunode->fru_fmri, CPU_FRU_FMRI);
128 
129 	if (md_get_prop_data(mdp, cpuc,
130 	    "compatible", (uint8_t **)&namebuf, &namelen)) {
131 		cmn_err(CE_PANIC, "fill_cpu: Cannot read compatible "
132 		    "property");
133 	}
134 	namebufp = namebuf;
135 	if (strncmp(namebufp, "SUNW,", 5) == 0)
136 		namebufp += 5;
137 	if (strlen(namebufp) > sizeof (cpunode->name))
138 		cmn_err(CE_PANIC, "Compatible property too big to "
139 		    "fit into the cpunode name buffer");
140 	(void) strcpy(cpunode->name, namebufp);
141 
142 	if (md_get_prop_val(mdp, cpuc,
143 	    "clock-frequency", &clk_freq)) {
144 			clk_freq = 0;
145 	}
146 	cpunode->clock_freq = clk_freq;
147 
148 	ASSERT(cpunode->clock_freq != 0);
149 	/*
150 	 * Compute scaling factor based on rate of %tick. This is used
151 	 * to convert from ticks derived from %tick to nanoseconds. See
152 	 * comment in sun4u/sys/clock.h for details.
153 	 */
154 	cpunode->tick_nsec_scale = (uint_t)(((uint64_t)NANOSEC <<
155 	    (32 - TICK_NSEC_SHIFT)) / cpunode->clock_freq);
156 
157 	/*
158 	 * The nodeid is not used in sun4v at all. Setting it
159 	 * to positive value to make starting of slave CPUs
160 	 * code happy.
161 	 */
162 	cpunode->nodeid = cpuid + 1;
163 
164 	/*
165 	 * Obtain the L2 cache information from MD.
166 	 * If "Cache" node exists, then set L2 cache properties
167 	 * as read from MD.
168 	 * If node does not exists, then set the L2 cache properties
169 	 * in individual CPU module.
170 	 */
171 	if ((!get_l2_cache_info(mdp, cpuc,
172 	    &associativity, &size, &linesize)) ||
173 	    associativity == 0 || size == 0 || linesize == 0) {
174 		cpu_fiximp(cpunode);
175 	} else {
176 		/*
177 		 * Do not expect L2 cache properties to be bigger
178 		 * than 32-bit quantity.
179 		 */
180 		cpunode->ecache_associativity = (int)associativity;
181 		cpunode->ecache_size = (int)size;
182 		cpunode->ecache_linesize = (int)linesize;
183 	}
184 
185 	cpunode->ecache_setsize =
186 	    cpunode->ecache_size / cpunode->ecache_associativity;
187 
188 	/*
189 	 * Initialize the mapping for exec unit, chip and core.
190 	 */
191 	cpunode->exec_unit_mapping = NO_EU_MAPPING_FOUND;
192 	cpunode->l2_cache_mapping = NO_MAPPING_FOUND;
193 	cpunode->core_mapping = NO_CORE_MAPPING_FOUND;
194 
195 	if (ecache_setsize == 0)
196 		ecache_setsize = cpunode->ecache_setsize;
197 	if (ecache_alignsize == 0)
198 		ecache_alignsize = cpunode->ecache_linesize;
199 
200 }
201 
202 void
203 empty_cpu(int cpuid)
204 {
205 	bzero(&cpunodes[cpuid], sizeof (struct cpu_node));
206 }
207 
208 /*
209  * Use L2 cache node to derive the chip mapping.
210  */
211 void
212 setup_chip_mappings(md_t *mdp)
213 {
214 	uint64_t ncache, ncpu;
215 	mde_cookie_t *node, *cachelist;
216 	int i, j;
217 	processorid_t cpuid;
218 	int idx = 0;
219 
220 	ncache = md_alloc_scan_dag(mdp, md_root_node(mdp), "cache",
221 	    "fwd", &cachelist);
222 
223 	/*
224 	 * The "cache" node is optional in MD, therefore ncaches can be 0.
225 	 */
226 	if (ncache < 1) {
227 		return;
228 	}
229 
230 	for (i = 0; i < ncache; i++) {
231 		uint64_t cache_level;
232 		uint64_t lcpuid;
233 
234 		if (md_get_prop_val(mdp, cachelist[i], "level", &cache_level))
235 			continue;
236 
237 		if (cache_level != 2)
238 			continue;
239 
240 		/*
241 		 * Found a l2 cache node. Find out the cpu nodes it
242 		 * points to.
243 		 */
244 		ncpu = md_alloc_scan_dag(mdp, cachelist[i], "cpu",
245 		    "back", &node);
246 
247 		if (ncpu < 1)
248 			continue;
249 
250 		for (j = 0; j < ncpu; j++) {
251 			if (md_get_prop_val(mdp, node[j], "id", &lcpuid))
252 				continue;
253 			if (lcpuid >= NCPU)
254 				continue;
255 			cpuid = (processorid_t)lcpuid;
256 			cpunodes[cpuid].l2_cache_mapping = idx;
257 		}
258 		md_free_scan_dag(mdp, &node);
259 
260 		idx++;
261 	}
262 
263 	md_free_scan_dag(mdp, &cachelist);
264 }
265 
/*
 * Walk the MD "exec-unit" (or "exec_unit" on broken MDs) nodes under the
 * "cpus" node and record, for every cpu attached to each unit, an index
 * identifying which integer pipeline / FPU the cpu shares.  Cpus with the
 * same exec_unit_mapping (or fpu_mapping) share that execution unit.
 */
void
setup_exec_unit_mappings(md_t *mdp)
{
	uint64_t num, num_eunits;
	mde_cookie_t cpus_node;
	mde_cookie_t *node, *eunit;
	int idx, i, j;
	processorid_t cpuid;
	/* Broken (pre-domaining) MDs spell the node name with an underscore. */
	char *eunit_name = broken_md_flag ? "exec_unit" : "exec-unit";
	enum eu_type { INTEGER, FPU } etype;

	/*
	 * Find the cpu integer exec units - and
	 * setup the mappings appropriately.
	 */
	num = md_alloc_scan_dag(mdp, md_root_node(mdp), "cpus", "fwd", &node);
	if (num < 1)
		cmn_err(CE_PANIC, "No cpus node in machine description");
	if (num > 1)
		cmn_err(CE_PANIC, "More than 1 cpus node in machine"
		    " description");

	cpus_node = node[0];
	md_free_scan_dag(mdp, &node);

	num_eunits = md_alloc_scan_dag(mdp, cpus_node, eunit_name,
	    "fwd", &eunit);
	if (num_eunits > 0) {
		/* Broken MDs also abbreviate the "type" property values. */
		char *int_str = broken_md_flag ? "int" : "integer";
		char *fpu_str = "fp";

		/* Spin through and find all the integer exec units */
		for (i = 0; i < num_eunits; i++) {
			char *p;
			char *val;
			int vallen;
			uint64_t lcpuid;

			/* ignore nodes with no type */
			if (md_get_prop_data(mdp, eunit[i], "type",
			    (uint8_t **)&val, &vallen))
				continue;

			/*
			 * "type" is a NUL-separated string list; scan it
			 * for a recognized exec-unit type.
			 */
			for (p = val; *p != '\0'; p += strlen(p) + 1) {
				if (strcmp(p, int_str) == 0) {
					etype = INTEGER;
					goto found;
				}
				if (strcmp(p, fpu_str) == 0) {
					etype = FPU;
					goto found;
				}
			}

			continue;
found:
			/*
			 * NOTE(review): indices start at NCPU, presumably to
			 * keep exec-unit indices disjoint from other mapping
			 * index namespaces -- confirm against consumers of
			 * exec_unit_mapping/fpu_mapping.
			 */
			idx = NCPU + i;
			/*
			 * find the cpus attached to this EU and
			 * update their mapping indices
			 */
			num = md_alloc_scan_dag(mdp, eunit[i], "cpu",
			    "back", &node);

			if (num < 1)
				cmn_err(CE_PANIC, "exec-unit node in MD"
				    " not attached to a cpu node");

			for (j = 0; j < num; j++) {
				if (md_get_prop_val(mdp, node[j], "id",
				    &lcpuid))
					continue;
				if (lcpuid >= NCPU)
					continue;
				cpuid = (processorid_t)lcpuid;
				switch (etype) {
				case INTEGER:
					cpunodes[cpuid].exec_unit_mapping = idx;
					break;
				case FPU:
					cpunodes[cpuid].fpu_mapping = idx;
					break;
				}
			}
			md_free_scan_dag(mdp, &node);
		}


		md_free_scan_dag(mdp, &eunit);
	}
}
357 
/*
 * All the common setup of sun4v CPU modules is done by this routine.
 *
 * Reads the machine description once, fills in cpunodes[] for every cpu,
 * derives chip/exec-unit mappings, queue sizes, the ISA list, VA bits and
 * the kpm window, then releases the MD handle.  Panics if the MD or its
 * mandatory nodes are unavailable.
 *
 * cpu_module_isa_set is an optional NULL-terminated ISA list supplied by
 * the cpu module; it is appended to the MD isalist only on broken MDs.
 */
void
cpu_setup_common(char **cpu_module_isa_set)
{
	extern int mmu_exported_pagesize_mask;
	int nocpus, i;
	size_t ra_limit;
	mde_cookie_t *cpulist;
	md_t *mdp;

	if ((mdp = md_get_handle()) == NULL)
		cmn_err(CE_PANIC, "Unable to initialize machine description");

	nocpus = md_alloc_scan_dag(mdp,
	    md_root_node(mdp), "cpu", "fwd", &cpulist);
	if (nocpus < 1) {
		cmn_err(CE_PANIC, "cpu_common_setup: cpulist allocation "
		    "failed or incorrect number of CPUs in MD");
	}

	/* Must run first: broken_md_flag influences everything below. */
	init_md_broken(mdp, cpulist);

	if (use_page_coloring) {
		do_pg_coloring = 1;
	}

	/*
	 * Get the valid mmu page sizes mask, Q sizes and isalist
	 * from the MD for the first available CPU in cpulist.
	 *
	 * Do not expect the MMU page sizes mask to be more than 32-bit.
	 */
	mmu_exported_pagesize_mask = (int)get_cpu_pagesizes(mdp, cpulist[0]);

	/*
	 * Get the number of contexts and tsbs supported.
	 * Enable shared-context support only when the cpu advertises at
	 * least the minimum number of shared contexts and TSBs.
	 */
	if (get_mmu_shcontexts(mdp, cpulist[0]) >= MIN_NSHCONTEXTS &&
	    get_mmu_tsbs(mdp, cpulist[0]) >= MIN_NTSBS) {
		shctx_on = 1;
	}

	for (i = 0; i < nocpus; i++)
		fill_cpu(mdp, cpulist[i]);

	setup_chip_mappings(mdp);
	setup_exec_unit_mappings(mdp);

	/*
	 * If MD is broken then append the passed ISA set,
	 * otherwise trust the MD.
	 */

	if (broken_md_flag)
		isa_list = construct_isalist(mdp, cpulist[0],
		    cpu_module_isa_set);
	else
		isa_list = construct_isalist(mdp, cpulist[0], NULL);

	get_q_sizes(mdp, cpulist[0]);

	get_va_bits(mdp, cpulist[0]);

	/*
	 * ra_limit is the highest real address in the machine.
	 */
	ra_limit = get_ra_limit(mdp);

	md_free_scan_dag(mdp, &cpulist);

	(void) md_fini_handle(mdp);

	/*
	 * Block stores invalidate all pages of the d$ so pagecopy
	 * et. al. do not need virtual translations with virtual
	 * coloring taken into consideration.
	 */
	pp_consistent_coloring = 0;

	/*
	 * The kpm mapping window.
	 * kpm_size:
	 *	The size of a single kpm range.
	 *	The overall size will be: kpm_size * vac_colors.
	 * kpm_vbase:
	 *	The virtual start address of the kpm range within the kernel
	 *	virtual address space. kpm_vbase has to be kpm_size aligned.
	 */

	/*
	 * Make kpm_vbase, kpm_size aligned to kpm_size_shift.
	 * To do this find the nearest power of 2 size that the
	 * actual ra_limit fits within.
	 * If it is an even power of two use that, otherwise use the
	 * next power of two larger than ra_limit.
	 */

	ASSERT(ra_limit != 0);

	/* (ra_limit & (ra_limit - 1)) != 0 tests "not a power of two". */
	kpm_size_shift = (ra_limit & (ra_limit - 1)) != 0 ?
	    highbit(ra_limit) : highbit(ra_limit) - 1;

	/*
	 * No virtual caches on sun4v so size matches size shift
	 */
	kpm_size = 1ul << kpm_size_shift;

	if (va_bits < VA_ADDRESS_SPACE_BITS) {
		/*
		 * In case of VA hole
		 * kpm_base = hole_end + 1TB
		 * Starting 1TB beyond where VA hole ends because on Niagara
		 * processor software must not use pages within 4GB of the
		 * VA hole as instruction pages to avoid problems with
		 * prefetching into the VA hole.
		 */
		kpm_vbase = (caddr_t)((0ull - (1ull << (va_bits - 1))) +
		    (1ull << 40));
	} else {		/* Number of VA bits 64 ... no VA hole */
		kpm_vbase = (caddr_t)0x8000000000000000ull;	/* 8 EB */
	}

	/*
	 * The traptrace code uses either %tick or %stick for
	 * timestamping.  The sun4v require use of %stick.
	 */
	traptrace_use_stick = 1;
}
488 
489 /*
490  * Get the nctxs from MD. If absent panic.
491  */
492 static uint64_t
493 get_mmu_ctx_bits(md_t *mdp, mde_cookie_t cpu_node_cookie)
494 {
495 	uint64_t ctx_bits;
496 
497 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-#context-bits",
498 	    &ctx_bits))
499 		ctx_bits = 0;
500 
501 	if (ctx_bits < MIN_NCTXS_BITS || ctx_bits > MAX_NCTXS_BITS)
502 		cmn_err(CE_PANIC, "Incorrect %ld number of contexts bits "
503 		    "returned by MD", ctx_bits);
504 
505 	return (ctx_bits);
506 }
507 
508 /*
509  * Get the number of tsbs from MD. If absent the default value is 0.
510  */
511 static uint64_t
512 get_mmu_tsbs(md_t *mdp, mde_cookie_t cpu_node_cookie)
513 {
514 	uint64_t number_tsbs;
515 
516 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-max-#tsbs",
517 	    &number_tsbs))
518 		number_tsbs = 0;
519 
520 	return (number_tsbs);
521 }
522 
/*
 * Get the number of shared contexts from MD. This property more accurately
 * describes the total number of contexts available, not just "shared contexts".
 * If absent the default value is 0.
 */
static uint64_t
get_mmu_shcontexts(md_t *mdp, mde_cookie_t cpu_node_cookie)
{
	uint64_t number_contexts;

	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-#shared-contexts",
	    &number_contexts))
		number_contexts = 0;

	return (number_contexts);
}
540 
541 /*
542  * Initalize supported page sizes information.
543  * Set to 0, if the page sizes mask information is absent in MD.
544  */
545 static uint64_t
546 get_cpu_pagesizes(md_t *mdp, mde_cookie_t cpu_node_cookie)
547 {
548 	uint64_t mmu_page_size_list;
549 
550 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-page-size-list",
551 	    &mmu_page_size_list))
552 		mmu_page_size_list = 0;
553 
554 	if (mmu_page_size_list == 0 || mmu_page_size_list > MAX_PAGESIZE_MASK)
555 		cmn_err(CE_PANIC, "Incorrect 0x%lx pagesize mask returned"
556 		    "by MD", mmu_page_size_list);
557 
558 	return (mmu_page_size_list);
559 }
560 
/*
 * This routine gets the isalist information from MD and appends
 * the CPU module ISA set if required.
 *
 * The MD "isalist" property is a NUL-separated list of ISA names; the
 * result is a single space-separated string allocated with prom_alloc().
 * cpu_module_isa_set, when non-NULL, is a NULL-terminated array whose
 * entries are appended only if not already present in the MD list.
 * Returns the newly allocated string; panics on allocation failure.
 */
static char *
construct_isalist(md_t *mdp, mde_cookie_t cpu_node_cookie,
    char **cpu_module_isa_set)
{
	extern int at_flags;
	char *md_isalist;
	int md_isalen;
	char *isabuf;
	int isalen;
	char **isa_set;
	char *p, *q;
	int cpu_module_isalen = 0, found = 0;

	(void) md_get_prop_data(mdp, cpu_node_cookie,
	    "isalist", (uint8_t **)&isabuf, &isalen);

	/*
	 * We support binaries for all the cpus that have shipped so far.
	 * The kernel emulates instructions that are not supported by hardware.
	 */
	at_flags = EF_SPARC_SUN_US3 | EF_SPARC_32PLUS | EF_SPARC_SUN_US1;

	/*
	 * Construct the space separated isa_list.
	 */
	if (cpu_module_isa_set != NULL) {
		for (isa_set = cpu_module_isa_set; *isa_set != NULL;
		    isa_set++) {
			cpu_module_isalen += strlen(*isa_set);
			cpu_module_isalen++;	/* for space character */
		}
	}

	/*
	 * Allocate the buffer of MD isa buffer length + CPU module
	 * isa buffer length.
	 */
	md_isalen = isalen + cpu_module_isalen + 2;
	md_isalist = (char *)prom_alloc((caddr_t)0, md_isalen, 0);
	if (md_isalist == NULL)
		cmn_err(CE_PANIC, "construct_isalist: Allocation failed for "
		    "md_isalist");

	md_isalist[0] = '\0'; /* create an empty string to start */
	/* Each NUL-separated MD entry becomes "<name> " in the result. */
	for (p = isabuf, q = p + isalen; p < q; p += strlen(p) + 1) {
		(void) strlcat(md_isalist, p, md_isalen);
		(void) strcat(md_isalist, " ");
	}

	/*
	 * Check if the isa_set is present in isalist returned by MD.
	 * If yes, then no need to append it, if no then append it to
	 * isalist returned by MD.
	 */
	if (cpu_module_isa_set != NULL) {
		for (isa_set = cpu_module_isa_set; *isa_set != NULL;
		    isa_set++) {
			found = 0;
			for (p = isabuf, q = p + isalen; p < q;
			    p += strlen(p) + 1) {
				if (strcmp(p, *isa_set) == 0) {
					found = 1;
					break;
				}
			}
			if (!found) {
				(void) strlcat(md_isalist, *isa_set, md_isalen);
				(void) strcat(md_isalist, " ");
			}
		}
	}

	/*
	 * Get rid of any trailing white spaces.
	 * NOTE(review): assumes at least one entry was appended; an empty
	 * MD isalist with no module set would index md_isalist[-1] --
	 * presumably the MD always supplies an isalist; confirm.
	 */
	md_isalist[strlen(md_isalist) - 1] = '\0';

	return (md_isalist);
}
642 
643 uint64_t
644 get_ra_limit(md_t *mdp)
645 {
646 	mde_cookie_t *mem_list;
647 	mde_cookie_t *mblock_list;
648 	int i;
649 	int memnodes;
650 	int nmblock;
651 	uint64_t base;
652 	uint64_t size;
653 	uint64_t ra_limit = 0, new_limit = 0;
654 
655 	memnodes = md_alloc_scan_dag(mdp,
656 	    md_root_node(mdp), "memory", "fwd", &mem_list);
657 
658 	ASSERT(memnodes == 1);
659 
660 	nmblock = md_alloc_scan_dag(mdp,
661 	    mem_list[0], "mblock", "fwd", &mblock_list);
662 	if (nmblock < 1)
663 		cmn_err(CE_PANIC, "cannot find mblock nodes in MD");
664 
665 	for (i = 0; i < nmblock; i++) {
666 		if (md_get_prop_val(mdp, mblock_list[i], "base", &base))
667 			cmn_err(CE_PANIC, "base property missing from MD"
668 			    " mblock node");
669 		if (md_get_prop_val(mdp, mblock_list[i], "size", &size))
670 			cmn_err(CE_PANIC, "size property missing from MD"
671 			    " mblock node");
672 
673 		ASSERT(size != 0);
674 
675 		new_limit = base + size;
676 
677 		if (base > new_limit)
678 			cmn_err(CE_PANIC, "mblock in MD wrapped around");
679 
680 		if (new_limit > ra_limit)
681 			ra_limit = new_limit;
682 	}
683 
684 	ASSERT(ra_limit != 0);
685 
686 	if (ra_limit > MAX_REAL_ADDRESS) {
687 		cmn_err(CE_WARN, "Highest real address in MD too large"
688 		    " clipping to %llx\n", MAX_REAL_ADDRESS);
689 		ra_limit = MAX_REAL_ADDRESS;
690 	}
691 
692 	md_free_scan_dag(mdp, &mblock_list);
693 
694 	md_free_scan_dag(mdp, &mem_list);
695 
696 	return (ra_limit);
697 }
698 
699 /*
700  * This routine sets the globals for CPU and DEV mondo queue entries and
701  * resumable and non-resumable error queue entries.
702  *
703  * First, look up the number of bits available to pass an entry number.
704  * This can vary by platform and may result in allocating an unreasonably
705  * (or impossibly) large amount of memory for the corresponding table,
706  * so we clamp it by 'max_entries'.  If the prop is missing, use
707  * 'default_entries'.
708  */
709 static uint64_t
710 get_single_q_size(md_t *mdp, mde_cookie_t cpu_node_cookie,
711     char *qnamep, uint64_t default_entries, uint64_t max_entries)
712 {
713 	uint64_t entries;
714 
715 	if (default_entries > max_entries)
716 		cmn_err(CE_CONT, "!get_single_q_size: dflt %ld > "
717 		    "max %ld for %s\n", default_entries, max_entries, qnamep);
718 
719 	if (md_get_prop_val(mdp, cpu_node_cookie, qnamep, &entries)) {
720 		if (!broken_md_flag)
721 			cmn_err(CE_PANIC, "Missing %s property in MD cpu node",
722 			    qnamep);
723 		entries = default_entries;
724 	} else {
725 		entries = 1 << entries;
726 	}
727 
728 	entries = MIN(entries, max_entries);
729 
730 	return (entries);
731 }
732 
733 /* Scaling constant used to compute size of cpu mondo queue */
734 #define	CPU_MONDO_Q_MULTIPLIER	8
735 
736 static void
737 get_q_sizes(md_t *mdp, mde_cookie_t cpu_node_cookie)
738 {
739 	uint64_t max_qsize;
740 	mde_cookie_t *platlist;
741 	int nrnode;
742 
743 	/*
744 	 * Compute the maximum number of entries for the cpu mondo queue.
745 	 * Use the appropriate property in the platform node, if it is
746 	 * available.  Else, base it on NCPU.
747 	 */
748 	nrnode = md_alloc_scan_dag(mdp,
749 	    md_root_node(mdp), "platform", "fwd", &platlist);
750 
751 	ASSERT(nrnode == 1);
752 
753 	ncpu_guest_max = NCPU;
754 	(void) md_get_prop_val(mdp, platlist[0], "max-cpus", &ncpu_guest_max);
755 	max_qsize = ncpu_guest_max * CPU_MONDO_Q_MULTIPLIER;
756 
757 	md_free_scan_dag(mdp, &platlist);
758 
759 	cpu_q_entries = get_single_q_size(mdp, cpu_node_cookie,
760 	    "q-cpu-mondo-#bits", DEFAULT_CPU_Q_ENTRIES, max_qsize);
761 
762 	dev_q_entries = get_single_q_size(mdp, cpu_node_cookie,
763 	    "q-dev-mondo-#bits", DEFAULT_DEV_Q_ENTRIES, MAXIVNUM);
764 
765 	cpu_rq_entries = get_single_q_size(mdp, cpu_node_cookie,
766 	    "q-resumable-#bits", CPU_RQ_ENTRIES, MAX_CPU_RQ_ENTRIES);
767 
768 	cpu_nrq_entries = get_single_q_size(mdp, cpu_node_cookie,
769 	    "q-nonresumable-#bits", CPU_NRQ_ENTRIES, MAX_CPU_NRQ_ENTRIES);
770 }
771 
772 
773 static void
774 get_va_bits(md_t *mdp, mde_cookie_t cpu_node_cookie)
775 {
776 	uint64_t value = VA_ADDRESS_SPACE_BITS;
777 
778 	if (md_get_prop_val(mdp, cpu_node_cookie, "mmu-#va-bits", &value))
779 		cmn_err(CE_PANIC, "mmu-#va-bits property  not found in MD");
780 
781 
782 	if (value == 0 || value > VA_ADDRESS_SPACE_BITS)
783 		cmn_err(CE_PANIC, "Incorrect number of va bits in MD");
784 
785 	/* Do not expect number of VA bits to be more than 32-bit quantity */
786 
787 	va_bits = (int)value;
788 
789 	/*
790 	 * Correct the value for VA bits on UltraSPARC-T1 based systems
791 	 * in case of broken MD.
792 	 */
793 	if (broken_md_flag)
794 		va_bits = DEFAULT_VA_ADDRESS_SPACE_BITS;
795 }
796 
797 /*
798  * This routine returns the L2 cache information such as -- associativity,
799  * size and linesize.
800  */
801 static int
802 get_l2_cache_info(md_t *mdp, mde_cookie_t cpu_node_cookie,
803 	    uint64_t *associativity, uint64_t *size, uint64_t *linesize)
804 {
805 	mde_cookie_t *cachelist;
806 	int ncaches, i;
807 	uint64_t max_level;
808 
809 	ncaches = md_alloc_scan_dag(mdp, cpu_node_cookie, "cache",
810 	    "fwd", &cachelist);
811 	/*
812 	 * The "cache" node is optional in MD, therefore ncaches can be 0.
813 	 */
814 	if (ncaches < 1) {
815 		return (0);
816 	}
817 
818 	max_level = 0;
819 	for (i = 0; i < ncaches; i++) {
820 		uint64_t cache_level;
821 		uint64_t local_assoc;
822 		uint64_t local_size;
823 		uint64_t local_lsize;
824 
825 		if (md_get_prop_val(mdp, cachelist[i], "level", &cache_level))
826 			continue;
827 
828 		if (cache_level <= max_level) continue;
829 
830 		/* If properties are missing from this cache ignore it */
831 
832 		if ((md_get_prop_val(mdp, cachelist[i],
833 		    "associativity", &local_assoc))) {
834 			continue;
835 		}
836 
837 		if ((md_get_prop_val(mdp, cachelist[i],
838 		    "size", &local_size))) {
839 			continue;
840 		}
841 
842 		if ((md_get_prop_val(mdp, cachelist[i],
843 		    "line-size", &local_lsize))) {
844 			continue;
845 		}
846 
847 		max_level = cache_level;
848 		*associativity = local_assoc;
849 		*size = local_size;
850 		*linesize = local_lsize;
851 	}
852 
853 	md_free_scan_dag(mdp, &cachelist);
854 
855 	return ((max_level > 0) ? 1 : 0);
856 }
857 
858 
859 /*
860  * Set the broken_md_flag to 1 if the MD doesn't have
861  * the domaining-enabled property in the platform node and the
862  * platform uses the UltraSPARC-T1 cpu. This flag is used to
863  * workaround some of the incorrect MD properties.
864  */
865 static void
866 init_md_broken(md_t *mdp, mde_cookie_t *cpulist)
867 {
868 	int nrnode;
869 	mde_cookie_t *platlist, rootnode;
870 	uint64_t val = 0;
871 	char *namebuf;
872 	int namelen;
873 
874 	rootnode = md_root_node(mdp);
875 	ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE);
876 	ASSERT(cpulist);
877 
878 	nrnode = md_alloc_scan_dag(mdp, rootnode, "platform", "fwd",
879 	    &platlist);
880 
881 	if (nrnode < 1)
882 		cmn_err(CE_PANIC, "init_md_broken: platform node missing");
883 
884 	if (md_get_prop_data(mdp, cpulist[0],
885 	    "compatible", (uint8_t **)&namebuf, &namelen)) {
886 		cmn_err(CE_PANIC, "init_md_broken: "
887 		    "Cannot read 'compatible' property of 'cpu' node");
888 	}
889 
890 	if (md_get_prop_val(mdp, platlist[0],
891 	    "domaining-enabled", &val) == -1 &&
892 	    strcmp(namebuf, "SUNW,UltraSPARC-T1") == 0)
893 		broken_md_flag = 1;
894 
895 	md_free_scan_dag(mdp, &platlist);
896 }
897