xref: /titanic_41/usr/src/uts/sun4u/cpu/us3_common_mmu.c (revision 70025d765b044c6d8594bb965a2247a61e991a99)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/sysmacros.h>
32 #include <sys/archsystm.h>
33 #include <sys/vmsystm.h>
34 #include <sys/machparam.h>
35 #include <sys/machsystm.h>
36 #include <vm/vm_dep.h>
37 #include <vm/hat_sfmmu.h>
38 #include <vm/seg_kmem.h>
39 #include <sys/cmn_err.h>
40 #include <sys/debug.h>
41 #include <sys/cpu_module.h>
42 #include <sys/sysmacros.h>
43 #include <sys/panic.h>
44 
45 /*
46  * Note that 'Cheetah PRM' refers to:
47  *   SPARC V9 JPS1 Implementation Supplement: Sun UltraSPARC-III
48  */
49 
50 /* Will be set !NULL for Cheetah+ and derivatives. */
51 extern uchar_t *ctx_pgsz_array;
52 
53 /*
54  * pan_disable_ism_large_pages and pan_disable_large_pages are the Panther-
55  * specific versions of disable_ism_large_pages and disable_large_pages,
56  * and feed back into those two hat variables at hat initialization time,
57  * for Panther-only systems.
58  *
59  * chpjag_disable_ism_large_pages is the Ch/Jaguar-specific version of
60  * disable_ism_large_pages. Ditto for chjag_disable_large_pages.
61  */
/* Nonzero iff every cpu node in the system is a Panther. */
static int panther_only = 0;

/*
 * Panther defaults.  Bits set here DISABLE the corresponding page size.
 * ISM: 64K/512K/256M disabled, leaving 8K/4M/32M.
 * HAT_LOAD: only 256M disabled; AUTOLPG: 4M and 256M disabled.
 */
static int pan_disable_ism_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE256M));
static int pan_disable_large_pages = (1 << TTE256M);
static int pan_disable_auto_large_pages = (1 << TTE4M) | (1 << TTE256M);

/*
 * Cheetah+/Jaguar defaults: 32M and 256M are always disabled (and 64K/512K
 * for ISM), leaving 8K/64K/512K/4M for normal mappings and 8K/4M for ISM.
 */
static int chjag_disable_ism_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static int chjag_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));
static int chjag_disable_auto_large_pages = ((1 << TTE32M) | (1 << TTE256M));
73 
74 /*
75  * The function returns the USIII-IV mmu-specific values for the
76  * hat's disable_large_pages and disable_ism_large_pages variables.
77  * Currently the hat's disable_large_pages and disable_ism_large_pages
78  * already contain the generic sparc 4 page size info, and the return
79  * values are or'd with those values.
80  */
81 int
82 mmu_large_pages_disabled(uint_t flag)
83 {
84 	int pages_disable = 0;
85 
86 	if (panther_only) {
87 		if (flag == HAT_LOAD) {
88 			pages_disable = pan_disable_large_pages;
89 		} else if (flag == HAT_LOAD_SHARE) {
90 			pages_disable = pan_disable_ism_large_pages;
91 		} else if (flag == HAT_LOAD_AUTOLPG) {
92 			pages_disable = pan_disable_auto_large_pages;
93 		}
94 	} else {
95 		if (flag == HAT_LOAD) {
96 			pages_disable = chjag_disable_large_pages;
97 		} else if (flag == HAT_LOAD_SHARE) {
98 			pages_disable = chjag_disable_ism_large_pages;
99 		} else if (flag == HAT_LOAD_AUTOLPG) {
100 			pages_disable = chjag_disable_auto_large_pages;
101 		}
102 	}
103 	return (pages_disable);
104 }
105 
106 #if defined(CPU_IMP_DUAL_PAGESIZE)
107 /*
108  * If a platform is running with only Ch+ or Jaguar, and then someone DR's
109  * in a Panther board, the Panther mmu will not like it if one of the already
110  * running threads is context switched to the Panther and tries to program
111  * a 512K or 4M page into the T512_1. So make these platforms pay the price
112  * and follow the Panther DTLB restrictions by default. :)
113  * The mmu_init_mmu_page_sizes code below takes care of heterogeneous
114  * platforms that don't support DR, like daktari.
115  *
116  * The effect of these restrictions is to limit the allowable values in
117  * sfmmu_pgsz[0] and sfmmu_pgsz[1], since these hat variables are used in
118  * mmu_set_ctx_page_sizes to set up the values in the ctx_pgsz_array that
119  * are used at context switch time. The value in sfmmu_pgsz[0] is used in
120  * P_pgsz0 and sfmmu_pgsz[1] is used in P_pgsz1, as per Figure F-1-1
121  * IMMU and DMMU Primary Context Register in the Panther Implementation
122  * Supplement and Table 15-21 DMMU Primary Context Register in the
123  * Cheetah+ Delta PRM.
124  */
/* See the block comment above for why DR-capable systems default to 1. */
#ifdef MIXEDCPU_DR_SUPPORTED
int panther_dtlb_restrictions = 1;
#else
int panther_dtlb_restrictions = 0;
#endif /* MIXEDCPU_DR_SUPPORTED */

/*
 * init_mmu_page_sizes is set to one after the bootup time initialization
 * via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a
 * valid value.
 */
int init_mmu_page_sizes = 0;
137 
138 /*
139  * mmu_init_large_pages is called with the desired ism_pagesize parameter,
140  * for Panther-only systems. It may be called from set_platform_defaults,
141  * if some value other than 32M is desired, for Panther-only systems.
142  * mmu_ism_pagesize is the tunable.  If it has a bad value, then only warn,
143  * since it would be bad form to panic due
144  * to a user typo.
145  *
146  * The function re-initializes the pan_disable_ism_large_pages and
147  * pan_disable_large_pages variables, which are closely related.
148  * Aka, if 32M is the desired [D]ISM page sizes, then 256M cannot be allowed
149  * for non-ISM large page usage, or DTLB conflict will occur. Please see the
150  * Panther PRM for additional DTLB technical info.
151  */
152 void
153 mmu_init_large_pages(size_t ism_pagesize)
154 {
155 	if (ctx_pgsz_array == NULL) {	/* disable_dual_pgsz flag */
156 		pan_disable_ism_large_pages = ((1 << TTE64K) |
157 			(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
158 		pan_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));
159 		auto_lpg_maxszc = TTE4M;
160 		return;
161 	}
162 
163 	switch (ism_pagesize) {
164 	case MMU_PAGESIZE4M:
165 		pan_disable_ism_large_pages = ((1 << TTE64K) |
166 			(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
167 		pan_disable_large_pages = (1 << TTE256M);
168 		pan_disable_auto_large_pages = (1 << TTE32M) | (1 << TTE256M);
169 		auto_lpg_maxszc = TTE4M;
170 		break;
171 	case MMU_PAGESIZE32M:
172 		pan_disable_ism_large_pages = ((1 << TTE64K) |
173 			(1 << TTE512K) | (1 << TTE256M));
174 		pan_disable_large_pages = (1 << TTE256M);
175 		pan_disable_auto_large_pages = (1 << TTE4M) | (1 << TTE256M);
176 		auto_lpg_maxszc = TTE32M;
177 		break;
178 	case MMU_PAGESIZE256M:
179 		pan_disable_ism_large_pages = ((1 << TTE64K) |
180 			(1 << TTE512K) | (1 << TTE32M));
181 		pan_disable_large_pages = (1 << TTE32M);
182 		pan_disable_auto_large_pages = (1 << TTE4M) | (1 << TTE32M);
183 		auto_lpg_maxszc = TTE256M;
184 		break;
185 	default:
186 		cmn_err(CE_WARN, "Unrecognized mmu_ism_pagesize value 0x%lx",
187 			ism_pagesize);
188 		break;
189 	}
190 }
191 
192 /*
193  * Re-initialize mmu_page_sizes and friends, for Panther mmu support.
194  * Called during very early bootup from check_cpus_set().
195  * Can be called to verify that mmu_page_sizes are set up correctly.
196  * Note that ncpus is not initialized at this point in the bootup sequence.
197  */
198 int
199 mmu_init_mmu_page_sizes(int cinfo)
200 {
201 	int npanther = cinfo;
202 
203 	if (!init_mmu_page_sizes) {
204 		if (npanther == ncpunode) {
205 			mmu_page_sizes = MMU_PAGE_SIZES;
206 			mmu_hashcnt = MAX_HASHCNT;
207 			mmu_ism_pagesize = MMU_PAGESIZE32M;
208 			mmu_exported_pagesize_mask = (1 << TTE8K) |
209 			    (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) |
210 			    (1 << TTE32M) | (1 << TTE256M);
211 			panther_dtlb_restrictions = 1;
212 			panther_only = 1;
213 			auto_lpg_maxszc = TTE32M;
214 		} else if (npanther > 0) {
215 			panther_dtlb_restrictions = 1;
216 		}
217 		init_mmu_page_sizes = 1;
218 		return (0);
219 	}
220 	return (1);
221 }
222 
223 
/* Cheetah+ and later worst case DTLB parameters */
#ifndef	LOCKED_DTLB_ENTRIES
#define	LOCKED_DTLB_ENTRIES	5	/* 2 user TSBs, 2 nucleus, + OBP */
#endif
#define	TOTAL_DTLB_ENTRIES	16	/* worst-case T16 capacity */
#define	AVAIL_32M_ENTRIES	0	/* T16 does not hold 32M pages */
#define	AVAIL_256M_ENTRIES	0	/* T16 does not hold 256M pages */
#define	AVAIL_DTLB_ENTRIES	(TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES)
/*
 * Minimum mapped-page count, per page size code (8K..256M), before a T8
 * is reprogrammed to that size; below the threshold PAGESIZE is used.
 * 32M/256M thresholds are 0 so any use at all qualifies.
 */
static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = {
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_32M_ENTRIES, AVAIL_256M_ENTRIES };
236 
237 /*ARGSUSED*/
238 uint_t
239 mmu_preferred_pgsz(struct hat *hat, caddr_t addr, size_t len)
240 {
241 	sfmmu_t *sfmmup = (sfmmu_t *)hat;
242 	uint_t pgsz0, pgsz1;
243 	uint_t szc, maxszc = mmu_page_sizes - 1;
244 	size_t pgsz;
245 	extern int disable_large_pages;
246 
247 	pgsz0 = (uint_t)sfmmup->sfmmu_pgsz[0];
248 	pgsz1 = (uint_t)sfmmup->sfmmu_pgsz[1];
249 
250 	/*
251 	 * If either of the TLBs are reprogrammed, choose
252 	 * the largest mapping size as the preferred size,
253 	 * if it fits the size and alignment constraints.
254 	 * Else return the largest mapping size that fits,
255 	 * if neither TLB is reprogrammed.
256 	 */
257 	if (pgsz0 > TTE8K || pgsz1 > TTE8K) {
258 		if (pgsz1 > pgsz0) {	/* First try pgsz1 */
259 			pgsz = hw_page_array[pgsz1].hp_size;
260 			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
261 				return (pgsz1);
262 		}
263 		if (pgsz0 > TTE8K) {	/* Then try pgsz0, if !TTE8K */
264 			pgsz = hw_page_array[pgsz0].hp_size;
265 			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
266 				return (pgsz0);
267 		}
268 	} else { /* Otherwise pick best fit if neither TLB is reprogrammed. */
269 		for (szc = maxszc; szc > TTE8K; szc--) {
270 			if (disable_large_pages & (1 << szc))
271 				continue;
272 
273 			pgsz = hw_page_array[szc].hp_size;
274 			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
275 				return (szc);
276 		}
277 	}
278 	return (TTE8K);
279 }
280 
281 /*
282  * The purpose of this code is to indirectly reorganize the sfmmu_pgsz array
283  * in order to handle the Panther mmu DTLB requirements. Panther only supports
284  * the 32M/256M pages in the T512_1 and not in the T16, so the Panther cpu
285  * can only support one of the two largest page sizes at a time (efficiently).
286  * Panther only supports 512K and 4M pages in the T512_0, and 32M/256M pages
287  * in the T512_1.  So check the sfmmu flags and ttecnt before enabling
288  * the T512_1 for 32M or 256M page sizes, and make sure that 512K and 4M
289  * requests go to the T512_0.
290  *
291  * The tmp_pgsz array comes into this routine in sorted order, as it is
292  * sorted from largest to smallest #pages per pagesize in use by the hat code,
293  * and leaves with the Panther mmu DTLB requirements satisfied. Note that
294  * when the array leaves this function it may not contain all of the page
295  * size codes that it had coming into the function.
296  *
297  * Note that for DISM the flag can be set but the ttecnt can be 0, if we
298  * didn't fault any pages in. This allows the t512_1 to be reprogrammed,
299  * because the T16 does not support the two giant page sizes. ouch.
300  */
/*
 * Rearrange tmp_pgsz[0]/tmp_pgsz[1] (the candidate T512_0/T512_1 page
 * sizes) to satisfy Panther DTLB rules: 32M/256M only in the T512_1,
 * 512K/4M only in the T512_0.  The swap order below is significant;
 * see the block comment above for the full rationale.
 */
void
mmu_fixup_large_pages(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
{
	uint_t pgsz0 = tmp_pgsz[0];
	uint_t pgsz1 = tmp_pgsz[1];
	uint_t spgsz;

	/*
	 * Don't program 2nd dtlb for kernel and ism hat
	 */
	ASSERT(hat->sfmmu_ismhat == NULL);
	ASSERT(hat != ksfmmup);
	ASSERT(ctx_pgsz_array != NULL);

	/* A hat may use 32M or 256M pages, never both at once. */
	ASSERT((!SFMMU_FLAGS_ISSET(hat, HAT_32M_FLAG)) ||
		(!SFMMU_FLAGS_ISSET(hat, HAT_256M_FLAG)));

	/*
	 * 32M mapped (or DISM flag set with ttecnt still 0): 32M must own
	 * the T512_1; displace any 32M choice from slot 0.
	 */
	if ((SFMMU_FLAGS_ISSET(hat, HAT_32M_FLAG)) || (ttecnt[TTE32M] != 0)) {
		spgsz = pgsz1;
		pgsz1 = TTE32M;
		if (pgsz0 == TTE32M)
			pgsz0 = spgsz;
	} else if ((SFMMU_FLAGS_ISSET(hat, HAT_256M_FLAG)) ||
	    (ttecnt[TTE256M] != 0)) {
		/* Same policy for 256M pages. */
		spgsz = pgsz1;
		pgsz1 = TTE256M;
		if (pgsz0 == TTE256M)
			pgsz0 = spgsz;
	} else if ((pgsz1 == TTE512K) || (pgsz1 == TTE4M)) {
		/*
		 * 512K/4M may not sit in the T512_1: swap slots if slot 0
		 * is free of 512K/4M, else demote slot 1 to PAGESIZE.
		 */
		if ((pgsz0 != TTE512K) && (pgsz0 != TTE4M)) {
			spgsz = pgsz0;
			pgsz0 = pgsz1;
			pgsz1 = spgsz;
		} else {
			pgsz1 = page_szc(MMU_PAGESIZE);
		}
	}
	/*
	 * This implements PAGESIZE programming of the T8s
	 * if large TTE counts don't exceed the thresholds.
	 */
	if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
		pgsz0 = page_szc(MMU_PAGESIZE);
	if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
		pgsz1 = page_szc(MMU_PAGESIZE);
	tmp_pgsz[0] = pgsz0;
	tmp_pgsz[1] = pgsz1;
}
349 
350 /*
351  * Function to set up the page size values used to reprogram the DTLBs,
352  * when page sizes used by a process change significantly.
353  */
354 void
355 mmu_setup_page_sizes(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
356 {
357 	uint_t pgsz0, pgsz1;
358 
359 	/*
360 	 * Don't program 2nd dtlb for kernel and ism hat
361 	 */
362 	ASSERT(hat->sfmmu_ismhat == NULL);
363 	ASSERT(hat != ksfmmup);
364 
365 	if (ctx_pgsz_array == NULL)	/* disable_dual_pgsz flag */
366 		return;
367 
368 	/*
369 	 * hat->sfmmu_pgsz[] is an array whose elements
370 	 * contain a sorted order of page sizes.  Element
371 	 * 0 is the most commonly used page size, followed
372 	 * by element 1, and so on.
373 	 *
374 	 * ttecnt[] is an array of per-page-size page counts
375 	 * mapped into the process.
376 	 *
377 	 * If the HAT's choice for page sizes is unsuitable,
378 	 * we can override it here.  The new values written
379 	 * to the array will be handed back to us later to
380 	 * do the actual programming of the TLB hardware.
381 	 *
382 	 * The policy we use for programming the dual T8s on
383 	 * Cheetah+ and beyond is as follows:
384 	 *
385 	 *   We have two programmable TLBs, so we look at
386 	 *   the two most common page sizes in the array, which
387 	 *   have already been computed for us by the HAT.
388 	 *   If the TTE count of either of a preferred page size
389 	 *   exceeds the number of unlocked T16 entries,
390 	 *   we reprogram one of the T8s to that page size
391 	 *   to avoid thrashing in the T16.  Else we program
392 	 *   that T8 to the base page size.  Note that we do
393 	 *   not force either T8 to be the base page size if a
394 	 *   process is using more than two page sizes.  Policy
395 	 *   decisions about which page sizes are best to use are
396 	 *   left to the upper layers.
397 	 *
398 	 *   Note that for Panther, 4M and 512K pages need to be
399 	 *   programmed into T512_0, and 32M and 256M into T512_1,
400 	 *   so we don't want to go through the MIN/MAX code.
401 	 *   For partial-Panther systems, we still want to make sure
402 	 *   that 4M and 512K page sizes NEVER get into the T512_1.
403 	 *   Since the DTLB flags are not set up on a per-cpu basis,
404 	 *   Panther rules must be applied for mixed Panther/Cheetah+/
405 	 *   Jaguar configurations.
406 	 */
407 	if (panther_dtlb_restrictions) {
408 		if ((tmp_pgsz[1] == TTE512K) || (tmp_pgsz[1] == TTE4M)) {
409 			if ((tmp_pgsz[0] != TTE512K) &&
410 			    (tmp_pgsz[0] != TTE4M)) {
411 				pgsz1 = tmp_pgsz[0];
412 				pgsz0 = tmp_pgsz[1];
413 			} else {
414 				pgsz0 = tmp_pgsz[0];
415 				pgsz1 = page_szc(MMU_PAGESIZE);
416 			}
417 		} else {
418 			pgsz0 = tmp_pgsz[0];
419 			pgsz1 = tmp_pgsz[1];
420 		}
421 	} else {
422 		pgsz0 = MIN(tmp_pgsz[0], tmp_pgsz[1]);
423 		pgsz1 = MAX(tmp_pgsz[0], tmp_pgsz[1]);
424 	}
425 
426 	/*
427 	 * This implements PAGESIZE programming of the T8s
428 	 * if large TTE counts don't exceed the thresholds.
429 	 */
430 	if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
431 		pgsz0 = page_szc(MMU_PAGESIZE);
432 	if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
433 		pgsz1 = page_szc(MMU_PAGESIZE);
434 	tmp_pgsz[0] = pgsz0;
435 	tmp_pgsz[1] = pgsz1;
436 }
437 
438 /*
439  * The HAT calls this function when an MMU context is allocated so that we
440  * can reprogram the large TLBs appropriately for the new process using
441  * the context.
442  *
443  * The caller must hold the HAT lock.
444  */
445 void
446 mmu_set_ctx_page_sizes(struct hat *hat)
447 {
448 	uint_t pgsz0, pgsz1;
449 	uint_t new_cext;
450 
451 	ASSERT(sfmmu_hat_lock_held(hat));
452 	ASSERT(hat != ksfmmup);
453 
454 	if (ctx_pgsz_array == NULL)	/* disable_dual_pgsz flag */
455 		return;
456 
457 	/*
458 	 * If supported, reprogram the TLBs to a larger pagesize.
459 	 */
460 	pgsz0 = hat->sfmmu_pgsz[0];
461 	pgsz1 = hat->sfmmu_pgsz[1];
462 	ASSERT(pgsz0 < mmu_page_sizes);
463 	ASSERT(pgsz1 < mmu_page_sizes);
464 #ifdef DEBUG
465 	if (panther_dtlb_restrictions) {
466 		ASSERT(pgsz1 != TTE512K);
467 		ASSERT(pgsz1 != TTE4M);
468 	}
469 	if (panther_only) {
470 		ASSERT(pgsz0 != TTE32M);
471 		ASSERT(pgsz0 != TTE256M);
472 	}
473 #endif /* DEBUG */
474 	new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
475 	if (hat->sfmmu_cext != new_cext) {
476 		hat->sfmmu_cext = new_cext;
477 	}
478 	ctx_pgsz_array[hat->sfmmu_cnum] = hat->sfmmu_cext;
479 	/*
480 	 * sfmmu_setctx_sec() will take care of the
481 	 * rest of the chores reprogramming the ctx_pgsz_array
482 	 * page size values into the DTLBs.
483 	 */
484 }
485 
486 /*
487  * This function assumes that there are either four or six supported page
488  * sizes and at most two programmable TLBs, so we need to decide which
489  * page sizes are most important and then adjust the TLB page sizes
490  * accordingly (if supported).
491  *
492  * If these assumptions change, this function will need to be
493  * updated to support whatever the new limits are.
494  */
495 void
496 mmu_check_page_sizes(sfmmu_t *sfmmup, uint64_t *ttecnt)
497 {
498 	uint64_t sortcnt[MMU_PAGE_SIZES];
499 	uint8_t tmp_pgsz[MMU_PAGE_SIZES];
500 	uint8_t i, j, max;
501 	uint16_t oldval, newval;
502 
503 	/*
504 	 * We only consider reprogramming the TLBs if one or more of
505 	 * the two most used page sizes changes and we're using
506 	 * large pages in this process, except for Panther 32M/256M pages,
507 	 * which the Panther T16 does not support.
508 	 */
509 	if (sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) {
510 		/* Sort page sizes. */
511 		for (i = 0; i < mmu_page_sizes; i++) {
512 			sortcnt[i] = ttecnt[i];
513 		}
514 		for (j = 0; j < mmu_page_sizes; j++) {
515 			for (i = mmu_page_sizes - 1, max = 0; i > 0; i--) {
516 				if (sortcnt[i] > sortcnt[max])
517 					max = i;
518 			}
519 			tmp_pgsz[j] = max;
520 			sortcnt[max] = 0;
521 		}
522 
523 		/*
524 		 * Handle Panther page dtlb calcs separately. The check
525 		 * for actual or potential 32M/256M pages must occur
526 		 * every time due to lack of T16 support for them.
527 		 * The sort works fine for Ch+/Jag, but Panther has
528 		 * pagesize restrictions for both DTLBs.
529 		 */
530 		oldval = sfmmup->sfmmu_pgsz[0] << 8 | sfmmup->sfmmu_pgsz[1];
531 
532 		if (panther_only) {
533 			mmu_fixup_large_pages(sfmmup, ttecnt, tmp_pgsz);
534 		} else {
535 			/* Check 2 largest values after the sort. */
536 			mmu_setup_page_sizes(sfmmup, ttecnt, tmp_pgsz);
537 		}
538 		newval = tmp_pgsz[0] << 8 | tmp_pgsz[1];
539 		if (newval != oldval) {
540 			sfmmu_steal_context(sfmmup, tmp_pgsz);
541 		}
542 	}
543 }
544 
545 #endif	/* CPU_IMP_DUAL_PAGESIZE */
546 
/*
 * One candidate kernel-heap large page configuration per row: the cpu
 * implementation it applies to, the heap page size code, and whether a
 * T512 should be reprogrammed for the kernel heap.  The first row for
 * each implementation is its default (lpsize == 0 case).
 */
struct heap_lp_page_size {
	int    impl;		/* cpu implementation number */
	uint_t tte;		/* heap page size code */
	int    use_dt512;	/* nonzero: reprogram a T512 for the heap */
};

struct heap_lp_page_size heap_lp_pgsz[] = {

	{CHEETAH_IMPL, TTE8K, 0},		/* default */
	{CHEETAH_IMPL, TTE64K, 0},
	{CHEETAH_IMPL, TTE4M, 0},

	{ CHEETAH_PLUS_IMPL, TTE4M,  1 },	/* default */
	{ CHEETAH_PLUS_IMPL, TTE4M,  0 },
	{ CHEETAH_PLUS_IMPL, TTE64K, 1 },
	{ CHEETAH_PLUS_IMPL, TTE64K, 0 },
	{ CHEETAH_PLUS_IMPL, TTE8K,  0 },

	{ JALAPENO_IMPL, TTE4M,  1 },		/* default */
	{ JALAPENO_IMPL, TTE4M,  0 },
	{ JALAPENO_IMPL, TTE64K, 1 },
	{ JALAPENO_IMPL, TTE64K, 0 },
	{ JALAPENO_IMPL, TTE8K,  0 },

	{ JAGUAR_IMPL, TTE4M, 1 },		/* default */
	{ JAGUAR_IMPL, TTE4M, 0 },
	{ JAGUAR_IMPL, TTE64K, 1 },
	{ JAGUAR_IMPL, TTE64K, 0 },
	{ JAGUAR_IMPL, TTE8K, 0 },

	{ SERRANO_IMPL, TTE4M,  1 },		/* default */
	{ SERRANO_IMPL, TTE4M,  0 },
	{ SERRANO_IMPL, TTE64K, 1 },
	{ SERRANO_IMPL, TTE64K, 0 },
	{ SERRANO_IMPL, TTE8K,  0 },

	{ PANTHER_IMPL, TTE4M, 1 },		/* default */
	{ PANTHER_IMPL, TTE4M, 0 },
	{ PANTHER_IMPL, TTE64K, 1 },
	{ PANTHER_IMPL, TTE64K, 0 },
	{ PANTHER_IMPL, TTE8K, 0 }
};

/* -1 until mmu_get_kernel_lpsize() selects a configuration. */
int	heaplp_use_dt512 = -1;
591 
592 void
593 mmu_init_kernel_pgsz(struct hat *hat)
594 {
595 	uint_t tte = page_szc(segkmem_lpsize);
596 	uchar_t new_cext_primary, new_cext_nucleus;
597 
598 	if (heaplp_use_dt512 == 0 || tte > TTE4M) {
599 		/* do not reprogram dt512 tlb */
600 		tte = TTE8K;
601 	}
602 
603 	new_cext_nucleus = TAGACCEXT_MKSZPAIR(tte, TTE8K);
604 	new_cext_primary = TAGACCEXT_MKSZPAIR(TTE8K, tte);
605 
606 	if (ctx_pgsz_array)
607 		ctx_pgsz_array[KCONTEXT] = new_cext_primary;
608 	hat->sfmmu_cext = new_cext_primary;
609 	kcontextreg = ((uint64_t)new_cext_nucleus << CTXREG_NEXT_SHIFT) |
610 		((uint64_t)new_cext_primary << CTXREG_EXT_SHIFT);
611 	mmu_init_kcontext();
612 }
613 
614 size_t
615 mmu_get_kernel_lpsize(size_t lpsize)
616 {
617 	struct heap_lp_page_size *p_lpgsz, *pend_lpgsz;
618 	int impl = cpunodes[getprocessorid()].implementation;
619 	uint_t tte = TTE8K;
620 
621 	pend_lpgsz = (struct heap_lp_page_size *)
622 	    ((char *)heap_lp_pgsz + sizeof (heap_lp_pgsz));
623 
624 	/* search for a valid segkmem_lpsize */
625 	for (p_lpgsz = heap_lp_pgsz; p_lpgsz < pend_lpgsz; p_lpgsz++) {
626 		if (impl != p_lpgsz->impl)
627 			continue;
628 
629 		if (lpsize == 0) {
630 			/*
631 			 * no setting for segkmem_lpsize in /etc/system
632 			 * use default from the table
633 			 */
634 			tte = p_lpgsz->tte;
635 			heaplp_use_dt512 = p_lpgsz->use_dt512;
636 			break;
637 		}
638 
639 		if (lpsize == TTEBYTES(p_lpgsz->tte) &&
640 		    (heaplp_use_dt512 == -1 ||
641 			heaplp_use_dt512 == p_lpgsz->use_dt512)) {
642 
643 			tte = p_lpgsz->tte;
644 			heaplp_use_dt512 = p_lpgsz->use_dt512;
645 
646 			/* found a match */
647 			break;
648 		}
649 	}
650 
651 	if (p_lpgsz == pend_lpgsz) {
652 		/* nothing found: disable large page kernel heap */
653 		tte = TTE8K;
654 		heaplp_use_dt512 = 0;
655 	}
656 
657 	lpsize = TTEBYTES(tte);
658 
659 	return (lpsize);
660 }
661