xref: /titanic_50/usr/src/uts/sun4u/cpu/us3_common_mmu.c (revision 3af08d828975d7e2581b6829e0eecff14d87a483)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/systm.h>
30 #include <sys/sysmacros.h>
31 #include <sys/archsystm.h>
32 #include <sys/vmsystm.h>
33 #include <sys/machparam.h>
34 #include <sys/machsystm.h>
35 #include <vm/vm_dep.h>
36 #include <vm/hat_sfmmu.h>
37 #include <vm/seg_kmem.h>
38 #include <sys/cmn_err.h>
39 #include <sys/debug.h>
40 #include <sys/cpu_module.h>
41 #include <sys/sysmacros.h>
42 #include <sys/panic.h>
43 
44 /*
45  * Note that 'Cheetah PRM' refers to:
46  *   SPARC V9 JPS1 Implementation Supplement: Sun UltraSPARC-III
47  */
48 
49 /*
50  * pan_disable_ism_large_pages and pan_disable_large_pages are the Panther-
51  * specific versions of disable_ism_large_pages and disable_large_pages,
52  * and feed back into those two hat variables at hat initialization time,
53  * for Panther-only systems.
54  *
 * chjag_disable_ism_large_pages is the Ch/Jaguar-specific version of
56  * disable_ism_large_pages. Ditto for chjag_disable_large_pages.
57  */
/* Nonzero when every cpu in the system is a Panther (set at bootup). */
static int panther_only = 0;

/* Panther-specific page size disable masks (bit N set == TTE size N off). */
static int pan_disable_ism_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static int pan_disable_large_pages = (1 << TTE256M);
static int pan_disable_auto_large_pages =  ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));

/* Cheetah+/Jaguar-specific page size disable masks. */
static int chjag_disable_ism_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static int chjag_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));
static int chjag_disable_auto_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
71 
72 /*
73  * The function returns the USIII-IV mmu-specific values for the
74  * hat's disable_large_pages and disable_ism_large_pages variables.
75  * Currently the hat's disable_large_pages and disable_ism_large_pages
76  * already contain the generic sparc 4 page size info, and the return
77  * values are or'd with those values.
78  */
79 int
80 mmu_large_pages_disabled(uint_t flag)
81 {
82 	int pages_disable = 0;
83 
84 	if (panther_only) {
85 		if (flag == HAT_LOAD) {
86 			pages_disable = pan_disable_large_pages;
87 		} else if (flag == HAT_LOAD_SHARE) {
88 			pages_disable = pan_disable_ism_large_pages;
89 		} else if (flag == HAT_LOAD_AUTOLPG) {
90 			pages_disable = pan_disable_auto_large_pages;
91 		}
92 	} else {
93 		if (flag == HAT_LOAD) {
94 			pages_disable = chjag_disable_large_pages;
95 		} else if (flag == HAT_LOAD_SHARE) {
96 			pages_disable = chjag_disable_ism_large_pages;
97 		} else if (flag == HAT_LOAD_AUTOLPG) {
98 			pages_disable = chjag_disable_auto_large_pages;
99 		}
100 	}
101 	return (pages_disable);
102 }
103 
104 #if defined(CPU_IMP_DUAL_PAGESIZE)
105 /*
106  * If a platform is running with only Ch+ or Jaguar, and then someone DR's
107  * in a Panther board, the Panther mmu will not like it if one of the already
108  * running threads is context switched to the Panther and tries to program
109  * a 512K or 4M page into the T512_1. So make these platforms pay the price
110  * and follow the Panther DTLB restrictions by default. :)
111  * The mmu_init_mmu_page_sizes code below takes care of heterogeneous
112  * platforms that don't support DR, like daktari.
113  *
114  * The effect of these restrictions is to limit the allowable values in
115  * sfmmu_pgsz[0] and sfmmu_pgsz[1], since these hat variables are used in
116  * mmu_set_ctx_page_sizes to set up the values in the sfmmu_cext that
117  * are used at context switch time. The value in sfmmu_pgsz[0] is used in
118  * P_pgsz0 and sfmmu_pgsz[1] is used in P_pgsz1, as per Figure F-1-1
119  * IMMU and DMMU Primary Context Register in the Panther Implementation
120  * Supplement and Table 15-21 DMMU Primary Context Register in the
121  * Cheetah+ Delta PRM.
122  */
#ifdef MIXEDCPU_DR_SUPPORTED
/*
 * Apply the Panther DTLB restrictions by default, since a Panther board
 * could be DR'd in at any time (see block comment above).
 */
int panther_dtlb_restrictions = 1;
#else
int panther_dtlb_restrictions = 0;
#endif /* MIXEDCPU_DR_SUPPORTED */

/*
 * init_mmu_page_sizes is set to one after the bootup time initialization
 * via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a
 * valid value.
 */
int init_mmu_page_sizes = 0;
135 
136 /*
137  * mmu_init_large_pages is called with the desired ism_pagesize parameter,
138  * for Panther-only systems. It may be called from set_platform_defaults,
139  * if some value other than 32M is desired, for Panther-only systems.
140  * mmu_ism_pagesize is the tunable.  If it has a bad value, then only warn,
141  * since it would be bad form to panic due
142  * to a user typo.
143  *
144  * The function re-initializes the pan_disable_ism_large_pages and
145  * pan_disable_large_pages variables, which are closely related.
146  * Aka, if 32M is the desired [D]ISM page sizes, then 256M cannot be allowed
147  * for non-ISM large page usage, or DTLB conflict will occur. Please see the
148  * Panther PRM for additional DTLB technical info.
149  */
150 void
151 mmu_init_large_pages(size_t ism_pagesize)
152 {
153 	if (cpu_impl_dual_pgsz == 0) {	/* disable_dual_pgsz flag */
154 		pan_disable_ism_large_pages = ((1 << TTE64K) |
155 			(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
156 		pan_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));
157 		auto_lpg_maxszc = TTE4M;
158 		return;
159 	}
160 
161 	switch (ism_pagesize) {
162 	case MMU_PAGESIZE4M:
163 		pan_disable_ism_large_pages = ((1 << TTE64K) |
164 			(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
165 		pan_disable_large_pages = (1 << TTE256M);
166 		pan_disable_auto_large_pages = ((1 << TTE64K) |
167 			(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
168 		auto_lpg_maxszc = TTE4M;
169 		break;
170 	case MMU_PAGESIZE32M:
171 		pan_disable_ism_large_pages = ((1 << TTE64K) |
172 			(1 << TTE512K) | (1 << TTE256M));
173 		pan_disable_large_pages = (1 << TTE256M);
174 		pan_disable_auto_large_pages = ((1 << TTE64K) |
175 			(1 << TTE512K) | (1 << TTE4M) | (1 << TTE256M));
176 		auto_lpg_maxszc = TTE32M;
177 		break;
178 	case MMU_PAGESIZE256M:
179 		pan_disable_ism_large_pages = ((1 << TTE64K) |
180 			(1 << TTE512K) | (1 << TTE32M));
181 		pan_disable_large_pages = (1 << TTE32M);
182 		pan_disable_auto_large_pages = ((1 << TTE64K) |
183 			(1 << TTE512K) | (1 << TTE4M) | (1 << TTE32M));
184 		auto_lpg_maxszc = TTE256M;
185 		break;
186 	default:
187 		cmn_err(CE_WARN, "Unrecognized mmu_ism_pagesize value 0x%lx",
188 			ism_pagesize);
189 		break;
190 	}
191 }
192 
193 /*
194  * Re-initialize mmu_page_sizes and friends, for Panther mmu support.
195  * Called during very early bootup from check_cpus_set().
196  * Can be called to verify that mmu_page_sizes are set up correctly.
197  * Note that ncpus is not initialized at this point in the bootup sequence.
198  */
199 int
200 mmu_init_mmu_page_sizes(int cinfo)
201 {
202 	int npanther = cinfo;
203 
204 	if (!init_mmu_page_sizes) {
205 		if (npanther == ncpunode) {
206 			mmu_page_sizes = MMU_PAGE_SIZES;
207 			mmu_hashcnt = MAX_HASHCNT;
208 			mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;
209 			mmu_exported_pagesize_mask = (1 << TTE8K) |
210 			    (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) |
211 			    (1 << TTE32M) | (1 << TTE256M);
212 			panther_dtlb_restrictions = 1;
213 			panther_only = 1;
214 			auto_lpg_maxszc = TTE4M;
215 		} else if (npanther > 0) {
216 			panther_dtlb_restrictions = 1;
217 		}
218 		init_mmu_page_sizes = 1;
219 		return (0);
220 	}
221 	return (1);
222 }
223 
224 
/* Cheetah+ and later worst case DTLB parameters */
#ifndef	LOCKED_DTLB_ENTRIES
#define	LOCKED_DTLB_ENTRIES	5	/* 2 user TSBs, 2 nucleus, + OBP */
#endif
#define	TOTAL_DTLB_ENTRIES	16	/* T16 fully associative DTLB size */
#define	AVAIL_32M_ENTRIES	0	/* T16 cannot hold 32M entries */
#define	AVAIL_256M_ENTRIES	0	/* T16 cannot hold 256M entries */
#define	AVAIL_DTLB_ENTRIES	(TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES)
/*
 * Minimum TTE counts, indexed by page size code, for a T8 to be
 * reprogrammed to that page size (a count below the threshold means the
 * unlocked T16 entries can absorb the mappings).  32M/256M thresholds
 * are 0 since the T16 has no entries for them, so any use of those
 * sizes warrants T512 programming.
 */
static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = {
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_32M_ENTRIES, AVAIL_256M_ENTRIES };
237 
/*
 * The purpose of this code is to indirectly reorganize the sfmmu_pgsz array
 * in order to handle the Panther mmu DTLB requirements. Panther only supports
 * the 32M/256M pages in the T512_1 and not in the T16, so the Panther cpu
 * can only support one of the two largest page sizes at a time (efficiently).
 * Panther only supports 512K and 4M pages in the T512_0, and 32M/256M pages
 * in the T512_1.  So check the sfmmu flags and ttecnt before enabling
 * the T512_1 for 32M or 256M page sizes, and make sure that 512K and 4M
 * requests go to the T512_0.
 *
 * The tmp_pgsz array comes into this routine in sorted order, as it is
 * sorted from largest to smallest #pages per pagesize in use by the hat code,
 * and leaves with the Panther mmu DTLB requirements satisfied. Note that
 * when the array leaves this function it may not contain all of the page
 * size codes that it had coming into the function.
 *
 * Note that for DISM the flag can be set but the ttecnt can be 0, if we
 * didn't fault any pages in. This allows the t512_1 to be reprogrammed,
 * because the T16 does not support the two giant page sizes. ouch.
 */
void
mmu_fixup_large_pages(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
{
	uint_t pgsz0 = tmp_pgsz[0];	/* most-used page size code */
	uint_t pgsz1 = tmp_pgsz[1];	/* second most-used page size code */
	uint_t spgsz;			/* swap temporary */

	/*
	 * Don't program 2nd dtlb for kernel and ism hat
	 */
	ASSERT(hat->sfmmu_ismhat == NULL);
	ASSERT(hat != ksfmmup);
	ASSERT(cpu_impl_dual_pgsz == 1);

	/* A process never uses both giant page sizes at once. */
	ASSERT((!SFMMU_FLAGS_ISSET(hat, HAT_32M_FLAG)) ||
		(!SFMMU_FLAGS_ISSET(hat, HAT_256M_FLAG)));

	/*
	 * Actual or potential (see DISM note above) 32M/256M usage must
	 * claim slot 1 (the T512_1); if that size occupied slot 0, move
	 * slot 1's previous occupant there instead.
	 */
	if ((SFMMU_FLAGS_ISSET(hat, HAT_32M_FLAG)) || (ttecnt[TTE32M] != 0)) {
		spgsz = pgsz1;
		pgsz1 = TTE32M;
		if (pgsz0 == TTE32M)
			pgsz0 = spgsz;
	} else if ((SFMMU_FLAGS_ISSET(hat, HAT_256M_FLAG)) ||
	    (ttecnt[TTE256M] != 0)) {
		spgsz = pgsz1;
		pgsz1 = TTE256M;
		if (pgsz0 == TTE256M)
			pgsz0 = spgsz;
	} else if ((pgsz1 == TTE512K) || (pgsz1 == TTE4M)) {
		/*
		 * 512K/4M may only go in the T512_0 (slot 0): swap the
		 * slots if possible, otherwise demote slot 1 to the base
		 * page size since slot 0 already holds a T512_0-only size.
		 */
		if ((pgsz0 != TTE512K) && (pgsz0 != TTE4M)) {
			spgsz = pgsz0;
			pgsz0 = pgsz1;
			pgsz1 = spgsz;
		} else {
			pgsz1 = page_szc(MMU_PAGESIZE);
		}
	}
	/*
	 * This implements PAGESIZE programming of the T8s
	 * if large TTE counts don't exceed the thresholds.
	 */
	if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
		pgsz0 = page_szc(MMU_PAGESIZE);
	if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
		pgsz1 = page_szc(MMU_PAGESIZE);
	tmp_pgsz[0] = pgsz0;
	tmp_pgsz[1] = pgsz1;
}
306 
/*
 * Function to set up the page size values used to reprogram the DTLBs,
 * when page sizes used by a process change significantly.
 */
void
mmu_setup_page_sizes(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
{
	uint_t pgsz0, pgsz1;

	/*
	 * Don't program 2nd dtlb for kernel and ism hat
	 */
	ASSERT(hat->sfmmu_ismhat == NULL);
	ASSERT(hat != ksfmmup);

	if (cpu_impl_dual_pgsz == 0)	/* disable_dual_pgsz flag */
		return;

	/*
	 * hat->sfmmu_pgsz[] is an array whose elements
	 * contain a sorted order of page sizes.  Element
	 * 0 is the most commonly used page size, followed
	 * by element 1, and so on.
	 *
	 * ttecnt[] is an array of per-page-size page counts
	 * mapped into the process.
	 *
	 * If the HAT's choice for page sizes is unsuitable,
	 * we can override it here.  The new values written
	 * to the array will be handed back to us later to
	 * do the actual programming of the TLB hardware.
	 *
	 * The policy we use for programming the dual T8s on
	 * Cheetah+ and beyond is as follows:
	 *
	 *   We have two programmable TLBs, so we look at
	 *   the two most common page sizes in the array, which
	 *   have already been computed for us by the HAT.
	 *   If the TTE count of either of a preferred page size
	 *   exceeds the number of unlocked T16 entries,
	 *   we reprogram one of the T8s to that page size
	 *   to avoid thrashing in the T16.  Else we program
	 *   that T8 to the base page size.  Note that we do
	 *   not force either T8 to be the base page size if a
	 *   process is using more than two page sizes.  Policy
	 *   decisions about which page sizes are best to use are
	 *   left to the upper layers.
	 *
	 *   Note that for Panther, 4M and 512K pages need to be
	 *   programmed into T512_0, and 32M and 256M into T512_1,
	 *   so we don't want to go through the MIN/MAX code.
	 *   For partial-Panther systems, we still want to make sure
	 *   that 4M and 512K page sizes NEVER get into the T512_1.
	 *   Since the DTLB flags are not set up on a per-cpu basis,
	 *   Panther rules must be applied for mixed Panther/Cheetah+/
	 *   Jaguar configurations.
	 */
	if (panther_dtlb_restrictions) {
		if ((tmp_pgsz[1] == TTE512K) || (tmp_pgsz[1] == TTE4M)) {
			/*
			 * 512K/4M must live in slot 0 (T512_0): swap the
			 * slots if slot 0 can take slot 1's size, else
			 * demote slot 1 to the base page size.
			 */
			if ((tmp_pgsz[0] != TTE512K) &&
			    (tmp_pgsz[0] != TTE4M)) {
				pgsz1 = tmp_pgsz[0];
				pgsz0 = tmp_pgsz[1];
			} else {
				pgsz0 = tmp_pgsz[0];
				pgsz1 = page_szc(MMU_PAGESIZE);
			}
		} else {
			pgsz0 = tmp_pgsz[0];
			pgsz1 = tmp_pgsz[1];
		}
	} else {
		/* Non-Panther: simply order the pair, smaller code first. */
		pgsz0 = MIN(tmp_pgsz[0], tmp_pgsz[1]);
		pgsz1 = MAX(tmp_pgsz[0], tmp_pgsz[1]);
	}

	/*
	 * This implements PAGESIZE programming of the T8s
	 * if large TTE counts don't exceed the thresholds.
	 */
	if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
		pgsz0 = page_szc(MMU_PAGESIZE);
	if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
		pgsz1 = page_szc(MMU_PAGESIZE);
	tmp_pgsz[0] = pgsz0;
	tmp_pgsz[1] = pgsz1;
}
394 
395 /*
396  * The HAT calls this function when an MMU context is allocated so that we
397  * can reprogram the large TLBs appropriately for the new process using
398  * the context.
399  *
400  * The caller must hold the HAT lock.
401  */
402 void
403 mmu_set_ctx_page_sizes(struct hat *hat)
404 {
405 	uint_t pgsz0, pgsz1;
406 	uint_t new_cext;
407 
408 	ASSERT(sfmmu_hat_lock_held(hat));
409 	ASSERT(hat != ksfmmup);
410 
411 	if (cpu_impl_dual_pgsz == 0)	/* disable_dual_pgsz flag */
412 		return;
413 
414 	/*
415 	 * If supported, reprogram the TLBs to a larger pagesize.
416 	 */
417 	pgsz0 = hat->sfmmu_pgsz[0];
418 	pgsz1 = hat->sfmmu_pgsz[1];
419 	ASSERT(pgsz0 < mmu_page_sizes);
420 	ASSERT(pgsz1 < mmu_page_sizes);
421 #ifdef DEBUG
422 	if (panther_dtlb_restrictions) {
423 		ASSERT(pgsz1 != TTE512K);
424 		ASSERT(pgsz1 != TTE4M);
425 	}
426 	if (panther_only) {
427 		ASSERT(pgsz0 != TTE32M);
428 		ASSERT(pgsz0 != TTE256M);
429 	}
430 #endif /* DEBUG */
431 	new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
432 	if (hat->sfmmu_cext != new_cext) {
433 #ifdef DEBUG
434 		int i;
435 		/*
436 		 * assert cnum should be invalid, this is because pagesize
437 		 * can only be changed after a proc's ctxs are invalidated.
438 		 */
439 		for (i = 0; i < max_mmu_ctxdoms; i++) {
440 			ASSERT(hat->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
441 		}
442 #endif /* DEBUG */
443 		hat->sfmmu_cext = new_cext;
444 	}
445 
446 	/*
447 	 * sfmmu_setctx_sec() will take care of the
448 	 * rest of the chores reprogramming the hat->sfmmu_cext
449 	 * page size values into the DTLBs.
450 	 */
451 }
452 
/*
 * This function assumes that there are either four or six supported page
 * sizes and at most two programmable TLBs, so we need to decide which
 * page sizes are most important and then adjust the TLB page sizes
 * accordingly (if supported).
 *
 * If these assumptions change, this function will need to be
 * updated to support whatever the new limits are.
 */
void
mmu_check_page_sizes(sfmmu_t *sfmmup, uint64_t *ttecnt)
{
	uint64_t sortcnt[MMU_PAGE_SIZES];	/* scratch copy of ttecnt */
	uint8_t tmp_pgsz[MMU_PAGE_SIZES];	/* size codes by usage rank */
	uint8_t i, j, max;
	uint16_t oldval, newval;	/* packed (pgsz[0] << 8 | pgsz[1]) */

	/*
	 * We only consider reprogramming the TLBs if one or more of
	 * the two most used page sizes changes and we're using
	 * large pages in this process, except for Panther 32M/256M pages,
	 * which the Panther T16 does not support.
	 */
	if (sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) {
		/* Sort page sizes (selection sort, largest count first). */
		for (i = 0; i < mmu_page_sizes; i++) {
			sortcnt[i] = ttecnt[i];
		}
		for (j = 0; j < mmu_page_sizes; j++) {
			for (i = mmu_page_sizes - 1, max = 0; i > 0; i--) {
				if (sortcnt[i] > sortcnt[max])
					max = i;
			}
			tmp_pgsz[j] = max;
			sortcnt[max] = 0;	/* exclude from next pass */
		}

		/*
		 * Handle Panther page dtlb calcs separately. The check
		 * for actual or potential 32M/256M pages must occur
		 * every time due to lack of T16 support for them.
		 * The sort works fine for Ch+/Jag, but Panther has
		 * pagesize restrictions for both DTLBs.
		 */
		oldval = sfmmup->sfmmu_pgsz[0] << 8 | sfmmup->sfmmu_pgsz[1];

		if (panther_only) {
			mmu_fixup_large_pages(sfmmup, ttecnt, tmp_pgsz);
		} else {
			/* Check 2 largest values after the sort. */
			mmu_setup_page_sizes(sfmmup, ttecnt, tmp_pgsz);
		}
		newval = tmp_pgsz[0] << 8 | tmp_pgsz[1];
		if (newval != oldval) {
			/* Top-two preference changed: reprogram the hat. */
			sfmmu_reprog_pgsz_arr(sfmmup, tmp_pgsz);
		}
	}
}
511 
512 #endif	/* CPU_IMP_DUAL_PAGESIZE */
513 
/*
 * Allowable kernel heap large page configurations, per cpu
 * implementation.  The first entry listed for an implementation is its
 * default (used when segkmem_lpsize is not set in /etc/system).
 */
struct heap_lp_page_size {
	int    impl;		/* cpu implementation number */
	uint_t tte;		/* heap page size code */
	int    use_dt512;	/* nonzero: reprogram a T512 for this size */
};

struct heap_lp_page_size heap_lp_pgsz[] = {

	{CHEETAH_IMPL, TTE8K, 0},		/* default */
	{CHEETAH_IMPL, TTE64K, 0},
	{CHEETAH_IMPL, TTE4M, 0},

	{ CHEETAH_PLUS_IMPL, TTE4M,  1 },	/* default */
	{ CHEETAH_PLUS_IMPL, TTE4M,  0 },
	{ CHEETAH_PLUS_IMPL, TTE64K, 1 },
	{ CHEETAH_PLUS_IMPL, TTE64K, 0 },
	{ CHEETAH_PLUS_IMPL, TTE8K,  0 },

	{ JALAPENO_IMPL, TTE4M,  1 },		/* default */
	{ JALAPENO_IMPL, TTE4M,  0 },
	{ JALAPENO_IMPL, TTE64K, 1 },
	{ JALAPENO_IMPL, TTE64K, 0 },
	{ JALAPENO_IMPL, TTE8K,  0 },

	{ JAGUAR_IMPL, TTE4M, 1 },		/* default */
	{ JAGUAR_IMPL, TTE4M, 0 },
	{ JAGUAR_IMPL, TTE64K, 1 },
	{ JAGUAR_IMPL, TTE64K, 0 },
	{ JAGUAR_IMPL, TTE8K, 0 },

	{ SERRANO_IMPL, TTE4M,  1 },		/* default */
	{ SERRANO_IMPL, TTE4M,  0 },
	{ SERRANO_IMPL, TTE64K, 1 },
	{ SERRANO_IMPL, TTE64K, 0 },
	{ SERRANO_IMPL, TTE8K,  0 },

	{ PANTHER_IMPL, TTE4M, 1 },		/* default */
	{ PANTHER_IMPL, TTE4M, 0 },
	{ PANTHER_IMPL, TTE64K, 1 },
	{ PANTHER_IMPL, TTE64K, 0 },
	{ PANTHER_IMPL, TTE8K, 0 }
};

/* -1 until finalized by mmu_get_kernel_lpsize() from the table above. */
int	heaplp_use_dt512 = -1;
558 
559 void
560 mmu_init_kernel_pgsz(struct hat *hat)
561 {
562 	uint_t tte = page_szc(segkmem_lpsize);
563 	uchar_t new_cext_primary, new_cext_nucleus;
564 
565 	if (heaplp_use_dt512 == 0 || tte > TTE4M) {
566 		/* do not reprogram dt512 tlb */
567 		tte = TTE8K;
568 	}
569 
570 	new_cext_nucleus = TAGACCEXT_MKSZPAIR(tte, TTE8K);
571 	new_cext_primary = TAGACCEXT_MKSZPAIR(TTE8K, tte);
572 
573 	hat->sfmmu_cext = new_cext_primary;
574 	kcontextreg = ((uint64_t)new_cext_nucleus << CTXREG_NEXT_SHIFT) |
575 		((uint64_t)new_cext_primary << CTXREG_EXT_SHIFT);
576 	mmu_init_kcontext();
577 }
578 
579 size_t
580 mmu_get_kernel_lpsize(size_t lpsize)
581 {
582 	struct heap_lp_page_size *p_lpgsz, *pend_lpgsz;
583 	int impl = cpunodes[getprocessorid()].implementation;
584 	uint_t tte = TTE8K;
585 
586 	if (cpu_impl_dual_pgsz == 0) {
587 		heaplp_use_dt512 = 0;
588 		return (MMU_PAGESIZE);
589 	}
590 
591 	pend_lpgsz = (struct heap_lp_page_size *)
592 	    ((char *)heap_lp_pgsz + sizeof (heap_lp_pgsz));
593 
594 	/* search for a valid segkmem_lpsize */
595 	for (p_lpgsz = heap_lp_pgsz; p_lpgsz < pend_lpgsz; p_lpgsz++) {
596 		if (impl != p_lpgsz->impl)
597 			continue;
598 
599 		if (lpsize == 0) {
600 			/*
601 			 * no setting for segkmem_lpsize in /etc/system
602 			 * use default from the table
603 			 */
604 			tte = p_lpgsz->tte;
605 			heaplp_use_dt512 = p_lpgsz->use_dt512;
606 			break;
607 		}
608 
609 		if (lpsize == TTEBYTES(p_lpgsz->tte) &&
610 		    (heaplp_use_dt512 == -1 ||
611 			heaplp_use_dt512 == p_lpgsz->use_dt512)) {
612 
613 			tte = p_lpgsz->tte;
614 			heaplp_use_dt512 = p_lpgsz->use_dt512;
615 
616 			/* found a match */
617 			break;
618 		}
619 	}
620 
621 	if (p_lpgsz == pend_lpgsz) {
622 		/* nothing found: disable large page kernel heap */
623 		tte = TTE8K;
624 		heaplp_use_dt512 = 0;
625 	}
626 
627 	lpsize = TTEBYTES(tte);
628 
629 	return (lpsize);
630 }
631