/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <vm/vm_dep.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/cpu_module.h>
#include <sys/sysmacros.h>
#include <sys/panic.h>

/*
 * pan_disable_ism_large_pages and pan_disable_large_pages are the Panther-
 * specific versions of disable_ism_large_pages and disable_large_pages,
 * and feed back into those two hat variables at hat initialization time,
 * for Panther-only systems.
 *
 * chjag_disable_large_pages is the Ch/Jaguar-specific version of
 * disable_large_pages. Ditto for pan_disable_large_pages.
 * Note that the Panther and Ch/Jaguar ITLB do not support 32M/256M pages.
53 */ 54 static int panther_only = 0; 55 56 static uint_t pan_disable_large_pages = (1 << TTE256M); 57 static uint_t chjag_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M)); 58 59 static uint_t mmu_disable_ism_large_pages = ((1 << TTE64K) | 60 (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M)); 61 static uint_t mmu_disable_auto_data_large_pages = ((1 << TTE64K) | 62 (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M)); 63 static uint_t mmu_disable_auto_text_large_pages = ((1 << TTE64K) | 64 (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M)); 65 66 /* 67 * The function returns the USIII+(i)-IV+ mmu-specific values for the 68 * hat's disable_large_pages and disable_ism_large_pages variables. 69 * Currently the hat's disable_large_pages and disable_ism_large_pages 70 * already contain the generic sparc 4 page size info, and the return 71 * values are or'd with those values. 72 */ 73 uint_t 74 mmu_large_pages_disabled(uint_t flag) 75 { 76 uint_t pages_disable = 0; 77 extern int use_text_pgsz64K; 78 extern int use_text_pgsz512K; 79 80 if (flag == HAT_LOAD) { 81 if (panther_only) { 82 pages_disable = pan_disable_large_pages; 83 } else { 84 pages_disable = chjag_disable_large_pages; 85 } 86 } else if (flag == HAT_LOAD_SHARE) { 87 pages_disable = mmu_disable_ism_large_pages; 88 } else if (flag == HAT_AUTO_DATA) { 89 pages_disable = mmu_disable_auto_data_large_pages; 90 } else if (flag == HAT_AUTO_TEXT) { 91 pages_disable = mmu_disable_auto_text_large_pages; 92 if (use_text_pgsz512K) { 93 pages_disable &= ~(1 << TTE512K); 94 } 95 if (use_text_pgsz64K) { 96 pages_disable &= ~(1 << TTE64K); 97 } 98 } 99 return (pages_disable); 100 } 101 102 #if defined(CPU_IMP_DUAL_PAGESIZE) 103 /* 104 * If a platform is running with only Ch+ or Jaguar, and then someone DR's 105 * in a Panther board, the Panther mmu will not like it if one of the already 106 * running threads is context switched to the Panther and tries to program 107 * a 512K or 4M page into the T512_1. 
So make these platforms pay the price
 * and follow the Panther DTLB restrictions by default. :)
 * The mmu_init_mmu_page_sizes code below takes care of heterogeneous
 * platforms that don't support DR, like daktari.
 *
 * The effect of these restrictions is to limit the allowable values in
 * sfmmu_pgsz[0] and sfmmu_pgsz[1], since these hat variables are used in
 * mmu_set_ctx_page_sizes to set up the values in the sfmmu_cext that
 * are used at context switch time. The value in sfmmu_pgsz[0] is used in
 * P_pgsz0 and sfmmu_pgsz[1] is used in P_pgsz1, as per Figure F-1-1
 * IMMU and DMMU Primary Context Register in the Panther Implementation
 * Supplement and Table 15-21 DMMU Primary Context Register in the
 * Cheetah+ Delta PRM.
 */
#ifdef MIXEDCPU_DR_SUPPORTED
int panther_dtlb_restrictions = 1;
#else
int panther_dtlb_restrictions = 0;
#endif /* MIXEDCPU_DR_SUPPORTED */

/*
 * init_mmu_page_sizes is set to one after the bootup time initialization
 * via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a
 * valid value.
 */
int init_mmu_page_sizes = 0;

/*
 * mmu_init_large_pages is called with the desired ism_pagesize parameter,
 * for Panther-only systems. It may be called from set_platform_defaults,
 * if some value other than 32M is desired, for Panther-only systems.
 * mmu_ism_pagesize is the tunable. If it has a bad value, then only warn,
 * since it would be bad form to panic due to a user typo.
 *
 * The function re-initializes the disable_ism_large_pages and
 * pan_disable_large_pages variables, which are closely related.
 * Aka, if 32M is the desired [D]ISM page sizes, then 256M cannot be allowed
 * for non-ISM large page usage, or DTLB conflict will occur. Please see the
 * Panther PRM for additional DTLB technical info.
147 */ 148 void 149 mmu_init_large_pages(size_t ism_pagesize) 150 { 151 if (cpu_impl_dual_pgsz == 0) { /* disable_dual_pgsz flag */ 152 pan_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M)); 153 mmu_disable_ism_large_pages = ((1 << TTE64K) | 154 (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M)); 155 mmu_disable_auto_data_large_pages = ((1 << TTE64K) | 156 (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M)); 157 return; 158 } 159 160 switch (ism_pagesize) { 161 case MMU_PAGESIZE4M: 162 pan_disable_large_pages = (1 << TTE256M); 163 mmu_disable_ism_large_pages = ((1 << TTE64K) | 164 (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M)); 165 mmu_disable_auto_data_large_pages = ((1 << TTE64K) | 166 (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M)); 167 break; 168 case MMU_PAGESIZE32M: 169 pan_disable_large_pages = (1 << TTE256M); 170 mmu_disable_ism_large_pages = ((1 << TTE64K) | 171 (1 << TTE512K) | (1 << TTE256M)); 172 mmu_disable_auto_data_large_pages = ((1 << TTE64K) | 173 (1 << TTE512K) | (1 << TTE4M) | (1 << TTE256M)); 174 adjust_data_maxlpsize(ism_pagesize); 175 break; 176 case MMU_PAGESIZE256M: 177 pan_disable_large_pages = (1 << TTE32M); 178 mmu_disable_ism_large_pages = ((1 << TTE64K) | 179 (1 << TTE512K) | (1 << TTE32M)); 180 mmu_disable_auto_data_large_pages = ((1 << TTE64K) | 181 (1 << TTE512K) | (1 << TTE4M) | (1 << TTE32M)); 182 adjust_data_maxlpsize(ism_pagesize); 183 break; 184 default: 185 cmn_err(CE_WARN, "Unrecognized mmu_ism_pagesize value 0x%lx", 186 ism_pagesize); 187 break; 188 } 189 } 190 191 /* 192 * Re-initialize mmu_page_sizes and friends, for Panther mmu support. 193 * Called during very early bootup from check_cpus_set(). 194 * Can be called to verify that mmu_page_sizes are set up correctly. 195 * Note that ncpus is not initialized at this point in the bootup sequence. 
196 */ 197 int 198 mmu_init_mmu_page_sizes(int cinfo) 199 { 200 int npanther = cinfo; 201 202 if (!init_mmu_page_sizes) { 203 if (npanther == ncpunode) { 204 mmu_page_sizes = MMU_PAGE_SIZES; 205 mmu_hashcnt = MAX_HASHCNT; 206 mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE; 207 mmu_exported_pagesize_mask = (1 << TTE8K) | 208 (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) | 209 (1 << TTE32M) | (1 << TTE256M); 210 panther_dtlb_restrictions = 1; 211 panther_only = 1; 212 } else if (npanther > 0) { 213 panther_dtlb_restrictions = 1; 214 } 215 init_mmu_page_sizes = 1; 216 return (0); 217 } 218 return (1); 219 } 220 221 222 /* Cheetah+ and later worst case DTLB parameters */ 223 #ifndef LOCKED_DTLB_ENTRIES 224 #define LOCKED_DTLB_ENTRIES 5 /* 2 user TSBs, 2 nucleus, + OBP */ 225 #endif 226 #define TOTAL_DTLB_ENTRIES 16 227 #define AVAIL_32M_ENTRIES 0 228 #define AVAIL_256M_ENTRIES 0 229 #define AVAIL_DTLB_ENTRIES (TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES) 230 static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = { 231 AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES, 232 AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES, 233 AVAIL_32M_ENTRIES, AVAIL_256M_ENTRIES }; 234 235 /* 236 * The purpose of this code is to indirectly reorganize the sfmmu_pgsz array 237 * in order to handle the Panther mmu DTLB requirements. Panther only supports 238 * the 32M/256M pages in the T512_1 and not in the T16, so the Panther cpu 239 * can only support one of the two largest page sizes at a time (efficiently). 240 * Panther only supports 512K and 4M pages in the T512_0, and 32M/256M pages 241 * in the T512_1. So check the sfmmu flags and ttecnt before enabling 242 * the T512_1 for 32M or 256M page sizes, and make sure that 512K and 4M 243 * requests go to the T512_0. 244 * 245 * The tmp_pgsz array comes into this routine in sorted order, as it is 246 * sorted from largest to smallest #pages per pagesize in use by the hat code, 247 * and leaves with the Panther mmu DTLB requirements satisfied. 
Note that 248 * when the array leaves this function it may not contain all of the page 249 * size codes that it had coming into the function. 250 * 251 * Note that for DISM the flag can be set but the ttecnt can be 0, if we 252 * didn't fault any pages in. This allows the t512_1 to be reprogrammed, 253 * because the T16 does not support the two giant page sizes. ouch. 254 */ 255 void 256 mmu_fixup_large_pages(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz) 257 { 258 uint_t pgsz0 = tmp_pgsz[0]; 259 uint_t pgsz1 = tmp_pgsz[1]; 260 uint_t spgsz; 261 262 /* 263 * Don't program 2nd dtlb for kernel and ism hat 264 */ 265 ASSERT(hat->sfmmu_ismhat == 0); 266 ASSERT(hat != ksfmmup); 267 ASSERT(cpu_impl_dual_pgsz == 1); 268 269 ASSERT(!SFMMU_TTEFLAGS_ISSET(hat, HAT_32M_FLAG) || 270 !SFMMU_TTEFLAGS_ISSET(hat, HAT_256M_FLAG)); 271 ASSERT(!SFMMU_TTEFLAGS_ISSET(hat, HAT_256M_FLAG) || 272 !SFMMU_TTEFLAGS_ISSET(hat, HAT_32M_FLAG)); 273 ASSERT(!SFMMU_FLAGS_ISSET(hat, HAT_32M_ISM) || 274 !SFMMU_FLAGS_ISSET(hat, HAT_256M_ISM)); 275 ASSERT(!SFMMU_FLAGS_ISSET(hat, HAT_256M_ISM) || 276 !SFMMU_FLAGS_ISSET(hat, HAT_32M_ISM)); 277 278 if (SFMMU_TTEFLAGS_ISSET(hat, HAT_32M_FLAG) || 279 (ttecnt[TTE32M] != 0) || 280 SFMMU_FLAGS_ISSET(hat, HAT_32M_ISM)) { 281 282 spgsz = pgsz1; 283 pgsz1 = TTE32M; 284 if (pgsz0 == TTE32M) 285 pgsz0 = spgsz; 286 287 } else if (SFMMU_TTEFLAGS_ISSET(hat, HAT_256M_FLAG) || 288 (ttecnt[TTE256M] != 0) || 289 SFMMU_FLAGS_ISSET(hat, HAT_256M_ISM)) { 290 291 spgsz = pgsz1; 292 pgsz1 = TTE256M; 293 if (pgsz0 == TTE256M) 294 pgsz0 = spgsz; 295 296 } else if ((pgsz1 == TTE512K) || (pgsz1 == TTE4M)) { 297 if ((pgsz0 != TTE512K) && (pgsz0 != TTE4M)) { 298 spgsz = pgsz0; 299 pgsz0 = pgsz1; 300 pgsz1 = spgsz; 301 } else { 302 pgsz1 = page_szc(MMU_PAGESIZE); 303 } 304 } 305 /* 306 * This implements PAGESIZE programming of the T8s 307 * if large TTE counts don't exceed the thresholds. 
308 */ 309 if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0]) 310 pgsz0 = page_szc(MMU_PAGESIZE); 311 if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1]) 312 pgsz1 = page_szc(MMU_PAGESIZE); 313 tmp_pgsz[0] = pgsz0; 314 tmp_pgsz[1] = pgsz1; 315 } 316 317 /* 318 * Function to set up the page size values used to reprogram the DTLBs, 319 * when page sizes used by a process change significantly. 320 */ 321 void 322 mmu_setup_page_sizes(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz) 323 { 324 uint_t pgsz0, pgsz1; 325 326 /* 327 * Don't program 2nd dtlb for kernel and ism hat 328 */ 329 ASSERT(hat->sfmmu_ismhat == NULL); 330 ASSERT(hat != ksfmmup); 331 332 if (cpu_impl_dual_pgsz == 0) /* disable_dual_pgsz flag */ 333 return; 334 335 /* 336 * hat->sfmmu_pgsz[] is an array whose elements 337 * contain a sorted order of page sizes. Element 338 * 0 is the most commonly used page size, followed 339 * by element 1, and so on. 340 * 341 * ttecnt[] is an array of per-page-size page counts 342 * mapped into the process. 343 * 344 * If the HAT's choice for page sizes is unsuitable, 345 * we can override it here. The new values written 346 * to the array will be handed back to us later to 347 * do the actual programming of the TLB hardware. 348 * 349 * The policy we use for programming the dual T8s on 350 * Cheetah+ and beyond is as follows: 351 * 352 * We have two programmable TLBs, so we look at 353 * the two most common page sizes in the array, which 354 * have already been computed for us by the HAT. 355 * If the TTE count of either of a preferred page size 356 * exceeds the number of unlocked T16 entries, 357 * we reprogram one of the T8s to that page size 358 * to avoid thrashing in the T16. Else we program 359 * that T8 to the base page size. Note that we do 360 * not force either T8 to be the base page size if a 361 * process is using more than two page sizes. Policy 362 * decisions about which page sizes are best to use are 363 * left to the upper layers. 
364 * 365 * Note that for Panther, 4M and 512K pages need to be 366 * programmed into T512_0, and 32M and 256M into T512_1, 367 * so we don't want to go through the MIN/MAX code. 368 * For partial-Panther systems, we still want to make sure 369 * that 4M and 512K page sizes NEVER get into the T512_1. 370 * Since the DTLB flags are not set up on a per-cpu basis, 371 * Panther rules must be applied for mixed Panther/Cheetah+/ 372 * Jaguar configurations. 373 */ 374 if (panther_dtlb_restrictions) { 375 if ((tmp_pgsz[1] == TTE512K) || (tmp_pgsz[1] == TTE4M)) { 376 if ((tmp_pgsz[0] != TTE512K) && 377 (tmp_pgsz[0] != TTE4M)) { 378 pgsz1 = tmp_pgsz[0]; 379 pgsz0 = tmp_pgsz[1]; 380 } else { 381 pgsz0 = tmp_pgsz[0]; 382 pgsz1 = page_szc(MMU_PAGESIZE); 383 } 384 } else { 385 pgsz0 = tmp_pgsz[0]; 386 pgsz1 = tmp_pgsz[1]; 387 } 388 } else { 389 pgsz0 = MIN(tmp_pgsz[0], tmp_pgsz[1]); 390 pgsz1 = MAX(tmp_pgsz[0], tmp_pgsz[1]); 391 } 392 393 /* 394 * This implements PAGESIZE programming of the T8s 395 * if large TTE counts don't exceed the thresholds. 396 */ 397 if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0]) 398 pgsz0 = page_szc(MMU_PAGESIZE); 399 if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1]) 400 pgsz1 = page_szc(MMU_PAGESIZE); 401 tmp_pgsz[0] = pgsz0; 402 tmp_pgsz[1] = pgsz1; 403 } 404 405 /* 406 * The HAT calls this function when an MMU context is allocated so that we 407 * can reprogram the large TLBs appropriately for the new process using 408 * the context. 409 * 410 * The caller must hold the HAT lock. 411 */ 412 void 413 mmu_set_ctx_page_sizes(struct hat *hat) 414 { 415 uint_t pgsz0, pgsz1; 416 uint_t new_cext; 417 418 ASSERT(sfmmu_hat_lock_held(hat)); 419 ASSERT(hat != ksfmmup); 420 421 if (cpu_impl_dual_pgsz == 0) /* disable_dual_pgsz flag */ 422 return; 423 424 /* 425 * If supported, reprogram the TLBs to a larger pagesize. 
426 */ 427 pgsz0 = hat->sfmmu_pgsz[0]; 428 pgsz1 = hat->sfmmu_pgsz[1]; 429 ASSERT(pgsz0 < mmu_page_sizes); 430 ASSERT(pgsz1 < mmu_page_sizes); 431 #ifdef DEBUG 432 if (panther_dtlb_restrictions) { 433 ASSERT(pgsz1 != TTE512K); 434 ASSERT(pgsz1 != TTE4M); 435 } 436 if (panther_only) { 437 ASSERT(pgsz0 != TTE32M); 438 ASSERT(pgsz0 != TTE256M); 439 } 440 #endif /* DEBUG */ 441 new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0); 442 if (hat->sfmmu_cext != new_cext) { 443 #ifdef DEBUG 444 int i; 445 /* 446 * assert cnum should be invalid, this is because pagesize 447 * can only be changed after a proc's ctxs are invalidated. 448 */ 449 for (i = 0; i < max_mmu_ctxdoms; i++) { 450 ASSERT(hat->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 451 } 452 #endif /* DEBUG */ 453 hat->sfmmu_cext = new_cext; 454 } 455 456 /* 457 * sfmmu_setctx_sec() will take care of the 458 * rest of the chores reprogramming the hat->sfmmu_cext 459 * page size values into the DTLBs. 460 */ 461 } 462 463 /* 464 * This function assumes that there are either four or six supported page 465 * sizes and at most two programmable TLBs, so we need to decide which 466 * page sizes are most important and then adjust the TLB page sizes 467 * accordingly (if supported). 468 * 469 * If these assumptions change, this function will need to be 470 * updated to support whatever the new limits are. 471 */ 472 void 473 mmu_check_page_sizes(sfmmu_t *sfmmup, uint64_t *ttecnt) 474 { 475 uint64_t sortcnt[MMU_PAGE_SIZES]; 476 uint8_t tmp_pgsz[MMU_PAGE_SIZES]; 477 uint8_t i, j, max; 478 uint16_t oldval, newval; 479 480 /* 481 * We only consider reprogramming the TLBs if one or more of 482 * the two most used page sizes changes and we're using 483 * large pages in this process, except for Panther 32M/256M pages, 484 * which the Panther T16 does not support. 485 */ 486 if (SFMMU_LGPGS_INUSE(sfmmup)) { 487 /* Sort page sizes. 
*/ 488 for (i = 0; i < mmu_page_sizes; i++) { 489 sortcnt[i] = ttecnt[i]; 490 } 491 for (j = 0; j < mmu_page_sizes; j++) { 492 for (i = mmu_page_sizes - 1, max = 0; i > 0; i--) { 493 if (sortcnt[i] > sortcnt[max]) 494 max = i; 495 } 496 tmp_pgsz[j] = max; 497 sortcnt[max] = 0; 498 } 499 500 /* 501 * Handle Panther page dtlb calcs separately. The check 502 * for actual or potential 32M/256M pages must occur 503 * every time due to lack of T16 support for them. 504 * The sort works fine for Ch+/Jag, but Panther has 505 * pagesize restrictions for both DTLBs. 506 */ 507 oldval = sfmmup->sfmmu_pgsz[0] << 8 | sfmmup->sfmmu_pgsz[1]; 508 509 if (panther_only) { 510 mmu_fixup_large_pages(sfmmup, ttecnt, tmp_pgsz); 511 } else { 512 /* Check 2 largest values after the sort. */ 513 mmu_setup_page_sizes(sfmmup, ttecnt, tmp_pgsz); 514 } 515 newval = tmp_pgsz[0] << 8 | tmp_pgsz[1]; 516 if (newval != oldval) { 517 sfmmu_reprog_pgsz_arr(sfmmup, tmp_pgsz); 518 } 519 } 520 } 521 522 #endif /* CPU_IMP_DUAL_PAGESIZE */ 523 524 struct heap_lp_page_size { 525 int impl; 526 uint_t tte; 527 int use_dt512; 528 }; 529 530 struct heap_lp_page_size heap_lp_pgsz[] = { 531 532 {CHEETAH_IMPL, TTE8K, 0}, /* default */ 533 {CHEETAH_IMPL, TTE64K, 0}, 534 {CHEETAH_IMPL, TTE4M, 0}, 535 536 { CHEETAH_PLUS_IMPL, TTE4M, 1 }, /* default */ 537 { CHEETAH_PLUS_IMPL, TTE4M, 0 }, 538 { CHEETAH_PLUS_IMPL, TTE64K, 1 }, 539 { CHEETAH_PLUS_IMPL, TTE64K, 0 }, 540 { CHEETAH_PLUS_IMPL, TTE8K, 0 }, 541 542 { JALAPENO_IMPL, TTE4M, 1 }, /* default */ 543 { JALAPENO_IMPL, TTE4M, 0 }, 544 { JALAPENO_IMPL, TTE64K, 1 }, 545 { JALAPENO_IMPL, TTE64K, 0 }, 546 { JALAPENO_IMPL, TTE8K, 0 }, 547 548 { JAGUAR_IMPL, TTE4M, 1 }, /* default */ 549 { JAGUAR_IMPL, TTE4M, 0 }, 550 { JAGUAR_IMPL, TTE64K, 1 }, 551 { JAGUAR_IMPL, TTE64K, 0 }, 552 { JAGUAR_IMPL, TTE8K, 0 }, 553 554 { SERRANO_IMPL, TTE4M, 1 }, /* default */ 555 { SERRANO_IMPL, TTE4M, 0 }, 556 { SERRANO_IMPL, TTE64K, 1 }, 557 { SERRANO_IMPL, TTE64K, 0 }, 558 { SERRANO_IMPL, 
TTE8K, 0 }, 559 560 { PANTHER_IMPL, TTE4M, 1 }, /* default */ 561 { PANTHER_IMPL, TTE4M, 0 }, 562 { PANTHER_IMPL, TTE64K, 1 }, 563 { PANTHER_IMPL, TTE64K, 0 }, 564 { PANTHER_IMPL, TTE8K, 0 } 565 }; 566 567 int heaplp_use_dt512 = -1; 568 569 void 570 mmu_init_kernel_pgsz(struct hat *hat) 571 { 572 uint_t tte = page_szc(segkmem_lpsize); 573 uchar_t new_cext_primary, new_cext_nucleus; 574 575 if (heaplp_use_dt512 == 0 || tte > TTE4M) { 576 /* do not reprogram dt512 tlb */ 577 tte = TTE8K; 578 } 579 580 new_cext_nucleus = TAGACCEXT_MKSZPAIR(tte, TTE8K); 581 new_cext_primary = TAGACCEXT_MKSZPAIR(TTE8K, tte); 582 583 hat->sfmmu_cext = new_cext_primary; 584 kcontextreg = ((uint64_t)new_cext_nucleus << CTXREG_NEXT_SHIFT) | 585 ((uint64_t)new_cext_primary << CTXREG_EXT_SHIFT); 586 } 587 588 size_t 589 mmu_get_kernel_lpsize(size_t lpsize) 590 { 591 struct heap_lp_page_size *p_lpgsz, *pend_lpgsz; 592 int impl = cpunodes[getprocessorid()].implementation; 593 uint_t tte = TTE8K; 594 595 if (cpu_impl_dual_pgsz == 0) { 596 heaplp_use_dt512 = 0; 597 return (MMU_PAGESIZE); 598 } 599 600 pend_lpgsz = (struct heap_lp_page_size *) 601 ((char *)heap_lp_pgsz + sizeof (heap_lp_pgsz)); 602 603 /* search for a valid segkmem_lpsize */ 604 for (p_lpgsz = heap_lp_pgsz; p_lpgsz < pend_lpgsz; p_lpgsz++) { 605 if (impl != p_lpgsz->impl) 606 continue; 607 608 if (lpsize == 0) { 609 /* 610 * no setting for segkmem_lpsize in /etc/system 611 * use default from the table 612 */ 613 tte = p_lpgsz->tte; 614 heaplp_use_dt512 = p_lpgsz->use_dt512; 615 break; 616 } 617 618 if (lpsize == TTEBYTES(p_lpgsz->tte) && 619 (heaplp_use_dt512 == -1 || 620 heaplp_use_dt512 == p_lpgsz->use_dt512)) { 621 622 tte = p_lpgsz->tte; 623 heaplp_use_dt512 = p_lpgsz->use_dt512; 624 625 /* found a match */ 626 break; 627 } 628 } 629 630 if (p_lpgsz == pend_lpgsz) { 631 /* nothing found: disable large page kernel heap */ 632 tte = TTE8K; 633 heaplp_use_dt512 = 0; 634 } 635 636 lpsize = TTEBYTES(tte); 637 638 return (lpsize); 
639 } 640