/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#ifndef	_VM_DEP_H
#define	_VM_DEP_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/clock.h>
#include <vm/hat_pte.h>

/*
 * WARNING: vm_dep.h is included by files in common.  As such, macros
 * dependent upon PTE36 such as LARGEPAGESIZE cannot be used in this file.
 */

#define	GETTICK()	tsc_read()

/* memranges in descending order */
extern pfn_t		*memranges;

#define	MEMRANGEHI(mtype)					\
	((mtype > 0) ? memranges[mtype - 1] - 1 : physmax)
#define	MEMRANGELO(mtype)	(memranges[mtype])

#define	MTYPE_FREEMEM(mt)					\
	(mnoderanges[mt].mnr_mt_clpgcnt +			\
	mnoderanges[mt].mnr_mt_flpgcnt +			\
	mnoderanges[mt].mnr_mt_lgpgcnt)

/*
 * Combined memory ranges from mnode and memranges[] to manage a single
 * mnode/mtype dimension in the page lists.
 */
typedef struct {
	pfn_t	mnr_pfnlo;
	pfn_t	mnr_pfnhi;
	int	mnr_mnode;
	int	mnr_memrange;		/* index into memranges[] */
	/* maintain page list stats */
	pgcnt_t	mnr_mt_pgmax;		/* mnode/mtype max page cnt */
	pgcnt_t	mnr_mt_clpgcnt;		/* cache list cnt */
	pgcnt_t	mnr_mt_flpgcnt;		/* free list cnt - small pages */
	pgcnt_t	mnr_mt_lgpgcnt;		/* free list cnt - large pages */
#ifdef DEBUG
	struct mnr_mts {		/* mnode/mtype szc stats */
		pgcnt_t	mnr_mts_pgcnt;
		int	mnr_mts_colors;
		pgcnt_t	*mnr_mtsc_pgcnt;
	} *mnr_mts;
#endif
} mnoderange_t;
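
/*
 * Illustration (hypothetical single-mnode 8GB machine, using the
 * conventional x86 ranges 0-16m, 16m-2g, 2g-4g and 4g+): memranges[]
 * would hold the descending range-start pfns { 0x100000, 0x80000,
 * 0x1000, 0 }, so MEMRANGELO(0)/MEMRANGEHI(0) span 4g..physmax while
 * MEMRANGELO(3)/MEMRANGEHI(3) span pfns 0..0xfff (0-16m).  Each
 * mnoderange_t pairs one mnode with one such range, and an mtype is
 * simply an index into mnoderanges[]; mtype 0 is the lowest (below
 * 16m) pool consulted by FREEMEM16M below.
 */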

#ifdef DEBUG
#define	PLCNT_SZ(ctrs_sz) {						\
	int	szc, colors;						\
	ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) *		\
	    mmu_page_sizes;						\
	for (szc = 0; szc < mmu_page_sizes; szc++) {			\
		colors = page_get_pagecolors(szc);			\
		ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;	\
	}								\
}

#define	PLCNT_INIT(addr) {						\
	int	mt, szc, colors;					\
	for (mt = 0; mt < mnoderangecnt; mt++) {			\
		mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;	\
		addr += (sizeof (struct mnr_mts) * mmu_page_sizes);	\
		for (szc = 0; szc < mmu_page_sizes; szc++) {		\
			colors = page_get_pagecolors(szc);		\
			mnoderanges[mt].mnr_mts[szc].mnr_mts_colors =	\
			    colors;					\
			mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =	\
			    (pgcnt_t *)addr;				\
			addr += (sizeof (pgcnt_t) * colors);		\
		}							\
	}								\
}

#define	PLCNT_DO(pp, mtype, szc, cnt, flags) {				\
	int	bin = PP_2_BIN(pp);					\
	if (flags & PG_CACHE_LIST)					\
		atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt,	\
		    cnt);						\
	else if (szc)							\
		atomic_add_long(&mnoderanges[mtype].mnr_mt_lgpgcnt,	\
		    cnt);						\
	else								\
		atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt,	\
		    cnt);						\
	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, \
	    cnt);							\
	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].		\
	    mnr_mtsc_pgcnt[bin], cnt);					\
}
#else
#define	PLCNT_SZ(ctrs_sz)
#define	PLCNT_INIT(addr)
#define	PLCNT_DO(pp, mtype, szc, cnt, flags) {				\
	if (flags & PG_CACHE_LIST)					\
		atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt,	\
		    cnt);						\
	else if (szc)							\
		atomic_add_long(&mnoderanges[mtype].mnr_mt_lgpgcnt,	\
		    cnt);						\
	else								\
		atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt,	\
		    cnt);						\
}
#endif

#define	PLCNT_INCR(pp, mnode, mtype, szc, flags) {			\
	long	cnt = (1 << PAGE_BSZS_SHIFT(szc));			\
	ASSERT(mtype == PP_2_MTYPE(pp));				\
	if (physmax4g && mtype <= mtype4g)				\
		atomic_add_long(&freemem4g, cnt);			\
	PLCNT_DO(pp, mtype, szc, cnt, flags);				\
}

#define	PLCNT_DECR(pp, mnode, mtype, szc, flags) {			\
	long	cnt = ((-1) << PAGE_BSZS_SHIFT(szc));			\
	ASSERT(mtype == PP_2_MTYPE(pp));				\
	if (physmax4g && mtype <= mtype4g)				\
		atomic_add_long(&freemem4g, cnt);			\
	PLCNT_DO(pp, mtype, szc, cnt, flags);				\
}

/*
 * Macros to update page list max counts.  No-op on x86.
 */
#define	PLCNT_XFER_NORELOC(pp)

#define	PLCNT_MODIFY_MAX(pfn, cnt)	mtype_modify_max(pfn, (pgcnt_t)cnt)

extern mnoderange_t	*mnoderanges;
extern int		mnoderangecnt;
extern int		mtype4g;

/*
 * 4g memory management variables for systems with more than 4g of memory:
 *
 * physical memory below 4g is required for 32bit dma devices and, currently,
 * for kmem memory.  On systems with more than 4g of memory, the pool of memory
 * below 4g can be depleted without any paging activity given that there is
 * likely to be sufficient memory above 4g.
 *
 * physmax4g is set true if the largest pfn is over 4g.  The rest of the
 * 4g memory management code is enabled only when physmax4g is true.
 *
 * maxmem4g is the count of the maximum number of pages on the page lists
 * with physical addresses below 4g.  It can be a lot less than 4g given that
 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
 * agp aperture etc.
 *
 * freemem4g maintains the count of the number of available pages on the
 * page lists with physical addresses below 4g.
 *
 * DESFREE4G specifies the desired amount of below 4g memory.  It defaults
 * to 1/16th (desfree4gshift = 4), roughly 6%, of maxmem4g.
 *
 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
 * and the amount of physical memory above 4g is greater than freemem4g.
 * In this case, page_get_* routines will restrict below 4g allocations
 * for requests that don't specifically require it.
 */

extern int		physmax4g;
extern pgcnt_t		maxmem4g;
extern pgcnt_t		freemem4g;
extern int		lotsfree4gshift;
extern int		desfree4gshift;

#define	LOTSFREE4G	(maxmem4g >> lotsfree4gshift)
#define	DESFREE4G	(maxmem4g >> desfree4gshift)

#define	RESTRICT4G_ALLOC					\
	(physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))
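
/*
 * Worked example (hypothetical numbers): with 786432 usable pages below
 * 4g (3GB of 4K pages), DESFREE4G is 786432 >> 4 == 49152 pages (192MB).
 * RESTRICT4G_ALLOC then fires once freemem4g drops under 49152 pages
 * while (freemem4g << 1) < freemem, i.e. while more than half of all
 * free memory lies above 4g, so unconstrained allocations can safely
 * be steered there.
 */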

/*
 * 16m memory management:
 *
 * reserve some amount of physical memory below 16m for legacy devices.
 *
 * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
 * 16m or if the 16m pool would drop below DESFREE16M.
 *
 * In this case, general page allocations via page_get_{free,cache}list
 * routines will be restricted from allocating from the 16m pool.  Allocations
 * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
 * are not restricted.
 */

#define	FREEMEM16M	MTYPE_FREEMEM(0)
#define	DESFREE16M	desfree16m
#define	RESTRICT16M_ALLOC(freemem, pgcnt, flags)		\
	((freemem != 0) && ((flags & PG_PANIC) == 0) &&		\
	    ((freemem >= (FREEMEM16M)) ||			\
	    (FREEMEM16M < (DESFREE16M + pgcnt))))

extern pgcnt_t		desfree16m;

extern int		restricted_kmemalloc;
extern int		memrange_num(pfn_t);
extern int		pfn_2_mtype(pfn_t);
extern int		mtype_func(int, int, uint_t);
extern void		mtype_modify_max(pfn_t, long);
extern int		mnode_pgcnt(int);

#define	NUM_MEM_RANGES	4		/* memory range types */

/*
 * Per page size free lists.  Allocated dynamically.
 * dimensions [mtype][mmu_page_sizes][colors]
 *
 * mtype specifies a physical memory range with a unique mnode.
 */
extern page_t ****page_freelists;

#define	PAGE_FREELISTS(mnode, szc, color, mtype)		\
	(*(page_freelists[mtype][szc] + (color)))

/*
 * For now there is only a single size cache list.  Allocated dynamically.
 * dimensions [mtype][colors]
 *
 * mtype specifies a physical memory range with a unique mnode.
 */
extern page_t ***page_cachelists;

#define	PAGE_CACHELISTS(mnode, color, mtype)			\
	(*(page_cachelists[mtype] + (color)))

/*
 * There are mutexes for both the page freelist
 * and the page cachelist.  We want enough locks to make contention
 * reasonable, but not too many -- otherwise page_freelist_lock() gets
 * so expensive that it becomes the bottleneck!
 */
#define	NPC_MUTEX	16

extern kmutex_t	*fpc_mutex[NPC_MUTEX];
extern kmutex_t	*cpc_mutex[NPC_MUTEX];

extern page_t *page_get_mnode_freelist(int, uint_t, int, uchar_t, uint_t);
extern page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);

/* Find the bin for the given page if it was of size szc */
#define	PP_2_BIN_SZC(pp, szc)					\
	(((pp->p_pagenum) & page_colors_mask) >>		\
	(hw_page_array[szc].hp_shift - hw_page_array[0].hp_shift))

#define	PP_2_BIN(pp)		(PP_2_BIN_SZC(pp, pp->p_szc))

#define	PP_2_MEM_NODE(pp)	(PFN_2_MEM_NODE(pp->p_pagenum))
#define	PP_2_MTYPE(pp)		(pfn_2_mtype(pp->p_pagenum))
#define	PP_2_SZC(pp)		(pp->p_szc)

#define	SZCPAGES(szc)		(1 << PAGE_BSZS_SHIFT(szc))
#define	PFN_BASE(pfnum, szc)	(pfnum & ~(SZCPAGES(szc) - 1))
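
/*
 * Worked example (assuming 4K base pages and a 2M szc 1 page, i.e.
 * PAGE_BSZS_SHIFT(1) == 9): SZCPAGES(1) == 512, and a page with
 * pfn 0x12345 has PFN_BASE(0x12345, 1) == (0x12345 & ~0x1ff) == 0x12200,
 * the first constituent pfn of its enclosing 2M page.
 */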

extern struct cpu	cpus[];
#define	CPU0		cpus

#if defined(__amd64)

/*
 * set the mtype range (called from page_get_{free,cache}list)
 * - set range to above 4g if the system has more than 4g of memory and the
 * amount of memory below 4g runs low.  If not, set range to above 16m if
 * the 16m threshold is reached; otherwise set range to all of memory
 * starting from the hi pfns.
 *
 * page_get_anylist gets its mtype range from the specified ddi_dma_attr_t.
 */
#define	MTYPE_INIT(mtype, vp, vaddr, flags, pgsz) {			\
	mtype = mnoderangecnt - 1;					\
	if (RESTRICT4G_ALLOC) {						\
		VM_STAT_ADD(vmm_vmstats.restrict4gcnt);			\
		/* here only for > 4g systems */			\
		flags |= PGI_MT_RANGE4G;				\
	} else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), flags)) {	\
		flags |= PGI_MT_RANGE16M;				\
	} else {							\
		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);		\
		VM_STAT_COND_ADD((flags & PG_PANIC),			\
		    vmm_vmstats.pgpanicalloc);				\
		flags |= PGI_MT_RANGE0;					\
	}								\
}

#elif defined(__i386)

/*
 * set the mtype range
 * - kmem requests need to be below 4g if restricted_kmemalloc is set.
 * - for non kmem requests, set range to above 4g if the amount of memory
 * below 4g runs low.
 */
#define	MTYPE_INIT(mtype, vp, vaddr, flags, pgsz) {			\
	if (restricted_kmemalloc && (vp) == &kvp &&			\
	    (caddr_t)(vaddr) >= kernelheap &&				\
	    (caddr_t)(vaddr) < ekernelheap) {				\
		ASSERT(physmax4g);					\
		mtype = mtype4g;					\
		if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),		\
		    btop(pgsz), flags)) {				\
			flags |= PGI_MT_RANGE16M;			\
		} else {						\
			VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);	\
			VM_STAT_COND_ADD((flags & PG_PANIC),		\
			    vmm_vmstats.pgpanicalloc);			\
			flags |= PGI_MT_RANGE0;				\
		}							\
	} else {							\
		mtype = mnoderangecnt - 1;				\
		if (RESTRICT4G_ALLOC) {					\
			VM_STAT_ADD(vmm_vmstats.restrict4gcnt);		\
			/* here only for > 4g systems */		\
			flags |= PGI_MT_RANGE4G;			\
		} else if (RESTRICT16M_ALLOC(freemem, btop(pgsz),	\
		    flags)) {						\
			flags |= PGI_MT_RANGE16M;			\
		} else {						\
			VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);	\
			VM_STAT_COND_ADD((flags & PG_PANIC),		\
			    vmm_vmstats.pgpanicalloc);			\
			flags |= PGI_MT_RANGE0;				\
		}							\
	}								\
}

#endif	/* __i386 */

/*
 * macros to loop through the mtype range (page_get_mnode_{free,cache,any}list,
 * and page_get_contig_pages)
 *
 * MTYPE_START sets the initial mtype; it is -1 if the specified mtype range
 * does not contain mnode.
 *
 * MTYPE_NEXT sets the next mtype; it is -1 if there are no more valid
 * mtypes in the range.
 */
#define	MTYPE_START(mnode, mtype, flags)				\
	(mtype = mtype_func(mnode, mtype, flags))

#define	MTYPE_NEXT(mnode, mtype, flags) {				\
	if (flags & PGI_MT_RANGE) {					\
		mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);	\
	} else {							\
		mtype = -1;						\
	}								\
}
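
/*
 * Sketch of the intended iteration pattern (illustrative only; the real
 * consumers are the page_get_*() routines named above):
 *
 *	MTYPE_INIT(mtype, vp, vaddr, flags, pgsz);
 *	if (MTYPE_START(mnode, mtype, flags) != -1) {
 *		do {
 *			... search the [mnode, mtype] page lists ...
 *			MTYPE_NEXT(mnode, mtype, flags);
 *		} while (mtype != -1);
 *	}
 */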

/* mtype init for page_get_replacement_page */
#define	MTYPE_PGR_INIT(mtype, flags, pp, mnode, pgcnt) {		\
	mtype = mnoderangecnt - 1;					\
	if (RESTRICT16M_ALLOC(freemem, pgcnt, flags)) {			\
		flags |= PGI_MT_RANGE16M;				\
	} else {							\
		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);		\
		flags |= PGI_MT_RANGE0;					\
	}								\
}

#define	MNODE_PGCNT(mnode)	mnode_pgcnt(mnode)

#define	MNODETYPE_2_PFN(mnode, mtype, pfnlo, pfnhi)			\
	ASSERT(mnoderanges[mtype].mnr_mnode == mnode);			\
	pfnlo = mnoderanges[mtype].mnr_pfnlo;				\
	pfnhi = mnoderanges[mtype].mnr_pfnhi;

#define	PC_BIN_MUTEX(mnode, bin, flags)	((flags & PG_FREE_LIST) ?	\
	&fpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode] :			\
	&cpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode])

#define	FPC_MUTEX(mnode, i)	(&fpc_mutex[i][mnode])
#define	CPC_MUTEX(mnode, i)	(&cpc_mutex[i][mnode])

#ifdef DEBUG
#define	CHK_LPG(pp, szc)	chk_lpg(pp, szc)
extern void	chk_lpg(page_t *, uchar_t);
#else
#define	CHK_LPG(pp, szc)
#endif

#define	FULL_REGION_CNT(rg_szc)	\
	(LEVEL_SIZE(rg_szc) >> LEVEL_SHIFT(rg_szc - 1))

/* Return the leader for this mapping size */
#define	PP_GROUPLEADER(pp, szc) \
	(&(pp)[-(int)((pp)->p_pagenum & (SZCPAGES(szc)-1))])

/* Return the root page for this page based on p_szc */
#define	PP_PAGEROOT(pp) ((pp)->p_szc == 0 ? (pp) : \
	PP_GROUPLEADER((pp), (pp)->p_szc))

/*
 * The counter base must be per page_counter element to prevent
 * races when re-indexing, and the base page size element should
 * be aligned on a boundary of the given region size.
 *
 * We also round up the number of pages spanned by the counters
 * for a given region to PC_BASE_ALIGN in certain situations to simplify
 * the coding for some non-performance critical routines.
 */
#define	PC_BASE_ALIGN		((pfn_t)1 << PAGE_BSZS_SHIFT(MMU_PAGE_SIZES-1))
#define	PC_BASE_ALIGN_MASK	(PC_BASE_ALIGN - 1)

/*
 * cpu/mmu-dependent vm variables
 */
extern uint_t	mmu_page_sizes;
extern uint_t	mmu_exported_page_sizes;

/* For x86, userszc is the same as the kernel's szc */
#define	USERSZC_2_SZC(userszc)	(userszc)
#define	SZC_2_USERSZC(szc)	(szc)

/*
 * for hw_page_map_t, sized to hold the ratio of large page to base
 * pagesize (1024 max)
 */
typedef	short	hpmctr_t;

/*
 * get the setsize of the current cpu - assume homogeneous for x86
 */
extern int	l2cache_sz, l2cache_linesz, l2cache_assoc;

#define	L2CACHE_ALIGN		l2cache_linesz
#define	L2CACHE_ALIGN_MAX	64
#define	CPUSETSIZE()		\
	(l2cache_assoc ? (l2cache_sz / l2cache_assoc) : MMU_PAGESIZE)
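
/*
 * E.g. a (hypothetical) 512K, 8-way set-associative L2 gives
 * CPUSETSIZE() == 64K: physical addresses that are equal modulo this
 * value index the same cache set, which is the collision that page
 * coloring sizes its color bins to avoid.
 */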

/*
 * Return the log2(pagesize(szc) / MMU_PAGESIZE) --- or the shift count
 * for the number of base pages in this pagesize
 */
#define	PAGE_BSZS_SHIFT(szc)	(LEVEL_SHIFT(szc) - MMU_PAGESHIFT)

/*
 * Internal PG_ flags.
 */
#define	PGI_RELOCONLY	0x010000	/* opposite of PG_NORELOC */
#define	PGI_NOCAGE	0x020000	/* cage is disabled */
#define	PGI_PGCPHIPRI	0x040000	/* page_get_contig_page pri alloc */
#define	PGI_PGCPSZC0	0x080000	/* relocate base pagesize page */

/*
 * PGI range flags - should not overlap PGI flags
 */
#define	PGI_MT_RANGE0	0x1000000	/* mtype range to 0 */
#define	PGI_MT_RANGE16M	0x2000000	/* mtype range to 16m */
#define	PGI_MT_RANGE4G	0x4000000	/* mtype range to 4g */
#define	PGI_MT_NEXT	0x8000000	/* get next mtype */
#define	PGI_MT_RANGE	(PGI_MT_RANGE0 | PGI_MT_RANGE16M | PGI_MT_RANGE4G)

/*
 * hash the as and addr to get a bin.
 */
#define	AS_2_BIN(as, seg, vp, addr, bin)				\
	bin = ((((uintptr_t)(addr) >> PAGESHIFT) + ((uintptr_t)(as) >> 4)) \
	    & page_colors_mask)

/*
 * cpu private vm data - accessed thru CPU->cpu_vm_data
 * vc_pnum_memseg: tracks last memseg visited in page_numtopp_nolock()
 * vc_pnext_memseg: tracks last memseg visited in page_nextn()
 * vc_kmptr: original unaligned kmem pointer for this vm_cpu_data_t
 * vc_kmsize: original kmem size for this vm_cpu_data_t
 */
typedef struct {
	struct memseg	*vc_pnum_memseg;
	struct memseg	*vc_pnext_memseg;
	void		*vc_kmptr;
	size_t		vc_kmsize;
} vm_cpu_data_t;

/* allocation size to ensure vm_cpu_data_t resides in its own cache line */
#define	VM_CPU_DATA_PADSIZE						\
	(P2ROUNDUP(sizeof (vm_cpu_data_t), L2CACHE_ALIGN_MAX))

/* for boot cpu before kmem is initialized */
extern char	vm_cpu_data0[];
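
/*
 * For instance, on an LP64 kernel vm_cpu_data_t is 32 bytes (two
 * pointers, a void * and a size_t), so VM_CPU_DATA_PADSIZE rounds the
 * allocation up to 64 bytes and each cpu's copy occupies a cache line
 * by itself, avoiding false sharing between cpus.
 */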

/*
 * When a bin is empty, and we can't satisfy a color request correctly,
 * we scan.  If we assume that the programs have reasonable spatial
 * behavior, then it will not be a good idea to use the adjacent color.
 * Using the adjacent color would result in virtually adjacent addresses
 * mapping into the same spot in the cache.  So, if we stumble across
 * an empty bin, skip a bunch before looking.  After the first skip,
 * then just look one bin at a time so we don't miss our cache on
 * every look.  Be sure to check every bin.  Page_create() will panic
 * if we miss a page.
 *
 * This also explains the `<=' in the for loops in both page_get_freelist()
 * and page_get_cachelist().  Since we checked the target bin, skipped
 * a bunch, then continued one at a time, we wind up checking the target
 * bin twice to make sure we get all of the bins.
 */
#define	BIN_STEP	19

#ifdef VM_STATS
struct vmm_vmstats_str {
	ulong_t	pgf_alloc[MMU_PAGE_SIZES];	/* page_get_freelist */
	ulong_t	pgf_allocok[MMU_PAGE_SIZES];
	ulong_t	pgf_allocokrem[MMU_PAGE_SIZES];
	ulong_t	pgf_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgf_allocdeferred;
	ulong_t	pgf_allocretry[MMU_PAGE_SIZES];
	ulong_t	pgc_alloc;			/* page_get_cachelist */
	ulong_t	pgc_allocok;
	ulong_t	pgc_allocokrem;
	ulong_t	pgc_allocokdeferred;
	ulong_t	pgc_allocfailed;
	ulong_t	pgcp_alloc[MMU_PAGE_SIZES];	/* page_get_contig_pages */
	ulong_t	pgcp_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgcp_allocempty[MMU_PAGE_SIZES];
	ulong_t	pgcp_allocok[MMU_PAGE_SIZES];
	ulong_t	ptcp[MMU_PAGE_SIZES];		/* page_trylock_contig_pages */
	ulong_t	ptcpfreethresh[MMU_PAGE_SIZES];
	ulong_t	ptcpfailexcl[MMU_PAGE_SIZES];
	ulong_t	ptcpfailszc[MMU_PAGE_SIZES];
	ulong_t	ptcpfailcage[MMU_PAGE_SIZES];
	ulong_t	ptcpok[MMU_PAGE_SIZES];
	ulong_t	pgmf_alloc[MMU_PAGE_SIZES];	/* page_get_mnode_freelist */
	ulong_t	pgmf_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgmf_allocempty[MMU_PAGE_SIZES];
	ulong_t	pgmf_allocok[MMU_PAGE_SIZES];
	ulong_t	pgmc_alloc;			/* page_get_mnode_cachelist */
	ulong_t	pgmc_allocfailed;
	ulong_t	pgmc_allocempty;
	ulong_t	pgmc_allocok;
	ulong_t	pladd_free[MMU_PAGE_SIZES];	/* page_list_add/sub */
	ulong_t	plsub_free[MMU_PAGE_SIZES];
	ulong_t	pladd_cache;
	ulong_t	plsub_cache;
	ulong_t	plsubpages_szcbig;
	ulong_t	plsubpages_szc0;
	ulong_t	pff_req[MMU_PAGE_SIZES];	/* page_freelist_fill */
	ulong_t	pff_demote[MMU_PAGE_SIZES];
	ulong_t	pff_coalok[MMU_PAGE_SIZES];
	ulong_t	ppr_reloc[MMU_PAGE_SIZES];	/* page_relocate */
	ulong_t	ppr_relocnoroot[MMU_PAGE_SIZES];
	ulong_t	ppr_reloc_replnoroot[MMU_PAGE_SIZES];
	ulong_t	ppr_relocnolock[MMU_PAGE_SIZES];
	ulong_t	ppr_relocnomem[MMU_PAGE_SIZES];
	ulong_t	ppr_relocok[MMU_PAGE_SIZES];
	ulong_t	page_ctrs_coalesce;	/* page coalesce counter */
	ulong_t	page_ctrs_cands_skip;	/* candidates useful */
	ulong_t	page_ctrs_changed;	/* ctrs changed after locking */
	ulong_t	page_ctrs_failed;	/* page_freelist_coalesce failed */
	ulong_t	page_ctrs_coalesce_all;	/* page coalesce all counter */
	ulong_t	page_ctrs_cands_skip_all; /* candidates useful for all func */
	ulong_t	restrict4gcnt;
	ulong_t	unrestrict16mcnt;	/* non-DMA 16m allocs allowed */
	ulong_t	pgpanicalloc;		/* PG_PANIC allocation */
};
extern struct vmm_vmstats_str vmm_vmstats;
#endif	/* VM_STATS */

extern size_t	page_ctrs_sz(void);
extern caddr_t	page_ctrs_alloc(caddr_t);
extern void	page_ctr_sub(int, int, page_t *, int);
extern page_t	*page_freelist_fill(uchar_t, int, int, int, pfn_t);
extern uint_t	page_get_pagecolors(uint_t);

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_DEP_H */