/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#ifndef	_VM_DEP_H
#define	_VM_DEP_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/clock.h>
#include <vm/hat_pte.h>

/*
 * WARNING: vm_dep.h is included by files in common. As such, macros
 * dependent upon PTE36 such as LARGEPAGESIZE cannot be used in this file.
 */

#define	GETTICK()	tsc_read()

/* memranges in descending order */
extern pfn_t	*memranges;

#define	MEMRANGEHI(mtype)						\
	((mtype > 0) ? memranges[mtype - 1] - 1 : physmax)
#define	MEMRANGELO(mtype)	(memranges[mtype])

/*
 * combined memory ranges from mnode and memranges[] to manage single
 * mnode/mtype dimension in the page lists.
 */
typedef struct {
	pfn_t	mnr_pfnlo;
	pfn_t	mnr_pfnhi;
	int	mnr_mnode;
	int	mnr_memrange;		/* index into memranges[] */
	/* maintain page list stats */
	pgcnt_t	mnr_mt_pgmax;		/* mnode/mtype max page cnt */
	pgcnt_t	mnr_mt_clpgcnt;		/* cache list cnt */
	pgcnt_t	mnr_mt_flpgcnt;		/* free list cnt - small pages */
	pgcnt_t	mnr_mt_lgpgcnt;		/* free list cnt - large pages */
#ifdef DEBUG
	struct mnr_mts {		/* mnode/mtype szc stats */
		pgcnt_t	mnr_mts_pgcnt;
		int	mnr_mts_colors;
		pgcnt_t	*mnr_mtsc_pgcnt;
	}	*mnr_mts;
#endif
} mnoderange_t;
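/*
 * Illustrative sketch (not part of the original header): the intent of
 * mnoderange_t is that the mnoderanges[] entries (externs declared below)
 * partition the managed pfn space, so a pfn resolves to exactly one
 * mnode/mtype slice. The real lookup is the pfn_2_mtype() routine whose
 * prototype appears later in this file; this hypothetical helper only
 * demonstrates the invariant.
 */
#ifdef	VM_DEP_EXAMPLES	/* hypothetical guard - example is never built */
static int
example_pfn_2_mtype(pfn_t pfn)
{
	int	mt;

	/* scan the combined mnode/memrange table for the owning slice */
	for (mt = 0; mt < mnoderangecnt; mt++) {
		if (pfn >= mnoderanges[mt].mnr_pfnlo &&
		    pfn <= mnoderanges[mt].mnr_pfnhi)
			return (mt);
	}
	return (-1);	/* pfn is not on the page lists */
}
#endif	/* VM_DEP_EXAMPLES */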
#ifdef DEBUG
#define	PLCNT_SZ(ctrs_sz) {						\
	int	szc, colors;						\
	ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) *		\
	    mmu_page_sizes;						\
	for (szc = 0; szc < mmu_page_sizes; szc++) {			\
		colors = page_get_pagecolors(szc);			\
		ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;	\
	}								\
}

#define	PLCNT_INIT(addr) {						\
	int	mt, szc, colors;					\
	for (mt = 0; mt < mnoderangecnt; mt++) {			\
		mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;	\
		addr += (sizeof (struct mnr_mts) * mmu_page_sizes);	\
		for (szc = 0; szc < mmu_page_sizes; szc++) {		\
			colors = page_get_pagecolors(szc);		\
			mnoderanges[mt].mnr_mts[szc].mnr_mts_colors =	\
			    colors;					\
			mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =	\
			    (pgcnt_t *)addr;				\
			addr += (sizeof (pgcnt_t) * colors);		\
		}							\
	}								\
}

#define	PLCNT_DO(pp, mtype, szc, cnt, flags) {				\
	int	bin = PP_2_BIN(pp);					\
	if (flags & PG_CACHE_LIST)					\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_clpgcnt, cnt);				\
	else if (szc)							\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_lgpgcnt, cnt);				\
	else								\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_flpgcnt, cnt);				\
	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].		\
	    mnr_mts_pgcnt, cnt);					\
	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].		\
	    mnr_mtsc_pgcnt[bin], cnt);					\
}
#else
#define	PLCNT_SZ(ctrs_sz)
#define	PLCNT_INIT(addr)
#define	PLCNT_DO(pp, mtype, szc, cnt, flags) {				\
	if (flags & PG_CACHE_LIST)					\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_clpgcnt, cnt);				\
	else if (szc)							\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_lgpgcnt, cnt);				\
	else								\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_flpgcnt, cnt);				\
}
#endif

#define	PLCNT_INCR(pp, mnode, mtype, szc, flags) {			\
	long	cnt = (1 << PAGE_BSZS_SHIFT(szc));			\
	ASSERT(mtype == PP_2_MTYPE(pp));				\
	if (physmax4g && mtype <= mtype4g)				\
		atomic_add_long(&freemem4g, cnt);			\
	PLCNT_DO(pp, mtype, szc, cnt, flags);				\
}

#define	PLCNT_DECR(pp, mnode, mtype, szc, flags) {			\
	long	cnt = ((-1) << PAGE_BSZS_SHIFT(szc));			\
	ASSERT(mtype == PP_2_MTYPE(pp));				\
	if (physmax4g && mtype <= mtype4g)				\
		atomic_add_long(&freemem4g, cnt);			\
	PLCNT_DO(pp, mtype, szc, cnt, flags);				\
}

/*
 * macros to update page list max counts. no-op on x86.
 */
#define	PLCNT_XFER_NORELOC(pp)

#define	PLCNT_MODIFY_MAX(pfn, cnt)	mtype_modify_max(pfn, (pgcnt_t)cnt)

extern mnoderange_t	*mnoderanges;
extern int		mnoderangecnt;
extern int		mtype4g;

/*
 * 4g memory management variables for systems with more than 4g of memory:
 *
 * physical memory below 4g is required for 32bit dma devices and, currently,
 * for kmem memory. On systems with more than 4g of memory, the pool of memory
 * below 4g can be depleted without any paging activity given that there is
 * likely to be sufficient memory above 4g.
 *
 * physmax4g is set true if the largest pfn is over 4g. The rest of the
 * 4g memory management code is enabled only when physmax4g is true.
 *
 * maxmem4g is the count of the maximum number of pages on the page lists
 * with physical addresses below 4g. It can be a lot less than 4g given that
 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
 * agp aperture etc.
 *
 * freemem4g maintains the count of the number of available pages on the
 * page lists with physical addresses below 4g.
 *
 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
 * 6% (desfree4gshift = 4) of maxmem4g.
 *
 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
 * and the amount of physical memory above 4g is greater than freemem4g.
 * In this case, page_get_* routines will restrict below 4g allocations
 * for requests that don't specifically require it.
 */

extern int		physmax4g;
extern pgcnt_t		maxmem4g;
extern pgcnt_t		freemem4g;
extern int		lotsfree4gshift;
extern int		desfree4gshift;
#define	LOTSFREE4G	(maxmem4g >> lotsfree4gshift)
#define	DESFREE4G	(maxmem4g >> desfree4gshift)

#define	RESTRICT4G_ALLOC						\
	(physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))

extern int		restricted_kmemalloc;
extern int		memrange_num(pfn_t);
extern int		pfn_2_mtype(pfn_t);
extern int		mtype_func(int, int, uint_t);
extern void		mtype_modify_max(pfn_t, long);
extern int		mnode_pgcnt(int);

#define	NUM_MEM_RANGES	4		/* memory range types */
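/*
 * Worked example (illustrative, not from the original header): with 4K
 * base pages and the full 4g populated, maxmem4g is 0x100000 pages, so
 * with desfree4gshift = 4, DESFREE4G is 0x10000 pages (256MB, i.e. 1/16
 * or ~6% of maxmem4g). This hypothetical predicate restates
 * RESTRICT4G_ALLOC: restrict below-4g allocations once free below-4g
 * memory drops under that target while more than half of all free
 * memory sits above 4g.
 */
#ifdef	VM_DEP_EXAMPLES	/* hypothetical guard - example is never built */
static int
example_restrict4g(void)
{
	return (physmax4g &&			/* > 4g of physical memory */
	    freemem4g < (maxmem4g >> desfree4gshift) &&
	    (freemem4g << 1) < freemem);	/* most free mem above 4g */
}
#endif	/* VM_DEP_EXAMPLES */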
/*
 * Per page size free lists. Allocated dynamically.
 * dimensions [mtype][mmu_page_sizes][colors]
 *
 * mtype specifies a physical memory range with a unique mnode.
 */
extern page_t ****page_freelists;

#define	PAGE_FREELISTS(mnode, szc, color, mtype)			\
	(*(page_freelists[mtype][szc] + (color)))

/*
 * For now there is only a single size cache list. Allocated dynamically.
 * dimensions [mtype][colors]
 *
 * mtype specifies a physical memory range with a unique mnode.
 */
extern page_t ***page_cachelists;

#define	PAGE_CACHELISTS(mnode, color, mtype)				\
	(*(page_cachelists[mtype] + (color)))

/*
 * There are mutexes for both the page freelist
 * and the page cachelist. We want enough locks to make contention
 * reasonable, but not too many -- otherwise page_freelist_lock() gets
 * so expensive that it becomes the bottleneck!
 */
#define	NPC_MUTEX	16

extern kmutex_t	*fpc_mutex[NPC_MUTEX];
extern kmutex_t	*cpc_mutex[NPC_MUTEX];

extern page_t *page_get_mnode_freelist(int, uint_t, int, uchar_t, uint_t);
extern page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);

/* Find the bin for the given page if it was of size szc */
#define	PP_2_BIN_SZC(pp, szc)						\
	(((pp->p_pagenum) & page_colors_mask) >>			\
	(hw_page_array[szc].hp_shift - hw_page_array[0].hp_shift))

#define	PP_2_BIN(pp)		(PP_2_BIN_SZC(pp, pp->p_szc))

#define	PP_2_MEM_NODE(pp)	(PFN_2_MEM_NODE(pp->p_pagenum))
#define	PP_2_MTYPE(pp)		(pfn_2_mtype(pp->p_pagenum))
#define	PP_2_SZC(pp)		(pp->p_szc)

#define	SZCPAGES(szc)		(1 << PAGE_BSZS_SHIFT(szc))
#define	PFN_BASE(pfnum, szc)	(pfnum & ~(SZCPAGES(szc) - 1))

extern struct cpu	cpus[];
#define	CPU0		cpus

#if defined(__amd64)

/*
 * set the mtype range (called from page_get_{free,cache}list)
 * - set range to above 4g if the system has more than 4g of memory and the
 *   amount of memory below 4g runs low; otherwise set range to all of memory
 *   starting from the hi pfns.
 *
 * page_get_anylist gets its mtype range from the specified ddi_dma_attr_t.
 */
#define	MTYPE_INIT(mtype, vp, vaddr, flags) {				\
	mtype = mnoderangecnt - 1;					\
	if (RESTRICT4G_ALLOC) {						\
		VM_STAT_ADD(vmm_vmstats.restrict4gcnt);			\
		/* here only for > 4g systems */			\
		flags |= PGI_MT_RANGE4G;				\
	} else {							\
		flags |= PGI_MT_RANGE0;					\
	}								\
}

#elif defined(__i386)

/*
 * set the mtype range
 * - kmem requests need to be below 4g if restricted_kmemalloc is set.
 * - for non kmem requests, set range to above 4g if the amount of memory
 *   below 4g runs low.
 */
#define	MTYPE_INIT(mtype, vp, vaddr, flags) {				\
	if (restricted_kmemalloc && (vp) == &kvp &&			\
	    (caddr_t)(vaddr) >= kernelheap &&				\
	    (caddr_t)(vaddr) < ekernelheap) {				\
		ASSERT(physmax4g);					\
		mtype = mtype4g;					\
		flags |= PGI_MT_RANGE0;					\
	} else {							\
		mtype = mnoderangecnt - 1;				\
		if (RESTRICT4G_ALLOC) {					\
			VM_STAT_ADD(vmm_vmstats.restrict4gcnt);		\
			/* here only for > 4g systems */		\
			flags |= PGI_MT_RANGE4G;			\
		} else {						\
			flags |= PGI_MT_RANGE0;				\
		}							\
	}								\
}

#endif	/* __i386 */
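/*
 * Illustrative sketch (not part of the original header): how a
 * page_get_freelist()-style caller is expected to combine MTYPE_INIT
 * with the MTYPE_START/MTYPE_NEXT macros defined below to walk the
 * mtype ranges from the high pfns downward. Locking, bin scanning and
 * list removal are omitted; the real logic lives in the page list code.
 */
#ifdef	VM_DEP_EXAMPLES	/* hypothetical guard - example is never built */
static page_t *
example_get_free_page(struct vnode *vp, caddr_t vaddr, int mnode, uint_t bin)
{
	page_t	*pp;
	int	mtype;
	uint_t	flags = 0;

	MTYPE_INIT(mtype, vp, vaddr, flags);	/* choose start range */
	MTYPE_START(mnode, mtype, flags);	/* clamp range to mnode */
	while (mtype != -1) {
		pp = PAGE_FREELISTS(mnode, 0, bin, mtype);
		if (pp != NULL)
			return (pp);	/* real code locks and unlinks */
		MTYPE_NEXT(mnode, mtype, flags); /* next lower range */
	}
	return (NULL);
}
#endif	/* VM_DEP_EXAMPLES */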
/*
 * macros to loop through the mtype range (page_get_mnode_{free,cache,any}list,
 * and page_get_contig_pages)
 *
 * MTYPE_START sets the initial mtype. -1 if the mtype range specified does
 * not contain mnode.
 *
 * MTYPE_NEXT sets the next mtype. -1 if there are no more valid
 * mtypes in the range.
 */
#define	MTYPE_START(mnode, mtype, flags)				\
	(mtype = mtype_func(mnode, mtype, flags))

#define	MTYPE_NEXT(mnode, mtype, flags) {				\
	if (flags & PGI_MT_RANGE) {					\
		mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);	\
	} else {							\
		mtype = -1;						\
	}								\
}

/* mtype init for page_get_replacement_page */
#define	MTYPE_PGR_INIT(mtype, flags, pp, mnode) {			\
	mtype = mnoderangecnt - 1;					\
	flags |= PGI_MT_RANGE0;						\
}

#define	MNODE_PGCNT(mnode)	mnode_pgcnt(mnode)

#define	MNODETYPE_2_PFN(mnode, mtype, pfnlo, pfnhi)			\
	ASSERT(mnoderanges[mtype].mnr_mnode == mnode);			\
	pfnlo = mnoderanges[mtype].mnr_pfnlo;				\
	pfnhi = mnoderanges[mtype].mnr_pfnhi;

#define	PC_BIN_MUTEX(mnode, bin, flags)	((flags & PG_FREE_LIST) ?	\
	&fpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode] :			\
	&cpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode])

#define	FPC_MUTEX(mnode, i)	(&fpc_mutex[i][mnode])
#define	CPC_MUTEX(mnode, i)	(&cpc_mutex[i][mnode])

#ifdef DEBUG
#define	CHK_LPG(pp, szc)	chk_lpg(pp, szc)
extern void	chk_lpg(page_t *, uchar_t);
#else
#define	CHK_LPG(pp, szc)
#endif

#define	FULL_REGION_CNT(rg_szc)						\
	(LEVEL_SIZE(rg_szc) >> LEVEL_SHIFT(rg_szc - 1))

/* Return the leader for this mapping size */
#define	PP_GROUPLEADER(pp, szc)						\
	(&(pp)[-(int)((pp)->p_pagenum & (SZCPAGES(szc)-1))])

/* Return the root page for this page based on p_szc */
#define	PP_PAGEROOT(pp)	((pp)->p_szc == 0 ? (pp) :			\
	PP_GROUPLEADER((pp), (pp)->p_szc))

/*
 * The counter base must be per page_counter element to prevent
 * races when re-indexing, and the base page size element should
 * be aligned on a boundary of the given region size.
 *
 * We also round up the number of pages spanned by the counters
 * for a given region to PC_BASE_ALIGN in certain situations to simplify
 * the coding for some non-performance critical routines.
 */
#define	PC_BASE_ALIGN		((pfn_t)1 << PAGE_BSZS_SHIFT(MMU_PAGE_SIZES-1))
#define	PC_BASE_ALIGN_MASK	(PC_BASE_ALIGN - 1)

/*
 * cpu/mmu-dependent vm variables
 */
extern uint_t	mmu_page_sizes;
extern uint_t	mmu_exported_page_sizes;

/* For x86, userszc is the same as the kernel's szc */
#define	USERSZC_2_SZC(userszc)	(userszc)
#define	SZC_2_USERSZC(szc)	(szc)

/*
 * for hw_page_map_t, sized to hold the ratio of large page to base
 * pagesize (1024 max)
 */
typedef	short	hpmctr_t;

/*
 * get the setsize of the current cpu - assume homogeneous for x86
 */
extern int	l2cache_sz, l2cache_linesz, l2cache_assoc;

#define	L2CACHE_ALIGN		l2cache_linesz
#define	L2CACHE_ALIGN_MAX	64
#define	CPUSETSIZE()							\
	(l2cache_assoc ? (l2cache_sz / l2cache_assoc) : MMU_PAGESIZE)

/*
 * Return the log2(pagesize(szc) / MMU_PAGESIZE) --- or the shift count
 * for the number of base pages in this pagesize
 */
#define	PAGE_BSZS_SHIFT(szc)	(LEVEL_SHIFT(szc) - MMU_PAGESHIFT)
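/*
 * Worked example (illustrative, not from the original header): on x86
 * with 4K base pages and 2M level-1 pages, LEVEL_SHIFT(1) == 21 and
 * MMU_PAGESHIFT == 12, so PAGE_BSZS_SHIFT(1) == 9, SZCPAGES(1) == 512
 * and FULL_REGION_CNT(1) == 512. PP_GROUPLEADER(pp, 1) therefore backs
 * pp up by (p_pagenum & 511) page_t entries to the first constituent
 * page of its 2M region.
 */
#ifdef	VM_DEP_EXAMPLES	/* hypothetical guard - example is never built */
static page_t *
example_page_root(page_t *pp)
{
	/* identity for small pages, group leader for large pages */
	return (PP_PAGEROOT(pp));
}
#endif	/* VM_DEP_EXAMPLES */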
/*
 * Internal PG_ flags.
 */
#define	PGI_RELOCONLY	0x010000	/* opposite of PG_NORELOC */
#define	PGI_NOCAGE	0x020000	/* cage is disabled */
#define	PGI_PGCPHIPRI	0x040000	/* page_get_contig_page pri alloc */
#define	PGI_PGCPSZC0	0x080000	/* relocate base pagesize page */

/*
 * PGI range flags - should not overlap PGI flags
 */
#define	PGI_MT_RANGE0	0x1000000	/* mtype range to 0 */
#define	PGI_MT_RANGE4G	0x2000000	/* mtype range to 4g */
#define	PGI_MT_NEXT	0x4000000	/* get next mtype */
#define	PGI_MT_RANGE	(PGI_MT_RANGE0 | PGI_MT_RANGE4G)

/*
 * hash an as and addr to get a bin.
 */
#define	AS_2_BIN(as, seg, vp, addr, bin)				\
	bin = ((((uintptr_t)(addr) >> PAGESHIFT) + ((uintptr_t)(as) >> 4)) \
	    & page_colors_mask)

/*
 * cpu private vm data - accessed thru CPU->cpu_vm_data
 *	vc_pnum_memseg: tracks last memseg visited in page_numtopp_nolock()
 *	vc_pnext_memseg: tracks last memseg visited in page_nextn()
 *	vc_kmptr: original unaligned kmem pointer for this vm_cpu_data_t
 *	vc_kmsize: original kmem size for this vm_cpu_data_t
 */
typedef struct {
	struct memseg	*vc_pnum_memseg;
	struct memseg	*vc_pnext_memseg;
	void		*vc_kmptr;
	size_t		vc_kmsize;
} vm_cpu_data_t;

/* allocation size to ensure vm_cpu_data_t resides in its own cache line */
#define	VM_CPU_DATA_PADSIZE						\
	(P2ROUNDUP(sizeof (vm_cpu_data_t), L2CACHE_ALIGN_MAX))

/* for boot cpu before kmem is initialized */
extern char	vm_cpu_data0[];

/*
 * When a bin is empty, and we can't satisfy a color request correctly,
 * we scan. If we assume that the programs have reasonable spatial
 * behavior, then it will not be a good idea to use the adjacent color.
 * Using the adjacent color would result in virtually adjacent addresses
 * mapping into the same spot in the cache. So, if we stumble across
 * an empty bin, skip a bunch before looking. After the first skip,
 * then just look one bin at a time so we don't miss our cache on
 * every look. Be sure to check every bin. Page_create() will panic
 * if we miss a page.
 *
 * This also explains the `<=' in the for loops in both page_get_freelist()
 * and page_get_cachelist(). Since we checked the target bin, skipped
 * a bunch, then continued one at a time, we wind up checking the target
 * bin twice to make sure we get all of the bins.
 */
#define	BIN_STEP	19
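/*
 * Illustrative sketch (not part of the original header): the scan
 * described above. The target bin is checked first; on a miss the scan
 * jumps BIN_STEP colors to avoid virtually adjacent addresses, then
 * steps one bin at a time. The `<=' bound makes the loop revisit the
 * target bin so every bin is covered despite the initial skip. This
 * hypothetical routine omits the PC_BIN_MUTEX locking that the real
 * page_get_freelist()/page_get_cachelist() code performs.
 */
#ifdef	VM_DEP_EXAMPLES	/* hypothetical guard - example is never built */
static page_t *
example_bin_scan(int mnode, int mtype, uchar_t szc, uint_t bin)
{
	page_t	*pp;
	uint_t	i, nbin = bin;

	for (i = 0; i <= page_colors; i++) {
		pp = PAGE_FREELISTS(mnode, szc, nbin, mtype);
		if (pp != NULL)
			return (pp);
		/* big skip after the first miss, single steps after */
		nbin = (nbin + ((i == 0) ? BIN_STEP : 1)) & page_colors_mask;
	}
	return (NULL);
}
#endif	/* VM_DEP_EXAMPLES */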
#ifdef VM_STATS
struct vmm_vmstats_str {
	ulong_t	pgf_alloc[MMU_PAGE_SIZES];	/* page_get_freelist */
	ulong_t	pgf_allocok[MMU_PAGE_SIZES];
	ulong_t	pgf_allocokrem[MMU_PAGE_SIZES];
	ulong_t	pgf_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgf_allocdeferred;
	ulong_t	pgf_allocretry[MMU_PAGE_SIZES];
	ulong_t	pgc_alloc;			/* page_get_cachelist */
	ulong_t	pgc_allocok;
	ulong_t	pgc_allocokrem;
	ulong_t	pgc_allocokdeferred;
	ulong_t	pgc_allocfailed;
	ulong_t	pgcp_alloc[MMU_PAGE_SIZES];	/* page_get_contig_pages */
	ulong_t	pgcp_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgcp_allocempty[MMU_PAGE_SIZES];
	ulong_t	pgcp_allocok[MMU_PAGE_SIZES];
	ulong_t	ptcp[MMU_PAGE_SIZES];		/* page_trylock_contig_pages */
	ulong_t	ptcpfreethresh[MMU_PAGE_SIZES];
	ulong_t	ptcpfailexcl[MMU_PAGE_SIZES];
	ulong_t	ptcpfailszc[MMU_PAGE_SIZES];
	ulong_t	ptcpfailcage[MMU_PAGE_SIZES];
	ulong_t	ptcpok[MMU_PAGE_SIZES];
	ulong_t	pgmf_alloc[MMU_PAGE_SIZES];	/* page_get_mnode_freelist */
	ulong_t	pgmf_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgmf_allocempty[MMU_PAGE_SIZES];
	ulong_t	pgmf_allocok[MMU_PAGE_SIZES];
	ulong_t	pgmc_alloc;			/* page_get_mnode_cachelist */
	ulong_t	pgmc_allocfailed;
	ulong_t	pgmc_allocempty;
	ulong_t	pgmc_allocok;
	ulong_t	pladd_free[MMU_PAGE_SIZES];	/* page_list_add/sub */
	ulong_t	plsub_free[MMU_PAGE_SIZES];
	ulong_t	pladd_cache;
	ulong_t	plsub_cache;
	ulong_t	plsubpages_szcbig;
	ulong_t	plsubpages_szc0;
	ulong_t	pff_req[MMU_PAGE_SIZES];	/* page_freelist_fill */
	ulong_t	pff_demote[MMU_PAGE_SIZES];
	ulong_t	pff_coalok[MMU_PAGE_SIZES];
	ulong_t	ppr_reloc[MMU_PAGE_SIZES];	/* page_relocate */
	ulong_t	ppr_relocnoroot[MMU_PAGE_SIZES];
	ulong_t	ppr_reloc_replnoroot[MMU_PAGE_SIZES];
	ulong_t	ppr_relocnolock[MMU_PAGE_SIZES];
	ulong_t	ppr_relocnomem[MMU_PAGE_SIZES];
	ulong_t	ppr_relocok[MMU_PAGE_SIZES];
	ulong_t	page_ctrs_coalesce;	/* page coalesce counter */
	ulong_t	page_ctrs_cands_skip;	/* candidates useful */
	ulong_t	page_ctrs_changed;	/* ctrs changed after locking */
	ulong_t	page_ctrs_failed;	/* page_freelist_coalesce failed */
	ulong_t	page_ctrs_coalesce_all;	/* page coalesce all counter */
	ulong_t	page_ctrs_cands_skip_all; /* candidates useful for all func */
	ulong_t	restrict4gcnt;
};
extern struct vmm_vmstats_str vmm_vmstats;
#endif	/* VM_STATS */

extern size_t	page_ctrs_sz(void);
extern caddr_t	page_ctrs_alloc(caddr_t);
extern void	page_ctr_sub(int, int, page_t *, int);
extern page_t	*page_freelist_fill(uchar_t, int, int, int, pfn_t);
extern uint_t	page_get_pagecolors(uint_t);

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_DEP_H */