/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#ifndef	_VM_DEP_H
#define	_VM_DEP_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/clock.h>
#include <vm/hat_pte.h>

/*
 * WARNING: vm_dep.h is included by files in common. As such, macros
 * dependent upon PTE36 such as LARGEPAGESIZE cannot be used in this file.
 */

#define	GETTICK()	tsc_read()

/* memranges in descending order */
extern pfn_t		*memranges;

#define	MEMRANGEHI(mtype)						\
	((mtype > 0) ? memranges[mtype - 1] - 1: physmax)
#define	MEMRANGELO(mtype)	(memranges[mtype])
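
/*
 * Editorial illustration, not in the original source: memranges[] divides
 * physical memory into NUM_MEM_RANGES windows in descending pfn order, so
 * for a hypothetical table { 0x100000, 0x1000, 0x100, 0 }, mtype 0 covers
 * pfns [0x100000, physmax] (4g and up) and mtype 1 covers [0x1000, 0xfffff],
 * down to the last mtype, which starts at pfn 0. MEMRANGELO(mtype) and
 * MEMRANGEHI(mtype) above return exactly those bounds.
 */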

/*
 * combined memory ranges from mnode and memranges[] to manage single
 * mnode/mtype dimension in the page lists.
 */
typedef struct {
	pfn_t	mnr_pfnlo;
	pfn_t	mnr_pfnhi;
	int	mnr_mnode;
	int	mnr_memrange;		/* index into memranges[] */
	/* maintain page list stats */
	pgcnt_t	mnr_mt_pgmax;		/* mnode/mtype max page cnt */
	pgcnt_t	mnr_mt_clpgcnt;		/* cache list cnt */
	pgcnt_t	mnr_mt_flpgcnt;		/* free list cnt - small pages */
	pgcnt_t	mnr_mt_lgpgcnt;		/* free list cnt - large pages */
#ifdef DEBUG
	struct mnr_mts {		/* mnode/mtype szc stats */
		pgcnt_t	mnr_mts_pgcnt;
		int	mnr_mts_colors;
		pgcnt_t *mnr_mtsc_pgcnt;
	} 	*mnr_mts;
#endif
} mnoderange_t;
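
/*
 * Editorial illustration, not in the original source: on a hypothetical
 * single-mnode machine with 8g of memory, mnoderanges[] gets one entry for
 * each memranges[] window that contains physical memory (e.g. one mtype for
 * pfns at or above 4g and one for each populated window below it), and each
 * entry carries its own free list and cache list counters.
 */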

#ifdef DEBUG
#define	PLCNT_SZ(ctrs_sz) {						\
	int	szc, colors;						\
	ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) *		\
	    mmu_page_sizes;						\
	for (szc = 0; szc < mmu_page_sizes; szc++) {			\
		colors = page_get_pagecolors(szc);			\
		ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;	\
	}								\
}

#define	PLCNT_INIT(addr) {						\
	int	mt, szc, colors;					\
	for (mt = 0; mt < mnoderangecnt; mt++) {			\
		mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;	\
		addr += (sizeof (struct mnr_mts) * mmu_page_sizes);	\
		for (szc = 0; szc < mmu_page_sizes; szc++) {		\
			colors = page_get_pagecolors(szc);		\
			mnoderanges[mt].mnr_mts[szc].mnr_mts_colors =	\
			    colors;					\
			mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =	\
			    (pgcnt_t *)addr;				\
			addr += (sizeof (pgcnt_t) * colors);		\
		}							\
	}								\
}
#define	PLCNT_DO(pp, mtype, szc, cnt, flags) {				\
	int	bin = PP_2_BIN(pp);					\
	if (flags & PG_LIST_ISINIT)					\
		mnoderanges[mtype].mnr_mt_pgmax += cnt;			\
	ASSERT((flags & PG_LIST_ISCAGE) == 0);				\
	if (flags & PG_CACHE_LIST)					\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_clpgcnt, cnt);				\
	else if (szc)							\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_lgpgcnt, cnt);				\
	else								\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_flpgcnt, cnt);				\
	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].		\
	    mnr_mts_pgcnt, cnt);					\
	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].		\
	    mnr_mtsc_pgcnt[bin], cnt);					\
}
#else
#define	PLCNT_SZ(ctrs_sz)
#define	PLCNT_INIT(base)
#define	PLCNT_DO(pp, mtype, szc, cnt, flags) {				\
	if (flags & PG_LIST_ISINIT)					\
		mnoderanges[mtype].mnr_mt_pgmax += cnt;			\
	if (flags & PG_CACHE_LIST)					\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_clpgcnt, cnt);				\
	else if (szc)							\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_lgpgcnt, cnt);				\
	else								\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_flpgcnt, cnt);				\
}
#endif

#define	PLCNT_INCR(pp, mnode, mtype, szc, flags) {			\
	long	cnt = (1 << PAGE_BSZS_SHIFT(szc));			\
	ASSERT(mtype == PP_2_MTYPE(pp));				\
	if (physmax4g && mtype <= mtype4g)				\
		atomic_add_long(&freemem4g, cnt);			\
	if (flags & PG_LIST_ISINIT) {					\
		if (physmax4g && mtype <= mtype4g)			\
			maxmem4g += cnt;				\
	}								\
	PLCNT_DO(pp, mtype, szc, cnt, flags);				\
}

#define	PLCNT_DECR(pp, mnode, mtype, szc, flags) {			\
	long	cnt = ((-1) << PAGE_BSZS_SHIFT(szc));			\
	ASSERT(mtype == PP_2_MTYPE(pp));				\
	if (physmax4g && mtype <= mtype4g)				\
		atomic_add_long(&freemem4g, cnt);			\
	PLCNT_DO(pp, mtype, szc, cnt, flags);				\
}
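
/*
 * Editorial note, not in the original source: cnt above is the number of
 * base pages spanned by a page of size szc. On a configuration with 4k base
 * pages and 2m level 1 pages, PAGE_BSZS_SHIFT(1) is 9, so PLCNT_INCR() and
 * PLCNT_DECR() move the counters by 512 for a single szc 1 page.
 */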

/*
 * macros to update page list max counts.  no-op on x86.
 */
#define	PLCNT_MAX_INCR(pp, mnode, mtype, szc)
#define	PLCNT_MAX_DECR(pp, mnode, mtype, szc)

extern mnoderange_t	*mnoderanges;
extern int		mnoderangecnt;
extern int		mtype4g;

/*
 * 4g memory management variables for systems with more than 4g of memory:
 *
 * physical memory below 4g is required for 32bit dma devices and, currently,
 * for kmem memory. On systems with more than 4g of memory, the pool of memory
 * below 4g can be depleted without any paging activity given that there is
 * likely to be sufficient memory above 4g.
 *
 * physmax4g is set true if the largest pfn is over 4g. The rest of the
 * 4g memory management code is enabled only when physmax4g is true.
 *
 * maxmem4g is the count of the maximum number of pages on the page lists
 * with physical addresses below 4g. It can be a lot less than 4g given that
 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
 * agp aperture etc.
 *
 * freemem4g maintains the count of the number of available pages on the
 * page lists with physical addresses below 4g.
 *
 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
 * 1/16 of maxmem4g (about 6%; desfree4gshift = 4).
 *
 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
 * and the amount of physical memory above 4g is greater than freemem4g.
 * In this case, page_get_* routines will restrict below 4g allocations
 * for requests that don't specifically require it.
 */

extern int		physmax4g;
extern pgcnt_t		maxmem4g;
extern pgcnt_t		freemem4g;
extern int		lotsfree4gshift;
extern int		desfree4gshift;
#define	LOTSFREE4G	(maxmem4g >> lotsfree4gshift)
#define	DESFREE4G	(maxmem4g >> desfree4gshift)

#define	RESTRICT4G_ALLOC					\
	(physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))
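
/*
 * Editorial note, not in the original source: with desfree4gshift = 4,
 * DESFREE4G evaluates to maxmem4g >> 4, i.e. 1/16 of maxmem4g. The
 * (freemem4g << 1) < freemem test is equivalent to
 * freemem - freemem4g > freemem4g, i.e. there is more free memory above 4g
 * than below it, which is the condition described in the block comment
 * above.
 */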

extern int		restricted_kmemalloc;
extern int		memrange_num(pfn_t);
extern int		pfn_2_mtype(pfn_t);
extern int		mtype_func(int, int, uint_t);
extern int		mnode_pgcnt(int);

#define	NUM_MEM_RANGES	4		/* memory range types */

/*
 * Per page size free lists. Allocated dynamically.
 * dimensions [mtype][mmu_page_sizes][colors]
 *
 * mtype specifies a physical memory range with a unique mnode.
 */

extern page_t ****page_freelists;

#define	PAGE_FREELISTS(mnode, szc, color, mtype)		\
	(*(page_freelists[mtype][szc] + (color)))

/*
 * For now there is only a single size cache list. Allocated dynamically.
 * dimensions [mtype][colors]
 *
 * mtype specifies a physical memory range with a unique mnode.
 */
extern page_t ***page_cachelists;

#define	PAGE_CACHELISTS(mnode, color, mtype) 		\
	(*(page_cachelists[mtype] + (color)))

/*
 * There are mutexes for both the page freelist
 * and the page cachelist.  We want enough locks to make contention
 * reasonable, but not too many -- otherwise page_freelist_lock() gets
 * so expensive that it becomes the bottleneck!
 */

#define	NPC_MUTEX	16

extern kmutex_t	*fpc_mutex[NPC_MUTEX];
extern kmutex_t	*cpc_mutex[NPC_MUTEX];

extern page_t *page_get_mnode_freelist(int, uint_t, int, uchar_t, uint_t);
extern page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);

/* Find the bin for the given page if it was of size szc */
#define	PP_2_BIN_SZC(pp, szc)						\
	(((pp->p_pagenum) & page_colors_mask) >>			\
	(hw_page_array[szc].hp_shift - hw_page_array[0].hp_shift))
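
/*
 * Editorial illustration, not in the original source: the bin is the page
 * color, taken from the low page_colors bits of the pfn and shifted right by
 * the log2 ratio of the szc page size to the base page size. With a
 * hypothetical 64 page colors, a base (szc 0) page at pfn 0x12345 falls in
 * bin 0x12345 & 0x3f == 0x05; for larger szc values the shift leaves
 * correspondingly fewer distinct bins.
 */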

#define	PP_2_BIN(pp)		(PP_2_BIN_SZC(pp, pp->p_szc))

#define	PP_2_MEM_NODE(pp)	(PFN_2_MEM_NODE(pp->p_pagenum))
#define	PP_2_MTYPE(pp)		(pfn_2_mtype(pp->p_pagenum))
#define	PP_2_SZC(pp)		(pp->p_szc)

#define	SZCPAGES(szc)		(1 << PAGE_BSZS_SHIFT(szc))
#define	PFN_BASE(pfnum, szc)	(pfnum & ~(SZCPAGES(szc) - 1))

extern struct cpu	cpus[];
#define	CPU0		cpus

#if defined(__amd64)

/*
 * set the mtype range (called from page_get_{free,cache}list)
 *   - set range to above 4g if the system has more than 4g of memory and the
 *   amount of memory below 4g runs low; otherwise set range to all of memory,
 *   starting from the high pfns.
 *
 * page_get_anylist gets its mtype range from the specified ddi_dma_attr_t.
 */
#define	MTYPE_INIT(mtype, vp, vaddr, flags) {				\
	mtype = mnoderangecnt - 1;					\
	if (RESTRICT4G_ALLOC) {						\
		VM_STAT_ADD(vmm_vmstats.restrict4gcnt);			\
		/* here only for > 4g systems */			\
		flags |= PGI_MT_RANGE4G;				\
	} else {							\
		flags |= PGI_MT_RANGE0;					\
	}								\
}

#elif defined(__i386)

/*
 * set the mtype range
 *   - kmem requests need to be below 4g if restricted_kmemalloc is set.
 *   - for non kmem requests, set range to above 4g if the amount of memory
 *   below 4g runs low.
 */

#define	MTYPE_INIT(mtype, vp, vaddr, flags) {				\
	if (restricted_kmemalloc && (vp) == &kvp &&			\
	    (caddr_t)(vaddr) >= kernelheap &&				\
	    (caddr_t)(vaddr) < ekernelheap) {				\
		ASSERT(physmax4g);					\
		mtype = mtype4g;					\
		flags |= PGI_MT_RANGE0;					\
	} else {							\
		mtype = mnoderangecnt - 1;				\
		if (RESTRICT4G_ALLOC) {					\
			VM_STAT_ADD(vmm_vmstats.restrict4gcnt);		\
			/* here only for > 4g systems */		\
			flags |= PGI_MT_RANGE4G;			\
		} else {						\
			flags |= PGI_MT_RANGE0;				\
		}							\
	}								\
}

#endif	/* __i386 */

/*
 * macros to loop through the mtype range (page_get_mnode_{free,cache,any}list,
 * and page_get_contig_pages)
 *
 * MTYPE_START sets the initial mtype. -1 if the mtype range specified does
 * not contain mnode.
 *
 * MTYPE_NEXT sets the next mtype. -1 if there are no more valid
 * mtypes in the range.
 */

#define	MTYPE_START(mnode, mtype, flags)				\
	(mtype = mtype_func(mnode, mtype, flags))

#define	MTYPE_NEXT(mnode, mtype, flags) {				\
	if (flags & PGI_MT_RANGE) {					\
		mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);	\
	} else {							\
		mtype = -1;						\
	}								\
}

/* mtype init for page_get_replacement_page */

#define	MTYPE_PGR_INIT(mtype, flags, pp, mnode) {			\
	mtype = mnoderangecnt - 1;					\
	flags |= PGI_MT_RANGE0;						\
}

#define	MNODE_PGCNT(mnode)		mnode_pgcnt(mnode)

#define	MNODETYPE_2_PFN(mnode, mtype, pfnlo, pfnhi)			\
	ASSERT(mnoderanges[mtype].mnr_mnode == mnode);			\
	pfnlo = mnoderanges[mtype].mnr_pfnlo;				\
	pfnhi = mnoderanges[mtype].mnr_pfnhi;

#define	PC_BIN_MUTEX(mnode, bin, flags) ((flags & PG_FREE_LIST) ?	\
	&fpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode] :			\
	&cpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode])

#define	FPC_MUTEX(mnode, i)	(&fpc_mutex[i][mnode])
#define	CPC_MUTEX(mnode, i)	(&cpc_mutex[i][mnode])

#ifdef DEBUG
#define	CHK_LPG(pp, szc)	chk_lpg(pp, szc)
extern void	chk_lpg(page_t *, uchar_t);
#else
#define	CHK_LPG(pp, szc)
#endif

#define	FULL_REGION_CNT(rg_szc)	\
	(LEVEL_SIZE(rg_szc) >> LEVEL_SHIFT(rg_szc - 1))
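
/*
 * Editorial illustration, not in the original source: FULL_REGION_CNT()
 * gives the number of next-smaller pages that make up one page of size
 * rg_szc. On a configuration with 4k base pages and 2m level 1 pages,
 * FULL_REGION_CNT(1) == (2m >> 12) == 512, which matches SZCPAGES(1) above.
 */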

/* Return the leader for this mapping size */
#define	PP_GROUPLEADER(pp, szc) \
	(&(pp)[-(int)((pp)->p_pagenum & (SZCPAGES(szc)-1))])

/* Return the root page for this page based on p_szc */
#define	PP_PAGEROOT(pp) ((pp)->p_szc == 0 ? (pp) : \
	PP_GROUPLEADER((pp), (pp)->p_szc))

/*
 * The counter base must be per page_counter element to prevent
 * races when re-indexing, and the base page size element should
 * be aligned on a boundary of the given region size.
 *
 * We also round up the number of pages spanned by the counters
 * for a given region to PC_BASE_ALIGN in certain situations to simplify
 * the coding for some non-performance critical routines.
 */

#define	PC_BASE_ALIGN		((pfn_t)1 << PAGE_BSZS_SHIFT(MMU_PAGE_SIZES-1))
#define	PC_BASE_ALIGN_MASK	(PC_BASE_ALIGN - 1)

/*
 * cpu/mmu-dependent vm variables
 */
extern uint_t mmu_page_sizes;
extern uint_t mmu_exported_page_sizes;

/* For x86, userszc is the same as the kernel's szc */
#define	USERSZC_2_SZC(userszc)	(userszc)
#define	SZC_2_USERSZC(szc)	(szc)

/*
 * for hw_page_map_t, sized to hold the ratio of large page to base
 * pagesize (1024 max)
 */
typedef	short	hpmctr_t;

/*
 * get the setsize of the current cpu - assume homogeneous for x86
 */
extern int	l2cache_sz, l2cache_linesz, l2cache_assoc;

#define	L2CACHE_ALIGN		l2cache_linesz
#define	L2CACHE_ALIGN_MAX	64
#define	CPUSETSIZE()		\
	(l2cache_assoc ? (l2cache_sz / l2cache_assoc) : MMU_PAGESIZE)
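
/*
 * Editorial illustration, not in the original source: CPUSETSIZE() is the
 * size of one way of the L2 cache (total size divided by associativity);
 * physical addresses separated by this stride compete for the same cache
 * sets. For a hypothetical 512k, 8-way L2 it evaluates to 512k / 8 == 64k.
 * If the associativity is unknown, it falls back to MMU_PAGESIZE.
 */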

/*
 * Return the log2(pagesize(szc) / MMU_PAGESIZE) --- or the shift count
 * for the number of base pages in this pagesize
 */
#define	PAGE_BSZS_SHIFT(szc) (LEVEL_SHIFT(szc) - MMU_PAGESHIFT)

/*
 * Internal PG_ flags.
 */
#define	PGI_RELOCONLY	0x010000	/* opposite of PG_NORELOC */
#define	PGI_NOCAGE	0x020000	/* cage is disabled */
#define	PGI_PGCPHIPRI	0x040000	/* page_get_contig_page pri alloc */
#define	PGI_PGCPSZC0	0x080000	/* relocate base pagesize page */

/*
 * PGI range flags - should not overlap PGI flags
 */
#define	PGI_MT_RANGE0	0x1000000	/* mtype range to 0 */
#define	PGI_MT_RANGE4G	0x2000000	/* mtype range to 4g */
#define	PGI_MT_NEXT	0x4000000	/* get next mtype */
#define	PGI_MT_RANGE	(PGI_MT_RANGE0 | PGI_MT_RANGE4G)

/*
 * hash the as and addr to get a bin.
 */

#define	AS_2_BIN(as, seg, vp, addr, bin)				\
	bin = ((((uintptr_t)(addr) >> PAGESHIFT) + ((uintptr_t)(as) >> 4)) \
	    & page_colors_mask)

/*
 * cpu private vm data - accessed thru CPU->cpu_vm_data
 *	vc_pnum_memseg: tracks last memseg visited in page_numtopp_nolock()
 *	vc_pnext_memseg: tracks last memseg visited in page_nextn()
 *	vc_kmptr: original unaligned kmem pointer for this vm_cpu_data_t
 *	vc_kmsize: original kmem size for this vm_cpu_data_t
 */

typedef struct {
	struct memseg	*vc_pnum_memseg;
	struct memseg	*vc_pnext_memseg;
	void		*vc_kmptr;
	size_t		vc_kmsize;
} vm_cpu_data_t;

/* allocation size to ensure vm_cpu_data_t resides in its own cache line */
#define	VM_CPU_DATA_PADSIZE						\
	(P2ROUNDUP(sizeof (vm_cpu_data_t), L2CACHE_ALIGN_MAX))

/* for boot cpu before kmem is initialized */
extern char	vm_cpu_data0[];

/*
 * When a bin is empty, and we can't satisfy a color request correctly,
 * we scan.  If we assume that the programs have reasonable spatial
 * behavior, then it will not be a good idea to use the adjacent color.
 * Using the adjacent color would result in virtually adjacent addresses
 * mapping into the same spot in the cache.  So, if we stumble across
 * an empty bin, skip a bunch before looking.  After the first skip,
 * then just look one bin at a time so we don't miss our cache on
 * every look. Be sure to check every bin.  Page_create() will panic
 * if we miss a page.
 *
 * This also explains the `<=' in the for loops in both page_get_freelist()
 * and page_get_cachelist().  Since we checked the target bin, skipped
 * a bunch, then continued one at a time, we wind up checking the target bin
 * twice to make sure we get all of the bins.
 */
#define	BIN_STEP	19

#ifdef VM_STATS
struct vmm_vmstats_str {
	ulong_t pgf_alloc[MMU_PAGE_SIZES];	/* page_get_freelist */
	ulong_t pgf_allocok[MMU_PAGE_SIZES];
	ulong_t pgf_allocokrem[MMU_PAGE_SIZES];
	ulong_t pgf_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgf_allocdeferred;
	ulong_t	pgf_allocretry[MMU_PAGE_SIZES];
	ulong_t pgc_alloc;			/* page_get_cachelist */
	ulong_t pgc_allocok;
	ulong_t pgc_allocokrem;
	ulong_t pgc_allocokdeferred;
	ulong_t pgc_allocfailed;
	ulong_t	pgcp_alloc[MMU_PAGE_SIZES];	/* page_get_contig_pages */
	ulong_t	pgcp_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgcp_allocempty[MMU_PAGE_SIZES];
	ulong_t	pgcp_allocok[MMU_PAGE_SIZES];
	ulong_t	ptcp[MMU_PAGE_SIZES];		/* page_trylock_contig_pages */
	ulong_t	ptcpfreethresh[MMU_PAGE_SIZES];
	ulong_t	ptcpfailexcl[MMU_PAGE_SIZES];
	ulong_t	ptcpfailszc[MMU_PAGE_SIZES];
	ulong_t	ptcpfailcage[MMU_PAGE_SIZES];
	ulong_t	ptcpok[MMU_PAGE_SIZES];
	ulong_t	pgmf_alloc[MMU_PAGE_SIZES];	/* page_get_mnode_freelist */
	ulong_t	pgmf_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgmf_allocempty[MMU_PAGE_SIZES];
	ulong_t	pgmf_allocok[MMU_PAGE_SIZES];
	ulong_t	pgmc_alloc;			/* page_get_mnode_cachelist */
	ulong_t	pgmc_allocfailed;
	ulong_t	pgmc_allocempty;
	ulong_t	pgmc_allocok;
	ulong_t	pladd_free[MMU_PAGE_SIZES];	/* page_list_add/sub */
	ulong_t	plsub_free[MMU_PAGE_SIZES];
	ulong_t	pladd_cache;
	ulong_t	plsub_cache;
	ulong_t	plsubpages_szcbig;
	ulong_t	plsubpages_szc0;
	ulong_t	pff_req[MMU_PAGE_SIZES];	/* page_freelist_fill */
	ulong_t	pff_demote[MMU_PAGE_SIZES];
	ulong_t	pff_coalok[MMU_PAGE_SIZES];
	ulong_t	ppr_reloc[MMU_PAGE_SIZES];	/* page_relocate */
	ulong_t ppr_relocnoroot[MMU_PAGE_SIZES];
	ulong_t ppr_reloc_replnoroot[MMU_PAGE_SIZES];
	ulong_t ppr_relocnolock[MMU_PAGE_SIZES];
	ulong_t ppr_relocnomem[MMU_PAGE_SIZES];
	ulong_t ppr_relocok[MMU_PAGE_SIZES];
	ulong_t page_ctrs_coalesce;	/* page coalesce counter */
	ulong_t page_ctrs_cands_skip;	/* candidates useful */
	ulong_t page_ctrs_changed;	/* ctrs changed after locking */
	ulong_t page_ctrs_failed;	/* page_freelist_coalesce failed */
	ulong_t page_ctrs_coalesce_all;	/* page coalesce all counter */
	ulong_t page_ctrs_cands_skip_all; /* candidates useful for all func */
	ulong_t	restrict4gcnt;
};
extern struct vmm_vmstats_str vmm_vmstats;
#endif	/* VM_STATS */

extern size_t page_ctrs_sz(void);
extern caddr_t page_ctrs_alloc(caddr_t);
extern void page_ctr_sub(int, int, page_t *, int);
extern page_t *page_freelist_fill(uchar_t, int, int, int, pfn_t);
extern uint_t page_get_pagecolors(uint_t);

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_DEP_H */