/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#ifndef	_VM_DEP_H
#define	_VM_DEP_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/clock.h>
#include <vm/hat_pte.h>

/*
 * WARNING: vm_dep.h is included by files in common. As such, macros
 * dependent upon PTE36 such as LARGEPAGESIZE cannot be used in this file.
 */

#define	GETTICK()	tsc_read()

/* memranges in descending order */
extern pfn_t		*memranges;

#define	MEMRANGEHI(mtype)						\
	((mtype > 0) ? memranges[mtype - 1] - 1: physmax)
#define	MEMRANGELO(mtype)	(memranges[mtype])
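
/*
 * Illustrative example (not part of the original header): with a
 * hypothetical descending table of four pfn boundaries,
 *
 *	pfn_t memranges[] = { 0x100000, 0x80000, 0x1000, 0 };
 *
 * the macros above evaluate to
 *
 *	MEMRANGELO(0) == 0x100000	MEMRANGEHI(0) == physmax
 *	MEMRANGELO(1) == 0x80000	MEMRANGEHI(1) == 0xfffff
 *	MEMRANGELO(3) == 0		MEMRANGEHI(3) == 0xfff
 *
 * i.e. each range index selects an inclusive [lo, hi] pfn range, with
 * index 0 covering the highest physical addresses.
 */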

#define	MTYPE_FREEMEM(mt)						\
	(mnoderanges[mt].mnr_mt_clpgcnt +				\
	    mnoderanges[mt].mnr_mt_flpgcnt +				\
	    mnoderanges[mt].mnr_mt_lgpgcnt)

/*
 * combined memory ranges from mnode and memranges[] to manage a single
 * mnode/mtype dimension in the page lists.
 */
typedef struct {
	pfn_t	mnr_pfnlo;
	pfn_t	mnr_pfnhi;
	int	mnr_mnode;
	int	mnr_memrange;		/* index into memranges[] */
	/* maintain page list stats */
	pgcnt_t	mnr_mt_pgmax;		/* mnode/mtype max page cnt */
	pgcnt_t	mnr_mt_clpgcnt;		/* cache list cnt */
	pgcnt_t	mnr_mt_flpgcnt;		/* free list cnt - small pages */
	pgcnt_t	mnr_mt_lgpgcnt;		/* free list cnt - large pages */
#ifdef DEBUG
	struct mnr_mts {		/* mnode/mtype szc stats */
		pgcnt_t	mnr_mts_pgcnt;
		int	mnr_mts_colors;
		pgcnt_t *mnr_mtsc_pgcnt;
	} 	*mnr_mts;
#endif
} mnoderange_t;

#ifdef DEBUG
#define	PLCNT_SZ(ctrs_sz) {						\
	int	szc, colors;						\
	ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) *		\
	    mmu_page_sizes;						\
	for (szc = 0; szc < mmu_page_sizes; szc++) {			\
		colors = page_get_pagecolors(szc);			\
		ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;	\
	}								\
}

#define	PLCNT_INIT(addr) {						\
	int	mt, szc, colors;					\
	for (mt = 0; mt < mnoderangecnt; mt++) {			\
		mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;	\
		addr += (sizeof (struct mnr_mts) * mmu_page_sizes);	\
		for (szc = 0; szc < mmu_page_sizes; szc++) {		\
			colors = page_get_pagecolors(szc);		\
			mnoderanges[mt].mnr_mts[szc].mnr_mts_colors =	\
			    colors;					\
			mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =	\
			    (pgcnt_t *)addr;				\
			addr += (sizeof (pgcnt_t) * colors);		\
		}							\
	}								\
}
#define	PLCNT_DO(pp, mtype, szc, cnt, flags) {				\
	int	bin = PP_2_BIN(pp);					\
	if (flags & PG_CACHE_LIST)					\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_clpgcnt, cnt);				\
	else if (szc)							\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_lgpgcnt, cnt);				\
	else								\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_flpgcnt, cnt);				\
	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].		\
	    mnr_mts_pgcnt, cnt);					\
	atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].		\
	    mnr_mtsc_pgcnt[bin], cnt);					\
}
#else
#define	PLCNT_SZ(ctrs_sz)
#define	PLCNT_INIT(base)
#define	PLCNT_DO(pp, mtype, szc, cnt, flags) {				\
	if (flags & PG_CACHE_LIST)					\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_clpgcnt, cnt);				\
	else if (szc)							\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_lgpgcnt, cnt);				\
	else								\
		atomic_add_long(&mnoderanges[mtype].			\
		    mnr_mt_flpgcnt, cnt);				\
}
#endif

#define	PLCNT_INCR(pp, mnode, mtype, szc, flags) {			\
	long	cnt = (1 << PAGE_BSZS_SHIFT(szc));			\
	ASSERT(mtype == PP_2_MTYPE(pp));				\
	if (physmax4g && mtype <= mtype4g)				\
		atomic_add_long(&freemem4g, cnt);			\
	PLCNT_DO(pp, mtype, szc, cnt, flags);				\
}

#define	PLCNT_DECR(pp, mnode, mtype, szc, flags) {			\
	long	cnt = ((-1) << PAGE_BSZS_SHIFT(szc));			\
	ASSERT(mtype == PP_2_MTYPE(pp));				\
	if (physmax4g && mtype <= mtype4g)				\
		atomic_add_long(&freemem4g, cnt);			\
	PLCNT_DO(pp, mtype, szc, cnt, flags);				\
}
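
/*
 * Usage sketch (illustrative only, not taken from the original sources):
 * the page list add/sub paths are expected to invoke these roughly as
 *
 *	int mnode = PP_2_MEM_NODE(pp);
 *	int mtype = PP_2_MTYPE(pp);
 *
 *	PLCNT_INCR(pp, mnode, mtype, pp->p_szc, flags);
 *
 * which bumps the per mnode/mtype counters by SZCPAGES(pp->p_szc) pages
 * and, on systems with more than 4g, credits freemem4g whenever the page
 * lies below 4g (mtype <= mtype4g).
 */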

/*
 * macros to update page list max counts.  no-op on x86.
 */
#define	PLCNT_XFER_NORELOC(pp)

#define	PLCNT_MODIFY_MAX(pfn, cnt)	mtype_modify_max(pfn, (pgcnt_t)cnt)

extern mnoderange_t	*mnoderanges;
extern int		mnoderangecnt;
extern int		mtype4g;

/*
 * 4g memory management variables for systems with more than 4g of memory:
 *
 * physical memory below 4g is required for 32bit dma devices and, currently,
 * for kmem memory. On systems with more than 4g of memory, the pool of memory
 * below 4g can be depleted without any paging activity given that there is
 * likely to be sufficient memory above 4g.
 *
 * physmax4g is set true if the largest pfn is over 4g. The rest of the
 * 4g memory management code is enabled only when physmax4g is true.
 *
 * maxmem4g is the count of the maximum number of pages on the page lists
 * with physical addresses below 4g. It can be a lot less than 4g given that
 * the BIOS may reserve large chunks of space below 4g for hot plug pci
 * devices, the agp aperture, etc.
 *
 * freemem4g maintains the count of the number of available pages on the
 * page lists with physical addresses below 4g.
 *
 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
 * 6% (desfree4gshift = 4) of maxmem4g.
 *
 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
 * and the amount of physical memory above 4g is greater than freemem4g.
 * In this case, page_get_* routines will restrict below 4g allocations
 * for requests that don't specifically require it.
 */

extern int		physmax4g;
extern pgcnt_t		maxmem4g;
extern pgcnt_t		freemem4g;
extern int		lotsfree4gshift;
extern int		desfree4gshift;
#define	LOTSFREE4G	(maxmem4g >> lotsfree4gshift)
#define	DESFREE4G	(maxmem4g >> desfree4gshift)

#define	RESTRICT4G_ALLOC					\
	(physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))
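
/*
 * Worked example (hypothetical numbers, for illustration only): on a
 * machine with roughly 1g of usable memory below 4g, maxmem4g is about
 * 0x40000 pages, so with desfree4gshift == 4, DESFREE4G is 0x4000 pages
 * (~64MB).  RESTRICT4G_ALLOC then becomes true once freemem4g drops below
 * 0x4000 pages and less than half of all free memory lies below 4g.
 */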

/*
 * 16m memory management:
 *
 * reserve some amount of physical memory below 16m for legacy devices.
 *
 * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
 * 16m or if the allocation would drop the 16m pool below DESFREE16M.
 *
 * In this case, general page allocations via page_get_{free,cache}list
 * routines will be restricted from allocating from the 16m pool. Allocations
 * that require specific pfn ranges (page_get_anylist) are not restricted.
 */

#define	FREEMEM16M	MTYPE_FREEMEM(0)
#define	DESFREE16M	desfree16m
#define	RESTRICT16M_ALLOC(freemem, pgcnt)			\
	(freemem != 0 && ((freemem >= (FREEMEM16M)) ||		\
	    (FREEMEM16M  < (DESFREE16M + pgcnt))))
extern pgcnt_t		desfree16m;
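
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * desfree16m == 0x400 pages and FREEMEM16M == 0x500 pages, a request for
 * pgcnt == 0x200 pages is kept out of the 16m pool because
 * 0x500 < 0x400 + 0x200, i.e. satisfying it from below 16m would leave
 * the pool under its reserve; a large freemem value triggers the same
 * restriction via the first term of the macro.
 */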

extern int		restricted_kmemalloc;
extern int		memrange_num(pfn_t);
extern int		pfn_2_mtype(pfn_t);
extern int		mtype_func(int, int, uint_t);
extern void		mtype_modify_max(pfn_t, long);
extern int		mnode_pgcnt(int);

#define	NUM_MEM_RANGES	4		/* memory range types */

/*
 * Per page size free lists. Allocated dynamically.
 * dimensions [mtype][mmu_page_sizes][colors]
 *
 * mtype specifies a physical memory range with a unique mnode.
 */

extern page_t ****page_freelists;

#define	PAGE_FREELISTS(mnode, szc, color, mtype)		\
	(*(page_freelists[mtype][szc] + (color)))
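
/*
 * Indexing sketch (illustrative only): the head of the free list for
 * base-size (szc 0) pages of color 5 in mtype 2 would be read as
 *
 *	page_t *pp = PAGE_FREELISTS(mnode, 0, 5, 2);
 *
 * note that the mnode argument is unused by the macro itself on x86,
 * since an mtype already identifies a range within a single mnode.
 */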

/*
 * For now there is only a single size cache list. Allocated dynamically.
 * dimensions [mtype][colors]
 *
 * mtype specifies a physical memory range with a unique mnode.
 */
extern page_t ***page_cachelists;

#define	PAGE_CACHELISTS(mnode, color, mtype) 		\
	(*(page_cachelists[mtype] + (color)))

/*
 * There are mutexes for both the page freelist
 * and the page cachelist.  We want enough locks to make contention
 * reasonable, but not too many -- otherwise page_freelist_lock() gets
 * so expensive that it becomes the bottleneck!
 */

#define	NPC_MUTEX	16

extern kmutex_t	*fpc_mutex[NPC_MUTEX];
extern kmutex_t	*cpc_mutex[NPC_MUTEX];

extern page_t *page_get_mnode_freelist(int, uint_t, int, uchar_t, uint_t);
extern page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);

/* Find the bin for the given page if it was of size szc */
#define	PP_2_BIN_SZC(pp, szc)						\
	(((pp->p_pagenum) & page_colors_mask) >>			\
	(hw_page_array[szc].hp_shift - hw_page_array[0].hp_shift))

#define	PP_2_BIN(pp)		(PP_2_BIN_SZC(pp, pp->p_szc))

#define	PP_2_MEM_NODE(pp)	(PFN_2_MEM_NODE(pp->p_pagenum))
#define	PP_2_MTYPE(pp)		(pfn_2_mtype(pp->p_pagenum))
#define	PP_2_SZC(pp)		(pp->p_szc)

#define	SZCPAGES(szc)		(1 << PAGE_BSZS_SHIFT(szc))
#define	PFN_BASE(pfnum, szc)	(pfnum & ~(SZCPAGES(szc) - 1))
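
/*
 * Worked example (assuming 2m large pages, i.e. LEVEL_SHIFT(1) == 21):
 * PAGE_BSZS_SHIFT(1) == 21 - 12 == 9, so SZCPAGES(1) == 512 and
 * PFN_BASE(0x1234, 1) == 0x1200, the first pfn of the 512-page group
 * that contains pfn 0x1234.
 */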

extern struct cpu	cpus[];
#define	CPU0		cpus

#if defined(__amd64)

/*
 * set the mtype range (called from page_get_{free,cache}list)
 *   - set the range to above 4g if the system has more than 4g of memory
 *   and the amount of memory below 4g runs low. Otherwise, set the range
 *   to above 16m if the 16m threshold is reached, else set the range to
 *   all of memory, starting from the hi pfns.
 *
 * page_get_anylist gets its mtype range from the specified ddi_dma_attr_t.
 */
#define	MTYPE_INIT(mtype, vp, vaddr, flags, pgsz) {			\
	mtype = mnoderangecnt - 1;					\
	if (RESTRICT4G_ALLOC) {						\
		VM_STAT_ADD(vmm_vmstats.restrict4gcnt);			\
		/* here only for > 4g systems */			\
		flags |= PGI_MT_RANGE4G;				\
	} else if (RESTRICT16M_ALLOC(freemem, btop(pgsz))) {		\
		flags |= PGI_MT_RANGE16M;				\
	} else {							\
		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);		\
		flags |= PGI_MT_RANGE0;					\
	}								\
}

#elif defined(__i386)

/*
 * set the mtype range
 *   - kmem requests need to be below 4g if restricted_kmemalloc is set.
 *   - for non-kmem requests, set the range to above 4g if the amount of
 *   memory below 4g runs low.
 */

#define	MTYPE_INIT(mtype, vp, vaddr, flags, pgsz) {			\
	if (restricted_kmemalloc && (vp) == &kvp &&			\
	    (caddr_t)(vaddr) >= kernelheap &&				\
	    (caddr_t)(vaddr) < ekernelheap) {				\
		ASSERT(physmax4g);					\
		mtype = mtype4g;					\
		if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),		\
		    btop(pgsz))) {					\
			flags |= PGI_MT_RANGE16M;			\
		} else {						\
			VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);	\
			flags |= PGI_MT_RANGE0;				\
		}							\
	} else {							\
		mtype = mnoderangecnt - 1;				\
		if (RESTRICT4G_ALLOC) {					\
			VM_STAT_ADD(vmm_vmstats.restrict4gcnt);		\
			/* here only for > 4g systems */		\
			flags |= PGI_MT_RANGE4G;			\
		} else if (RESTRICT16M_ALLOC(freemem, btop(pgsz))) {	\
			flags |= PGI_MT_RANGE16M;			\
		} else {						\
			VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);	\
			flags |= PGI_MT_RANGE0;				\
		}							\
	}								\
}

#endif	/* __i386 */

/*
 * macros to loop through the mtype range (page_get_mnode_{free,cache,any}list,
 * and page_get_contig_pages)
 *
 * MTYPE_START sets the initial mtype; mtype is set to -1 if the specified
 * mtype range does not contain mnode.
 *
 * MTYPE_NEXT sets the next mtype; mtype is set to -1 if there are no more
 * valid mtypes in the range.
 */

#define	MTYPE_START(mnode, mtype, flags)				\
	(mtype = mtype_func(mnode, mtype, flags))

#define	MTYPE_NEXT(mnode, mtype, flags) {				\
	if (flags & PGI_MT_RANGE) {					\
		mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);	\
	} else {							\
		mtype = -1;						\
	}								\
}
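
/*
 * Loop sketch (illustrative only, not taken from the original sources):
 * callers such as page_get_mnode_freelist() are expected to walk the
 * mtype range roughly as
 *
 *	if (MTYPE_START(mnode, mtype, flags) == -1)
 *		return (NULL);		(mnode not in the requested range)
 *	do {
 *		... search the page lists for [mnode, mtype] ...
 *		MTYPE_NEXT(mnode, mtype, flags);
 *	} while (mtype != -1);
 */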

/* mtype init for page_get_replacement_page */

#define	MTYPE_PGR_INIT(mtype, flags, pp, mnode, pgcnt) {		\
	mtype = mnoderangecnt - 1;					\
	if (RESTRICT16M_ALLOC(freemem, pgcnt)) {			\
		flags |= PGI_MT_RANGE16M;				\
	} else {							\
		VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);		\
		flags |= PGI_MT_RANGE0;					\
	}								\
}

#define	MNODE_PGCNT(mnode)		mnode_pgcnt(mnode)

#define	MNODETYPE_2_PFN(mnode, mtype, pfnlo, pfnhi)			\
	ASSERT(mnoderanges[mtype].mnr_mnode == mnode);			\
	pfnlo = mnoderanges[mtype].mnr_pfnlo;				\
	pfnhi = mnoderanges[mtype].mnr_pfnhi;

#define	PC_BIN_MUTEX(mnode, bin, flags) ((flags & PG_FREE_LIST) ?	\
	&fpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode] :			\
	&cpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode])

#define	FPC_MUTEX(mnode, i)	(&fpc_mutex[i][mnode])
#define	CPC_MUTEX(mnode, i)	(&cpc_mutex[i][mnode])
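
/*
 * Illustrative example: with NPC_MUTEX == 16, bins that are congruent
 * modulo 16 share a lock, so PC_BIN_MUTEX(mnode, 5, PG_FREE_LIST) and
 * PC_BIN_MUTEX(mnode, 21, PG_FREE_LIST) both resolve to
 * &fpc_mutex[5][mnode].
 */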

#ifdef DEBUG
#define	CHK_LPG(pp, szc)	chk_lpg(pp, szc)
extern void	chk_lpg(page_t *, uchar_t);
#else
#define	CHK_LPG(pp, szc)
#endif

#define	FULL_REGION_CNT(rg_szc)	\
	(LEVEL_SIZE(rg_szc) >> LEVEL_SHIFT(rg_szc - 1))

/* Return the leader for this mapping size */
#define	PP_GROUPLEADER(pp, szc) \
	(&(pp)[-(int)((pp)->p_pagenum & (SZCPAGES(szc)-1))])

/* Return the root page for this page based on p_szc */
#define	PP_PAGEROOT(pp) ((pp)->p_szc == 0 ? (pp) : \
	PP_GROUPLEADER((pp), (pp)->p_szc))

/*
 * The counter base must be per page_counter element to prevent
 * races when re-indexing, and the base page size element should
 * be aligned on a boundary of the given region size.
 *
 * We also round up the number of pages spanned by the counters
 * for a given region to PC_BASE_ALIGN in certain situations to simplify
 * the coding for some non-performance critical routines.
 */

#define	PC_BASE_ALIGN		((pfn_t)1 << PAGE_BSZS_SHIFT(MMU_PAGE_SIZES-1))
#define	PC_BASE_ALIGN_MASK	(PC_BASE_ALIGN - 1)
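
/*
 * Worked example (assuming two page sizes, 4k and 2m): MMU_PAGE_SIZES - 1
 * is then the 2m size class, whose PAGE_BSZS_SHIFT is 9, so PC_BASE_ALIGN
 * is 512 pfns and counter regions are aligned on 2m boundaries.
 */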

/*
 * cpu/mmu-dependent vm variables
 */
extern uint_t mmu_page_sizes;
extern uint_t mmu_exported_page_sizes;

/* For x86, userszc is the same as the kernel's szc */
#define	USERSZC_2_SZC(userszc)	(userszc)
#define	SZC_2_USERSZC(szc)	(szc)

/*
 * for hw_page_map_t, sized to hold the ratio of large page to base
 * pagesize (1024 max)
 */
typedef	short	hpmctr_t;

/*
 * get the setsize of the current cpu - assume homogeneous for x86
 */
extern int	l2cache_sz, l2cache_linesz, l2cache_assoc;

#define	L2CACHE_ALIGN		l2cache_linesz
#define	L2CACHE_ALIGN_MAX	64
#define	CPUSETSIZE()		\
	(l2cache_assoc ? (l2cache_sz / l2cache_assoc) : MMU_PAGESIZE)
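
/*
 * Worked example (hypothetical cache geometry): a 1MB, 16-way L2 cache
 * gives CPUSETSIZE() == 0x100000 / 16 == 64k, the span of addresses that
 * compete for the same cache sets; when no associativity information is
 * available the macro falls back to MMU_PAGESIZE.
 */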

/*
 * Return the log2(pagesize(szc) / MMU_PAGESIZE) --- or the shift count
 * for the number of base pages in this pagesize
 */
#define	PAGE_BSZS_SHIFT(szc) (LEVEL_SHIFT(szc) - MMU_PAGESHIFT)

/*
 * Internal PG_ flags.
 */
#define	PGI_RELOCONLY	0x010000	/* opposite of PG_NORELOC */
#define	PGI_NOCAGE	0x020000	/* cage is disabled */
#define	PGI_PGCPHIPRI	0x040000	/* page_get_contig_page pri alloc */
#define	PGI_PGCPSZC0	0x080000	/* relocate base pagesize page */

/*
 * PGI range flags - should not overlap PGI flags
 */
#define	PGI_MT_RANGE0	0x1000000	/* mtype range to 0 */
#define	PGI_MT_RANGE16M	0x2000000	/* mtype range to 16m */
#define	PGI_MT_RANGE4G	0x4000000	/* mtype range to 4g */
#define	PGI_MT_NEXT	0x8000000	/* get next mtype */
#define	PGI_MT_RANGE	(PGI_MT_RANGE0 | PGI_MT_RANGE16M | PGI_MT_RANGE4G)

/*
 * hash the as and addr to get a bin.
 */

#define	AS_2_BIN(as, seg, vp, addr, bin)				\
	bin = ((((uintptr_t)(addr) >> PAGESHIFT) + ((uintptr_t)(as) >> 4)) \
	    & page_colors_mask)
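
/*
 * Illustrative example (hypothetical values): with page_colors_mask == 0x3f,
 * an as structure at 0xd4002000 and addr == 0x8048000,
 *
 *	bin = ((0x8048000 >> 12) + (0xd4002000 >> 4)) & 0x3f
 *	    = (0x8048 + 0xd400200) & 0x3f = 0x08
 *
 * so consecutive pages of a mapping walk through consecutive bins, while
 * different address spaces start at different offsets.
 */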

/*
 * cpu private vm data - accessed thru CPU->cpu_vm_data
 *	vc_pnum_memseg: tracks last memseg visited in page_numtopp_nolock()
 *	vc_pnext_memseg: tracks last memseg visited in page_nextn()
 *	vc_kmptr: original unaligned kmem pointer for this vm_cpu_data_t
 *	vc_kmsize: original kmem size for this vm_cpu_data_t
 */

typedef struct {
	struct memseg	*vc_pnum_memseg;
	struct memseg	*vc_pnext_memseg;
	void		*vc_kmptr;
	size_t		vc_kmsize;
} vm_cpu_data_t;

/* allocation size to ensure vm_cpu_data_t resides in its own cache line */
#define	VM_CPU_DATA_PADSIZE						\
	(P2ROUNDUP(sizeof (vm_cpu_data_t), L2CACHE_ALIGN_MAX))
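
/*
 * Worked example (assuming 64-bit pointers): sizeof (vm_cpu_data_t) is
 * 32 bytes, so VM_CPU_DATA_PADSIZE rounds the allocation up to
 * L2CACHE_ALIGN_MAX == 64 bytes, keeping each cpu's copy in its own
 * cache line.
 */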

/* for boot cpu before kmem is initialized */
extern char	vm_cpu_data0[];

/*
 * When a bin is empty, and we can't satisfy a color request correctly,
 * we scan.  If we assume that the programs have reasonable spatial
 * behavior, then it will not be a good idea to use the adjacent color.
 * Using the adjacent color would result in virtually adjacent addresses
 * mapping into the same spot in the cache.  So, if we stumble across
 * an empty bin, skip a bunch before looking.  After the first skip,
 * then just look one bin at a time so we don't miss our cache on
 * every look. Be sure to check every bin.  Page_create() will panic
 * if we miss a page.
 *
 * This also explains the `<=' in the for loops in both page_get_freelist()
 * and page_get_cachelist().  Since we checked the target bin, skipped
 * a bunch, then continued one bin at a time, we wind up checking the
 * target bin twice to make sure we get all of the bins.
 */
#define	BIN_STEP	19
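
/*
 * Scan sketch (illustrative only, not taken from the original sources):
 * the search described above is of the form
 *
 *	bin = origbin;
 *	for (i = 0; i <= page_colors; i++) {
 *		... try bin ...
 *		bin += (i == 0) ? BIN_STEP : 1;
 *		bin &= page_colors_mask;
 *	}
 *
 * the first probe after a miss jumps BIN_STEP bins ahead, later probes
 * advance one bin at a time, and the `<=' bound revisits the starting bin.
 */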

#ifdef VM_STATS
struct vmm_vmstats_str {
	ulong_t pgf_alloc[MMU_PAGE_SIZES];	/* page_get_freelist */
	ulong_t pgf_allocok[MMU_PAGE_SIZES];
	ulong_t pgf_allocokrem[MMU_PAGE_SIZES];
	ulong_t pgf_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgf_allocdeferred;
	ulong_t	pgf_allocretry[MMU_PAGE_SIZES];
	ulong_t pgc_alloc;			/* page_get_cachelist */
	ulong_t pgc_allocok;
	ulong_t pgc_allocokrem;
	ulong_t pgc_allocokdeferred;
	ulong_t pgc_allocfailed;
	ulong_t	pgcp_alloc[MMU_PAGE_SIZES];	/* page_get_contig_pages */
	ulong_t	pgcp_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgcp_allocempty[MMU_PAGE_SIZES];
	ulong_t	pgcp_allocok[MMU_PAGE_SIZES];
	ulong_t	ptcp[MMU_PAGE_SIZES];		/* page_trylock_contig_pages */
	ulong_t	ptcpfreethresh[MMU_PAGE_SIZES];
	ulong_t	ptcpfailexcl[MMU_PAGE_SIZES];
	ulong_t	ptcpfailszc[MMU_PAGE_SIZES];
	ulong_t	ptcpfailcage[MMU_PAGE_SIZES];
	ulong_t	ptcpok[MMU_PAGE_SIZES];
	ulong_t	pgmf_alloc[MMU_PAGE_SIZES];	/* page_get_mnode_freelist */
	ulong_t	pgmf_allocfailed[MMU_PAGE_SIZES];
	ulong_t	pgmf_allocempty[MMU_PAGE_SIZES];
	ulong_t	pgmf_allocok[MMU_PAGE_SIZES];
	ulong_t	pgmc_alloc;			/* page_get_mnode_cachelist */
	ulong_t	pgmc_allocfailed;
	ulong_t	pgmc_allocempty;
	ulong_t	pgmc_allocok;
	ulong_t	pladd_free[MMU_PAGE_SIZES];	/* page_list_add/sub */
	ulong_t	plsub_free[MMU_PAGE_SIZES];
	ulong_t	pladd_cache;
	ulong_t	plsub_cache;
	ulong_t	plsubpages_szcbig;
	ulong_t	plsubpages_szc0;
	ulong_t	pff_req[MMU_PAGE_SIZES];	/* page_freelist_fill */
	ulong_t	pff_demote[MMU_PAGE_SIZES];
	ulong_t	pff_coalok[MMU_PAGE_SIZES];
	ulong_t	ppr_reloc[MMU_PAGE_SIZES];	/* page_relocate */
	ulong_t ppr_relocnoroot[MMU_PAGE_SIZES];
	ulong_t ppr_reloc_replnoroot[MMU_PAGE_SIZES];
	ulong_t ppr_relocnolock[MMU_PAGE_SIZES];
	ulong_t ppr_relocnomem[MMU_PAGE_SIZES];
	ulong_t ppr_relocok[MMU_PAGE_SIZES];
	ulong_t page_ctrs_coalesce;	/* page coalesce counter */
	ulong_t page_ctrs_cands_skip;	/* candidates useful */
	ulong_t page_ctrs_changed;	/* ctrs changed after locking */
	ulong_t page_ctrs_failed;	/* page_freelist_coalesce failed */
	ulong_t page_ctrs_coalesce_all;	/* page coalesce all counter */
	ulong_t page_ctrs_cands_skip_all; /* candidates useful for all func */
	ulong_t	restrict4gcnt;
	ulong_t	unrestrict16mcnt;	/* non-DMA 16m allocs allowed */
};
extern struct vmm_vmstats_str vmm_vmstats;
#endif	/* VM_STATS */

extern size_t page_ctrs_sz(void);
extern caddr_t page_ctrs_alloc(caddr_t);
extern void page_ctr_sub(int, int, page_t *, int);
extern page_t *page_freelist_fill(uchar_t, int, int, int, pfn_t);
extern uint_t page_get_pagecolors(uint_t);

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_DEP_H */