xref: /illumos-gate/usr/src/uts/common/vm/vm_page.c (revision 60425338a8e9a5ded7e559e227eedd42d30c8967)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989  AT&T	*/
27 /*	  All Rights Reserved  	*/
28 
29 /*
30  * University Copyright- Copyright (c) 1982, 1986, 1988
31  * The Regents of the University of California
32  * All Rights Reserved
33  *
34  * University Acknowledgment- Portions of this document are derived from
35  * software developed by the University of California, Berkeley, and its
36  * contributors.
37  */
38 
39 #pragma ident	"%Z%%M%	%I%	%E% SMI"
40 
41 /*
42  * VM - physical page management.
43  */
44 
45 #include <sys/types.h>
46 #include <sys/t_lock.h>
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/errno.h>
50 #include <sys/time.h>
51 #include <sys/vnode.h>
52 #include <sys/vm.h>
53 #include <sys/vtrace.h>
54 #include <sys/swap.h>
55 #include <sys/cmn_err.h>
56 #include <sys/tuneable.h>
57 #include <sys/sysmacros.h>
58 #include <sys/cpuvar.h>
59 #include <sys/callb.h>
60 #include <sys/debug.h>
61 #include <sys/tnf_probe.h>
62 #include <sys/condvar_impl.h>
63 #include <sys/mem_config.h>
64 #include <sys/mem_cage.h>
65 #include <sys/kmem.h>
66 #include <sys/atomic.h>
67 #include <sys/strlog.h>
68 #include <sys/mman.h>
69 #include <sys/ontrap.h>
70 #include <sys/lgrp.h>
71 #include <sys/vfs.h>
72 
73 #include <vm/hat.h>
74 #include <vm/anon.h>
75 #include <vm/page.h>
76 #include <vm/seg.h>
77 #include <vm/pvn.h>
78 #include <vm/seg_kmem.h>
79 #include <vm/vm_dep.h>
80 
81 #include <fs/fs_subr.h>
82 
83 static int nopageage = 0;
84 
85 static pgcnt_t max_page_get;	/* max page_get request size in pages */
86 pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */
87 
88 /*
89  * freemem_lock protects all freemem variables:
90  * availrmem. This lock also protects the globals which track the
91  * availrmem changes for accurate kernel footprint calculation.
92  * See below for an explanation of these
93  * globals.
94  */
95 kmutex_t freemem_lock;
96 pgcnt_t availrmem;
97 pgcnt_t availrmem_initial;
98 
99 /*
100  * These globals track availrmem changes to get a more accurate
100  * estimate of the kernel size. Historically pp_kernel is used for
102  * kernel size and is based on availrmem. But availrmem is adjusted for
103  * locked pages in the system not just for kernel locked pages.
104  * These new counters will track the pages locked through segvn and
105  * by explicit user locking.
106  *
107  * segvn_pages_locked : This tracks, on a global basis, how many pages
108  * are currently locked because of I/O.
109  *
110  * pages_locked : How many pages are locked because of user-specified
111  * locking through mlock or plock.
112  *
113  * pages_useclaim, pages_claimed : These two variables track the
114  * claim adjustments because of the protection changes on a segvn segment.
115  *
116  * All these globals are protected by the same lock which protects availrmem.
117  */
118 pgcnt_t segvn_pages_locked;
119 pgcnt_t pages_locked;
120 pgcnt_t pages_useclaim;
121 pgcnt_t pages_claimed;
122 
123 
124 /*
125  * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
126  */
127 static kmutex_t	new_freemem_lock;
128 static uint_t	freemem_wait;	/* someone waiting for freemem */
129 static kcondvar_t freemem_cv;
130 
131 /*
132  * The logical page free list is maintained as two lists, the 'free'
133  * and the 'cache' lists.
134  * The free list contains those pages that should be reused first.
135  *
136  * The implementation of the lists is machine dependent.
137  * page_get_freelist(), page_get_cachelist(),
138  * page_list_sub(), and page_list_add()
139  * form the interface to the machine dependent implementation.
140  *
141  * Pages with p_free set are on the cache list.
142  * Pages with p_free and p_age set are on the free list.
143  *
144  * A page may be locked while on either list.
145  */
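
/*
 * Summary of the p_free/p_age encoding described above:
 *
 *	p_free	p_age	list
 *	------	-----	---------------------
 *	  0	  -	neither (page in use)
 *	  1	  0	cache list
 *	  1	  1	free list
 */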
146 
147 /*
148  * free list accounting stuff.
149  *
150  *
151  * Spread out the value for the number of pages on the
152  * page free and page cache lists.  If there is just one
153  * value, then it must be under just one lock.
154  * The lock contention and cache traffic are a real bother.
155  *
156  * When we acquire and then drop a single pcf lock
157  * we can start in the middle of the array of pcf structures.
158  * If we acquire more than one pcf lock at a time, we need to
159  * start at the front to avoid deadlocking.
160  *
161  * pcf_count holds the number of pages in each pool.
162  *
163  * pcf_block is set when page_create_get_something() has asked the
164  * PSM page freelist and page cachelist routines without specifying
165  * a color and nothing came back.  This is used to block anything
166  * else from moving pages from one list to the other while the
167  * lists are searched again.  If a page is freed while pcf_block is
168  * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
169  * of clearing pcf_block, doing the wakeups, etc.
170  */
171 
172 #if NCPU <= 4
173 #define	PAD	2
174 #define	PCF_FANOUT	4
175 static	uint_t	pcf_mask = PCF_FANOUT - 1;
176 #else
177 #define	PAD	10
178 #ifdef sun4v
179 #define	PCF_FANOUT	32
180 #else
181 #define	PCF_FANOUT	128
182 #endif
183 static	uint_t	pcf_mask = PCF_FANOUT - 1;
184 #endif
185 
186 struct pcf {
187 	kmutex_t	pcf_lock;	/* protects the structure */
188 	uint_t		pcf_count;	/* page count */
189 	uint_t		pcf_wait;	/* number of waiters */
190 	uint_t		pcf_block; 	/* pcgs flag to page_free() */
191 	uint_t		pcf_reserve; 	/* pages freed after pcf_block set */
192 	uint_t		pcf_fill[PAD];	/* to line up on the caches */
193 };
194 
195 static struct	pcf	pcf[PCF_FANOUT];
196 #define	PCF_INDEX()	((CPU->cpu_id) & (pcf_mask))
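
/*
 * Illustrative sketch (not part of the original source) of the
 * single-bucket fast path implied by the comments above: a thread
 * starts at its own CPU's bucket and takes a page from it when one is
 * available, falling back to neighboring buckets otherwise.  Remember
 * the rule above: when more than one pcf lock must be held at a time,
 * start from pcf[0] to avoid deadlock.
 *
 *	struct pcf *p = &pcf[PCF_INDEX()];
 *
 *	mutex_enter(&p->pcf_lock);
 *	if (p->pcf_count > 0) {
 *		p->pcf_count--;		(one page is now accounted for)
 *		mutex_exit(&p->pcf_lock);
 *	} else {
 *		mutex_exit(&p->pcf_lock);
 *		(advance to the next bucket and retry)
 *	}
 */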
197 
198 kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
199 kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
200 kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
201 static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */
202 
203 #define	PAGE_LOCK_MAXIMUM \
204 	((1 << (sizeof (((page_t *)0)->p_lckcnt) * NBBY)) - 1)
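
/*
 * I.e., the largest count p_lckcnt can hold: with NBBY bits per byte,
 * an 8-bit p_lckcnt yields 255 and a 16-bit one yields 65535.
 */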
205 
206 #ifdef VM_STATS
207 
208 /*
209  * No locks, but so what, they are only statistics.
210  */
211 
212 static struct page_tcnt {
213 	int	pc_free_cache;		/* free's into cache list */
214 	int	pc_free_dontneed;	/* free's with dontneed */
215 	int	pc_free_pageout;	/* free's from pageout */
216 	int	pc_free_free;		/* free's into free list */
217 	int	pc_free_pages;		/* free's into large page free list */
218 	int	pc_destroy_pages;	/* large page destroy's */
219 	int	pc_get_cache;		/* get's from cache list */
220 	int	pc_get_free;		/* get's from free list */
221 	int	pc_reclaim;		/* reclaim's */
222 	int	pc_abortfree;		/* abort's of free pages */
223 	int	pc_find_hit;		/* find's that find page */
224 	int	pc_find_miss;		/* find's that don't find page */
225 	int	pc_destroy_free;	/* # of free pages destroyed */
226 #define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
227 	int	pc_find_hashlen[PC_HASH_CNT+1];
228 	int	pc_addclaim_pages;
229 	int	pc_subclaim_pages;
230 	int	pc_free_replacement_page[2];
231 	int	pc_try_demote_pages[6];
232 	int	pc_demote_pages[2];
233 } pagecnt;
234 
235 uint_t	hashin_count;
236 uint_t	hashin_not_held;
237 uint_t	hashin_already;
238 
239 uint_t	hashout_count;
240 uint_t	hashout_not_held;
241 
242 uint_t	page_create_count;
243 uint_t	page_create_not_enough;
244 uint_t	page_create_not_enough_again;
245 uint_t	page_create_zero;
246 uint_t	page_create_hashout;
247 uint_t	page_create_page_lock_failed;
248 uint_t	page_create_trylock_failed;
249 uint_t	page_create_found_one;
250 uint_t	page_create_hashin_failed;
251 uint_t	page_create_dropped_phm;
252 
253 uint_t	page_create_new;
254 uint_t	page_create_exists;
255 uint_t	page_create_putbacks;
256 uint_t	page_create_overshoot;
257 
258 uint_t	page_reclaim_zero;
259 uint_t	page_reclaim_zero_locked;
260 
261 uint_t	page_rename_exists;
262 uint_t	page_rename_count;
263 
264 uint_t	page_lookup_cnt[20];
265 uint_t	page_lookup_nowait_cnt[10];
266 uint_t	page_find_cnt;
267 uint_t	page_exists_cnt;
268 uint_t	page_exists_forreal_cnt;
269 uint_t	page_lookup_dev_cnt;
270 uint_t	get_cachelist_cnt;
271 uint_t	page_create_cnt[10];
272 uint_t	alloc_pages[8];
273 uint_t	page_exphcontg[19];
274 uint_t  page_create_large_cnt[10];
275 
276 /*
277  * Collects statistics.
278  */
279 #define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
280 	uint_t	mylen = 0; \
281 			\
282 	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash, mylen++) { \
283 		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
284 			break; \
285 	} \
286 	if ((pp) != NULL) \
287 		pagecnt.pc_find_hit++; \
288 	else \
289 		pagecnt.pc_find_miss++; \
290 	if (mylen > PC_HASH_CNT) \
291 		mylen = PC_HASH_CNT; \
292 	pagecnt.pc_find_hashlen[mylen]++; \
293 }
294 
295 #else	/* VM_STATS */
296 
297 /*
298  * Don't collect statistics
299  */
300 #define	PAGE_HASH_SEARCH(index, pp, vp, off) { \
301 	for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
302 		if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
303 			break; \
304 	} \
305 }
306 
307 #endif	/* VM_STATS */
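
/*
 * Illustrative usage of PAGE_HASH_SEARCH, sketching the pattern the
 * lookup routines below follow: compute the hash index, take the
 * bucket's mutex when the page identity must stay stable across the
 * search, then walk the chain.
 *
 *	index = PAGE_HASH_FUNC(vp, off);
 *	phm = PAGE_HASH_MUTEX(index);
 *
 *	mutex_enter(phm);
 *	PAGE_HASH_SEARCH(index, pp, vp, off);
 *	mutex_exit(phm);
 */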
308 
309 
310 
311 #ifdef DEBUG
312 #define	MEMSEG_SEARCH_STATS
313 #endif
314 
315 #ifdef MEMSEG_SEARCH_STATS
316 struct memseg_stats {
317     uint_t nsearch;
318     uint_t nlastwon;
319     uint_t nhashwon;
320     uint_t nnotfound;
321 } memseg_stats;
322 
323 #define	MEMSEG_STAT_INCR(v) \
324 	atomic_add_32(&memseg_stats.v, 1)
325 #else
326 #define	MEMSEG_STAT_INCR(x)
327 #endif
328 
329 struct memseg *memsegs;		/* list of memory segments */
330 
331 
332 static void page_init_mem_config(void);
333 static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
334 static void page_do_hashout(page_t *);
335 
336 static void page_demote_vp_pages(page_t *);
337 
338 /*
339  * vm subsystem related initialization
340  */
341 void
342 vm_init(void)
343 {
344 	boolean_t callb_vm_cpr(void *, int);
345 
346 	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
347 	page_init_mem_config();
348 	page_retire_init();
349 }
350 
351 /*
352  * This function is called at startup and when memory is added or deleted.
353  */
354 void
355 init_pages_pp_maximum()
356 {
357 	static pgcnt_t p_min;
358 	static pgcnt_t pages_pp_maximum_startup;
359 	static pgcnt_t avrmem_delta;
360 	static int init_done;
361 	static int user_set;	/* true if set in /etc/system */
362 
363 	if (init_done == 0) {
364 
365 		/* If the user specified a value, save it */
366 		if (pages_pp_maximum != 0) {
367 			user_set = 1;
368 			pages_pp_maximum_startup = pages_pp_maximum;
369 		}
370 
371 		/*
372 		 * The initial setting of pages_pp_maximum is based
373 		 * on the value of availrmem just after the start-up
374 		 * allocations. To preserve this relationship at run
375 		 * time, use a delta from availrmem_initial.
376 		 */
377 		ASSERT(availrmem_initial >= availrmem);
378 		avrmem_delta = availrmem_initial - availrmem;
379 
380 		/* The allowable floor of pages_pp_maximum */
381 		p_min = tune.t_minarmem + 100;
382 
383 		/* Make sure we don't come through here again. */
384 		init_done = 1;
385 	}
386 	/*
387 	 * Determine pages_pp_maximum, the number of currently available
388 	 * pages (availrmem) that can't be `locked'. If not set by
389 	 * the user, we set it to 4% of the currently available memory
390 	 * plus 4MB.
391 	 * But we also insist that it be greater than tune.t_minarmem;
392 	 * otherwise a process could lock down a lot of memory, get swapped
393 	 * out, and never have enough to get swapped back in.
394 	 */
395 	if (user_set)
396 		pages_pp_maximum = pages_pp_maximum_startup;
397 	else
398 		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
399 		    + btop(4 * 1024 * 1024);
400 
401 	if (pages_pp_maximum <= p_min) {
402 		pages_pp_maximum = p_min;
403 	}
404 }
405 
406 void
407 set_max_page_get(pgcnt_t target_total_pages)
408 {
409 	max_page_get = target_total_pages / 2;
410 }
411 
412 static pgcnt_t pending_delete;
413 
414 /*ARGSUSED*/
415 static void
416 page_mem_config_post_add(
417 	void *arg,
418 	pgcnt_t delta_pages)
419 {
420 	set_max_page_get(total_pages - pending_delete);
421 	init_pages_pp_maximum();
422 }
423 
424 /*ARGSUSED*/
425 static int
426 page_mem_config_pre_del(
427 	void *arg,
428 	pgcnt_t delta_pages)
429 {
430 	pgcnt_t nv;
431 
432 	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
433 	set_max_page_get(total_pages - nv);
434 	return (0);
435 }
436 
437 /*ARGSUSED*/
438 static void
439 page_mem_config_post_del(
440 	void *arg,
441 	pgcnt_t delta_pages,
442 	int cancelled)
443 {
444 	pgcnt_t nv;
445 
446 	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
447 	set_max_page_get(total_pages - nv);
448 	if (!cancelled)
449 		init_pages_pp_maximum();
450 }
451 
452 static kphysm_setup_vector_t page_mem_config_vec = {
453 	KPHYSM_SETUP_VECTOR_VERSION,
454 	page_mem_config_post_add,
455 	page_mem_config_pre_del,
456 	page_mem_config_post_del,
457 };
458 
459 static void
460 page_init_mem_config(void)
461 {
462 	int ret;
463 
464 	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
465 	ASSERT(ret == 0);
466 }
467 
468 /*
469  * Evenly spread out the PCF counters for large free pages
470  */
471 static void
472 page_free_large_ctr(pgcnt_t npages)
473 {
474 	static struct pcf	*p = pcf;
475 	pgcnt_t			lump;
476 
477 	freemem += npages;
478 
479 	lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT;
480 
481 	while (npages > 0) {
482 
483 		ASSERT(!p->pcf_block);
484 
485 		if (lump < npages) {
486 			p->pcf_count += (uint_t)lump;
487 			npages -= lump;
488 		} else {
489 			p->pcf_count += (uint_t)npages;
490 			npages = 0;
491 		}
492 
493 		ASSERT(!p->pcf_wait);
494 
495 		if (++p > &pcf[PCF_FANOUT - 1])
496 			p = pcf;
497 	}
498 
499 	ASSERT(npages == 0);
500 }
501 
502 /*
503  * Add a physical chunk of memory to the system free lists during startup.
504  * Platform specific startup() allocates the memory for the page structs.
505  *
506  * num	- number of page structures
507  * base - page number (pfn) to be associated with the first page.
508  *
509  * Since we are doing this during startup (i.e., single-threaded), we will
510  * use shortcut routines to avoid any locking overhead while putting all
511  * these pages on the freelists.
512  *
513  * NOTE: Any changes performed to page_free() must also be performed to
514  *	 add_physmem() since this is how we initialize all page_t's at
515  *	 boot time.
516  */
517 void
518 add_physmem(
519 	page_t	*pp,
520 	pgcnt_t	num,
521 	pfn_t	pnum)
522 {
523 	page_t	*root = NULL;
524 	uint_t	szc = page_num_pagesizes() - 1;
525 	pgcnt_t	large = page_get_pagecnt(szc);
526 	pgcnt_t	cnt = 0;
527 
528 	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
529 		"add_physmem:pp %p num %lu", pp, num);
530 
531 	/*
532 	 * Arbitrarily limit the max page_get request
533 	 * to 1/2 of the page structs we have.
534 	 */
535 	total_pages += num;
536 	set_max_page_get(total_pages);
537 
538 	PLCNT_MODIFY_MAX(pnum, (long)num);
539 
540 	/*
541 	 * The physical space for the pages array
542 	 * representing ram pages has already been
543 	 * allocated.  Here we initialize each lock
544 	 * in the page structure, and put each on
545 	 * the free list
546 	 */
547 	for (; num; pp++, pnum++, num--) {
548 
549 		/*
550 		 * this needs to fill in the page number
551 		 * and do any other arch specific initialization
552 		 */
553 		add_physmem_cb(pp, pnum);
554 
555 		/*
556 		 * Initialize the page lock as unlocked, since nobody
557 		 * can see or access this page yet.
558 		 */
559 		pp->p_selock = 0;
560 
561 		/*
562 		 * Initialize IO lock
563 		 */
564 		page_iolock_init(pp);
565 
566 		/*
567 		 * initialize other fields in the page_t
568 		 */
569 		PP_SETFREE(pp);
570 		page_clr_all_props(pp);
571 		PP_SETAGED(pp);
572 		pp->p_offset = (u_offset_t)-1;
573 		pp->p_next = pp;
574 		pp->p_prev = pp;
575 
576 		/*
577 		 * Simple case: System doesn't support large pages.
578 		 */
579 		if (szc == 0) {
580 			pp->p_szc = 0;
581 			page_free_at_startup(pp);
582 			continue;
583 		}
584 
585 		/*
586 		 * Handle unaligned pages, we collect them up onto
587 		 * the root page until we have a full large page.
588 		 */
589 		if (!IS_P2ALIGNED(pnum, large)) {
590 
591 			/*
592 			 * If not in a large page,
593 			 * just free as small page.
594 			 */
595 			if (root == NULL) {
596 				pp->p_szc = 0;
597 				page_free_at_startup(pp);
598 				continue;
599 			}
600 
601 			/*
602 			 * Link a constituent page into the large page.
603 			 */
604 			pp->p_szc = szc;
605 			page_list_concat(&root, &pp);
606 
607 			/*
608 			 * When large page is fully formed, free it.
609 			 */
610 			if (++cnt == large) {
611 				page_free_large_ctr(cnt);
612 				page_list_add_pages(root, PG_LIST_ISINIT);
613 				root = NULL;
614 				cnt = 0;
615 			}
616 			continue;
617 		}
618 
619 		/*
620 		 * At this point we have a page number which
621 		 * is aligned. We assert that we aren't already
622 		 * in a different large page.
623 		 */
624 		ASSERT(IS_P2ALIGNED(pnum, large));
625 		ASSERT(root == NULL && cnt == 0);
626 
627 		/*
628 		 * If insufficient number of pages left to form
629 		 * a large page, just free the small page.
630 		 */
631 		if (num < large) {
632 			pp->p_szc = 0;
633 			page_free_at_startup(pp);
634 			continue;
635 		}
636 
637 		/*
638 		 * Otherwise start a new large page.
639 		 */
640 		pp->p_szc = szc;
641 		cnt++;
642 		root = pp;
643 	}
644 	ASSERT(root == NULL && cnt == 0);
645 }
646 
647 /*
648  * Find a page representing the specified [vp, offset].
649  * If we find the page but it is in transit coming in,
650  * it will have an "exclusive" lock and we wait for
651  * the i/o to complete.  A page found on the free list
652  * is always reclaimed and then locked.  On success, the page
653  * is locked, its data is valid and it isn't on the free
654  * list, while a NULL is returned if the page doesn't exist.
655  */
656 page_t *
657 page_lookup(vnode_t *vp, u_offset_t off, se_t se)
658 {
659 	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
660 }
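
/*
 * A minimal caller sketch (illustrative, not part of the original
 * source): look the page up shared-locked and drop the lock when done.
 *
 *	pp = page_lookup(vp, off, SE_SHARED);
 *	if (pp != NULL) {
 *		(use the page; its data is valid and it is not free)
 *		page_unlock(pp);
 *	}
 */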
661 
662 /*
663  * Find a page representing the specified [vp, offset].
664  * We either return the one we found or, if passed in,
665  * create one with identity of [vp, offset] of the
666  * pre-allocated page. If we find an existing page but it is
667  * in transit coming in, it will have an "exclusive" lock
668  * and we wait for the i/o to complete.  A page found on
669  * the free list is always reclaimed and then locked.
670  * On success, the page is locked, its data is valid and
671  * it isn't on the free list, while a NULL is returned
672  * if the page doesn't exist and newpp is NULL.
673  */
674 page_t *
675 page_lookup_create(
676 	vnode_t *vp,
677 	u_offset_t off,
678 	se_t se,
679 	page_t *newpp,
680 	spgcnt_t *nrelocp,
681 	int flags)
682 {
683 	page_t		*pp;
684 	kmutex_t	*phm;
685 	ulong_t		index;
686 	uint_t		hash_locked;
687 	uint_t		es;
688 
689 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
690 	VM_STAT_ADD(page_lookup_cnt[0]);
691 	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);
692 
693 	/*
694 	 * Acquire the appropriate page hash lock since
695 	 * we have to search the hash list.  Pages that
696 	 * hash to this list can't change identity while
697 	 * this lock is held.
698 	 */
699 	hash_locked = 0;
700 	index = PAGE_HASH_FUNC(vp, off);
701 	phm = NULL;
702 top:
703 	PAGE_HASH_SEARCH(index, pp, vp, off);
704 	if (pp != NULL) {
705 		VM_STAT_ADD(page_lookup_cnt[1]);
706 		es = (newpp != NULL) ? 1 : 0;
707 		es |= flags;
708 		if (!hash_locked) {
709 			VM_STAT_ADD(page_lookup_cnt[2]);
710 			if (!page_try_reclaim_lock(pp, se, es)) {
711 				/*
712 				 * On a miss, acquire the phm.  Then
713 				 * next time, page_lock() will be called,
714 				 * causing a wait if the page is busy.
715 				 * Just looping with page_trylock() would
716 				 * get pretty boring.
717 				 */
718 				VM_STAT_ADD(page_lookup_cnt[3]);
719 				phm = PAGE_HASH_MUTEX(index);
720 				mutex_enter(phm);
721 				hash_locked = 1;
722 				goto top;
723 			}
724 		} else {
725 			VM_STAT_ADD(page_lookup_cnt[4]);
726 			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
727 				VM_STAT_ADD(page_lookup_cnt[5]);
728 				goto top;
729 			}
730 		}
731 
732 		/*
733 		 * Since `pp' is locked it cannot change identity now.
734 		 * Reconfirm we locked the correct page.
735 		 *
736 		 * Both the p_vnode and p_offset *must* be cast volatile
737 		 * to force a reload of their values: The PAGE_HASH_SEARCH
738 		 * macro will have stuffed p_vnode and p_offset into
739 		 * registers before calling page_trylock(); another thread,
740 		 * actually holding the hash lock, could have changed the
741 		 * page's identity in memory, but our registers would not
742 		 * be changed, fooling the reconfirmation.  If the hash
743 		 * lock was held during the search, the casting would
744 		 * not be needed.
745 		 */
746 		VM_STAT_ADD(page_lookup_cnt[6]);
747 		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
748 		    ((volatile u_offset_t)(pp->p_offset) != off)) {
749 			VM_STAT_ADD(page_lookup_cnt[7]);
750 			if (hash_locked) {
751 				panic("page_lookup_create: lost page %p",
752 				    (void *)pp);
753 				/*NOTREACHED*/
754 			}
755 			page_unlock(pp);
756 			phm = PAGE_HASH_MUTEX(index);
757 			mutex_enter(phm);
758 			hash_locked = 1;
759 			goto top;
760 		}
761 
762 		/*
763 		 * If page_trylock() was called, then pp may still be on
764 		 * the cachelist (can't be on the free list, it would not
765 		 * have been found in the search).  If it is on the
766 		 * cachelist it must be pulled now. To pull the page from
767 		 * the cachelist, it must be exclusively locked.
768 		 *
769 		 * The other big difference between page_trylock() and
770 		 * page_lock(), is that page_lock() will pull the
771 		 * page from whatever free list (the cache list in this
772 		 * case) the page is on.  If page_trylock() was used
773 		 * above, then we have to do the reclaim ourselves.
774 		 */
775 		if ((!hash_locked) && (PP_ISFREE(pp))) {
776 			ASSERT(PP_ISAGED(pp) == 0);
777 			VM_STAT_ADD(page_lookup_cnt[8]);
778 
779 			/*
780 			 * page_reclaim will ensure that we
781 			 * have this page exclusively
782 			 */
783 
784 			if (!page_reclaim(pp, NULL)) {
785 				/*
786 				 * Page_reclaim dropped whatever lock
787 				 * we held.
788 				 */
789 				VM_STAT_ADD(page_lookup_cnt[9]);
790 				phm = PAGE_HASH_MUTEX(index);
791 				mutex_enter(phm);
792 				hash_locked = 1;
793 				goto top;
794 			} else if (se == SE_SHARED && newpp == NULL) {
795 				VM_STAT_ADD(page_lookup_cnt[10]);
796 				page_downgrade(pp);
797 			}
798 		}
799 
800 		if (hash_locked) {
801 			mutex_exit(phm);
802 		}
803 
804 		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
805 		    PAGE_EXCL(pp) && nrelocp != NULL) {
806 			ASSERT(nrelocp != NULL);
807 			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
808 			    NULL);
809 			if (*nrelocp > 0) {
810 				VM_STAT_COND_ADD(*nrelocp == 1,
811 				    page_lookup_cnt[11]);
812 				VM_STAT_COND_ADD(*nrelocp > 1,
813 				    page_lookup_cnt[12]);
814 				pp = newpp;
815 				se = SE_EXCL;
816 			} else {
817 				if (se == SE_SHARED) {
818 					page_downgrade(pp);
819 				}
820 				VM_STAT_ADD(page_lookup_cnt[13]);
821 			}
822 		} else if (newpp != NULL && nrelocp != NULL) {
823 			if (PAGE_EXCL(pp) && se == SE_SHARED) {
824 				page_downgrade(pp);
825 			}
826 			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
827 			    page_lookup_cnt[14]);
828 			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
829 			    page_lookup_cnt[15]);
830 			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
831 			    page_lookup_cnt[16]);
832 		} else if (newpp != NULL && PAGE_EXCL(pp)) {
833 			se = SE_EXCL;
834 		}
835 	} else if (!hash_locked) {
836 		VM_STAT_ADD(page_lookup_cnt[17]);
837 		phm = PAGE_HASH_MUTEX(index);
838 		mutex_enter(phm);
839 		hash_locked = 1;
840 		goto top;
841 	} else if (newpp != NULL) {
842 		/*
843 		 * If we have a preallocated page then
844 		 * insert it now and basically behave like
845 		 * page_create.
846 		 */
847 		VM_STAT_ADD(page_lookup_cnt[18]);
848 		/*
849 		 * Since we hold the page hash mutex and
850 		 * just searched for this page, page_hashin
851 		 * had better not fail.  If it does, that
852 		 * means some thread did not follow the
853 		 * page hash mutex rules.  Panic now and
854 		 * get it over with.  As usual, go down
855 		 * holding all the locks.
856 		 */
857 		ASSERT(MUTEX_HELD(phm));
858 		if (!page_hashin(newpp, vp, off, phm)) {
859 			ASSERT(MUTEX_HELD(phm));
860 			panic("page_lookup_create: hashin failed %p %p %llx %p",
861 			    (void *)newpp, (void *)vp, off, (void *)phm);
862 			/*NOTREACHED*/
863 		}
864 		ASSERT(MUTEX_HELD(phm));
865 		mutex_exit(phm);
866 		phm = NULL;
867 		page_set_props(newpp, P_REF);
868 		page_io_lock(newpp);
869 		pp = newpp;
870 		se = SE_EXCL;
871 	} else {
872 		VM_STAT_ADD(page_lookup_cnt[19]);
873 		mutex_exit(phm);
874 	}
875 
876 	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
877 
878 	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);
879 
880 	return (pp);
881 }
882 
883 /*
884  * Search the hash list for the page representing the
885  * specified [vp, offset] and return it locked.  Skip
886  * free pages and pages that cannot be locked as requested.
887  * Used while attempting to kluster pages.
888  */
889 page_t *
890 page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
891 {
892 	page_t		*pp;
893 	kmutex_t	*phm;
894 	ulong_t		index;
895 	uint_t		locked;
896 
897 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
898 	VM_STAT_ADD(page_lookup_nowait_cnt[0]);
899 
900 	index = PAGE_HASH_FUNC(vp, off);
901 	PAGE_HASH_SEARCH(index, pp, vp, off);
902 	locked = 0;
903 	if (pp == NULL) {
904 top:
905 		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
906 		locked = 1;
907 		phm = PAGE_HASH_MUTEX(index);
908 		mutex_enter(phm);
909 		PAGE_HASH_SEARCH(index, pp, vp, off);
910 	}
911 
912 	if (pp == NULL || PP_ISFREE(pp)) {
913 		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
914 		pp = NULL;
915 	} else {
916 		if (!page_trylock(pp, se)) {
917 			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
918 			pp = NULL;
919 		} else {
920 			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
921 			/*
922 			 * See the comment in page_lookup()
923 			 */
924 			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
925 			    ((u_offset_t)(pp->p_offset) != off)) {
926 				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
927 				if (locked) {
928 					panic("page_lookup_nowait %p",
929 					    (void *)pp);
930 					/*NOTREACHED*/
931 				}
932 				page_unlock(pp);
933 				goto top;
934 			}
935 			if (PP_ISFREE(pp)) {
936 				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
937 				page_unlock(pp);
938 				pp = NULL;
939 			}
940 		}
941 	}
942 	if (locked) {
943 		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
944 		mutex_exit(phm);
945 	}
946 
947 	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
948 
949 	return (pp);
950 }
951 
952 /*
953  * Search the hash list for a page with the specified [vp, off]
954  * that is known to exist and is already locked.  This routine
955  * is typically used by segment SOFTUNLOCK routines.
956  */
957 page_t *
958 page_find(vnode_t *vp, u_offset_t off)
959 {
960 	page_t		*pp;
961 	kmutex_t	*phm;
962 	ulong_t		index;
963 
964 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
965 	VM_STAT_ADD(page_find_cnt);
966 
967 	index = PAGE_HASH_FUNC(vp, off);
968 	phm = PAGE_HASH_MUTEX(index);
969 
970 	mutex_enter(phm);
971 	PAGE_HASH_SEARCH(index, pp, vp, off);
972 	mutex_exit(phm);
973 
974 	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
975 	return (pp);
976 }
977 
978 /*
979  * Determine whether a page with the specified [vp, off]
980  * currently exists in the system.  Obviously this should
981  * only be considered as a hint since nothing prevents the
982  * page from disappearing or appearing immediately after
983  * the return from this routine. Consequently, we don't
984  * even bother to lock the list.
985  */
986 page_t *
987 page_exists(vnode_t *vp, u_offset_t off)
988 {
989 	page_t	*pp;
990 	ulong_t		index;
991 
992 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
993 	VM_STAT_ADD(page_exists_cnt);
994 
995 	index = PAGE_HASH_FUNC(vp, off);
996 	PAGE_HASH_SEARCH(index, pp, vp, off);
997 
998 	return (pp);
999 }
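
/*
 * Because page_exists() is only a hint, a caller sketch (illustrative)
 * treats the result as advisory:
 *
 *	if (page_exists(vp, off) != NULL) {
 *		(probably cached, but the page may vanish before use)
 *	}
 */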
1000 
1001 /*
1002  * Determine if physically contiguous pages exist for the [vp, off] -
1003  * [vp, off + page_size(szc)) range.  If they exist and ppa is not NULL,
1004  * fill the ppa array with these pages locked SHARED. If necessary,
1005  * reclaim pages from the freelist. Return 1 if contiguous pages exist
1006  * and 0 otherwise.
1007  *
1008  * If we fail to lock the pages, we still return 1 if they exist and are
1009  * contiguous, but then the return value is just a hint and the ppa array
1010  * won't be filled.  The caller should initialize ppa[0] to NULL to
1011  * distinguish the two outcomes.
1012  *
1013  * This routine doesn't work for anonymous (swapfs) pages.
1014  */
1015 int
1016 page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
1017 {
1018 	pgcnt_t pages;
1019 	pfn_t pfn;
1020 	page_t *rootpp;
1021 	pgcnt_t i;
1022 	pgcnt_t j;
1023 	u_offset_t save_off = off;
1024 	ulong_t index;
1025 	kmutex_t *phm;
1026 	page_t *pp;
1027 	uint_t pszc;
1028 	int loopcnt = 0;
1029 
1030 	ASSERT(szc != 0);
1031 	ASSERT(vp != NULL);
1032 	ASSERT(!IS_SWAPFSVP(vp));
1033 	ASSERT(vp != &kvp);
1034 
1035 again:
1036 	if (++loopcnt > 3) {
1037 		VM_STAT_ADD(page_exphcontg[0]);
1038 		return (0);
1039 	}
1040 
1041 	index = PAGE_HASH_FUNC(vp, off);
1042 	phm = PAGE_HASH_MUTEX(index);
1043 
1044 	mutex_enter(phm);
1045 	PAGE_HASH_SEARCH(index, pp, vp, off);
1046 	mutex_exit(phm);
1047 
1048 	VM_STAT_ADD(page_exphcontg[1]);
1049 
1050 	if (pp == NULL) {
1051 		VM_STAT_ADD(page_exphcontg[2]);
1052 		return (0);
1053 	}
1054 
1055 	pages = page_get_pagecnt(szc);
1056 	rootpp = pp;
1057 	pfn = rootpp->p_pagenum;
1058 
1059 	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
1060 		VM_STAT_ADD(page_exphcontg[3]);
1061 		if (!page_trylock(pp, SE_SHARED)) {
1062 			VM_STAT_ADD(page_exphcontg[4]);
1063 			return (1);
1064 		}
1065 		if (pp->p_szc != pszc || pp->p_vnode != vp ||
1066 		    pp->p_offset != off) {
1067 			VM_STAT_ADD(page_exphcontg[5]);
1068 			page_unlock(pp);
1069 			off = save_off;
1070 			goto again;
1071 		}
1072 		/*
1073 		 * Since szc was non-zero and the vnode and offset matched
1074 		 * after we locked the page, it can't become free on us.
1075 		 */
1076 		ASSERT(!PP_ISFREE(pp));
1077 		if (!IS_P2ALIGNED(pfn, pages)) {
1078 			page_unlock(pp);
1079 			return (0);
1080 		}
1081 		ppa[0] = pp;
1082 		pp++;
1083 		off += PAGESIZE;
1084 		pfn++;
1085 		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
1086 			if (!page_trylock(pp, SE_SHARED)) {
1087 				VM_STAT_ADD(page_exphcontg[6]);
1088 				pp--;
1089 				while (i-- > 0) {
1090 					page_unlock(pp);
1091 					pp--;
1092 				}
1093 				ppa[0] = NULL;
1094 				return (1);
1095 			}
1096 			if (pp->p_szc != pszc) {
1097 				VM_STAT_ADD(page_exphcontg[7]);
1098 				page_unlock(pp);
1099 				pp--;
1100 				while (i-- > 0) {
1101 					page_unlock(pp);
1102 					pp--;
1103 				}
1104 				ppa[0] = NULL;
1105 				off = save_off;
1106 				goto again;
1107 			}
1108 			/*
1109 			 * szc is the same as for the previously locked pages
1110 			 * with the right identity. Since this page had the
1111 			 * correct szc after we locked it, it can't get freed
1112 			 * or destroyed and must have the expected identity.
1113 			 */
1114 			ASSERT(!PP_ISFREE(pp));
1115 			if (pp->p_vnode != vp ||
1116 			    pp->p_offset != off) {
1117 				panic("page_exists_physcontig: "
1118 				    "large page identity doesn't match");
1119 			}
1120 			ppa[i] = pp;
1121 			ASSERT(pp->p_pagenum == pfn);
1122 		}
1123 		VM_STAT_ADD(page_exphcontg[8]);
1124 		ppa[pages] = NULL;
1125 		return (1);
1126 	} else if (pszc >= szc) {
1127 		VM_STAT_ADD(page_exphcontg[9]);
1128 		if (!IS_P2ALIGNED(pfn, pages)) {
1129 			return (0);
1130 		}
1131 		return (1);
1132 	}
1133 
1134 	if (!IS_P2ALIGNED(pfn, pages)) {
1135 		VM_STAT_ADD(page_exphcontg[10]);
1136 		return (0);
1137 	}
1138 
1139 	if (page_numtomemseg_nolock(pfn) !=
1140 	    page_numtomemseg_nolock(pfn + pages - 1)) {
1141 		VM_STAT_ADD(page_exphcontg[11]);
1142 		return (0);
1143 	}
1144 
1145 	/*
1146 	 * We loop over the pages up to 4 times to promote the page size.
1147 	 * We're extra cautious to promote page size atomically with respect
1148 	 * to everybody else.  But we can probably optimize this into 1 loop
1149 	 * if it becomes an issue.
1150 	 */
1151 
1152 	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
1153 		ASSERT(pp->p_pagenum == pfn);
1154 		if (!page_trylock(pp, SE_EXCL)) {
1155 			VM_STAT_ADD(page_exphcontg[12]);
1156 			break;
1157 		}
1158 		if (pp->p_vnode != vp ||
1159 		    pp->p_offset != off) {
1160 			VM_STAT_ADD(page_exphcontg[13]);
1161 			page_unlock(pp);
1162 			break;
1163 		}
1164 		if (pp->p_szc >= szc) {
1165 			ASSERT(i == 0);
1166 			page_unlock(pp);
1167 			off = save_off;
1168 			goto again;
1169 		}
1170 	}
1171 
1172 	if (i != pages) {
1173 		VM_STAT_ADD(page_exphcontg[14]);
1174 		--pp;
1175 		while (i-- > 0) {
1176 			page_unlock(pp);
1177 			--pp;
1178 		}
1179 		return (0);
1180 	}
1181 
1182 	pp = rootpp;
1183 	for (i = 0; i < pages; i++, pp++) {
1184 		if (PP_ISFREE(pp)) {
1185 			VM_STAT_ADD(page_exphcontg[15]);
1186 			ASSERT(!PP_ISAGED(pp));
1187 			ASSERT(pp->p_szc == 0);
1188 			if (!page_reclaim(pp, NULL)) {
1189 				break;
1190 			}
1191 		} else {
1192 			ASSERT(pp->p_szc < szc);
1193 			VM_STAT_ADD(page_exphcontg[16]);
1194 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
1195 		}
1196 	}
1197 	if (i < pages) {
1198 		VM_STAT_ADD(page_exphcontg[17]);
1199 		/*
1200 		 * page_reclaim failed because we were out of memory.
1201 		 * Drop the rest of the locks and return because this page
1202 		 * must already be reallocated anyway.
1203 		 */
1204 		pp = rootpp;
1205 		for (j = 0; j < pages; j++, pp++) {
1206 			if (j != i) {
1207 				page_unlock(pp);
1208 			}
1209 		}
1210 		return (0);
1211 	}
1212 
1213 	off = save_off;
1214 	pp = rootpp;
1215 	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
1216 		ASSERT(PAGE_EXCL(pp));
1217 		ASSERT(!PP_ISFREE(pp));
1218 		ASSERT(!hat_page_is_mapped(pp));
1219 		ASSERT(pp->p_vnode == vp);
1220 		ASSERT(pp->p_offset == off);
1221 		pp->p_szc = szc;
1222 	}
1223 	pp = rootpp;
1224 	for (i = 0; i < pages; i++, pp++) {
1225 		if (ppa == NULL) {
1226 			page_unlock(pp);
1227 		} else {
1228 			ppa[i] = pp;
1229 			page_downgrade(ppa[i]);
1230 		}
1231 	}
1232 	if (ppa != NULL) {
1233 		ppa[pages] = NULL;
1234 	}
1235 	VM_STAT_ADD(page_exphcontg[18]);
1236 	ASSERT(vp->v_pages != NULL);
1237 	return (1);
1238 }
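
/*
 * Illustrative caller sketch (not part of the original source) for the
 * ppa[0] convention described above; "pages" is page_get_pagecnt(szc)
 * and ppa must have room for pages + 1 entries:
 *
 *	ppa[0] = NULL;
 *	if (page_exists_physcontig(vp, off, szc, ppa)) {
 *		if (ppa[0] == NULL) {
 *			(pages exist and are contiguous but could not
 *			be locked; treat the result as a hint only)
 *		} else {
 *			(ppa[0 .. pages - 1] are locked SE_SHARED and
 *			ppa[pages] is NULL)
 *		}
 *	}
 */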
1239 
1240 /*
1241  * Determine whether a page with the specified [vp, off]
1242  * currently exists in the system and if so return its
1243  * size code. Obviously this should only be considered as
1244  * a hint since nothing prevents the page from disappearing
1245  * or appearing immediately after the return from this routine.
1246  */
1247 int
1248 page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
1249 {
1250 	page_t		*pp;
1251 	kmutex_t	*phm;
1252 	ulong_t		index;
1253 	int		rc = 0;
1254 
1255 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1256 	ASSERT(szc != NULL);
1257 	VM_STAT_ADD(page_exists_forreal_cnt);
1258 
1259 	index = PAGE_HASH_FUNC(vp, off);
1260 	phm = PAGE_HASH_MUTEX(index);
1261 
1262 	mutex_enter(phm);
1263 	PAGE_HASH_SEARCH(index, pp, vp, off);
1264 	if (pp != NULL) {
1265 		*szc = pp->p_szc;
1266 		rc = 1;
1267 	}
1268 	mutex_exit(phm);
1269 	return (rc);
1270 }
1271 
1272 /* wakeup threads waiting for pages in page_create_get_something() */
1273 void
1274 wakeup_pcgs(void)
1275 {
1276 	if (!CV_HAS_WAITERS(&pcgs_cv))
1277 		return;
1278 	cv_broadcast(&pcgs_cv);
1279 }
1280 
1281 /*
1282  * 'freemem' is used all over the kernel as an indication of how many
1283  * pages are free (either on the cache list or on the free page list)
1284  * in the system.  In very few places is a really accurate 'freemem'
1285  * needed.  To avoid contention on the lock protecting a
1286  * single freemem, it was spread out into NCPU buckets.  set_freemem()
1287  * sets freemem to the total of all NCPU buckets.  It is called from
1288  * clock() on each TICK.
1289  */
1290 void
1291 set_freemem()
1292 {
1293 	struct pcf	*p;
1294 	ulong_t		t;
1295 	uint_t		i;
1296 
1297 	t = 0;
1298 	p = pcf;
1299 	for (i = 0;  i < PCF_FANOUT; i++) {
1300 		t += p->pcf_count;
1301 		p++;
1302 	}
1303 	freemem = t;
1304 
1305 	/*
1306 	 * Don't worry about grabbing the mutex.  It's not that
1307 	 * critical if we miss a tick or two.  This is
1308 	 * where we wake up possible delayers in
1309 	 * page_create_get_something().
1310 	 */
1311 	wakeup_pcgs();
1312 }
1313 
1314 ulong_t
1315 get_freemem()
1316 {
1317 	struct pcf	*p;
1318 	ulong_t		t;
1319 	uint_t		i;
1320 
1321 	t = 0;
1322 	p = pcf;
1323 	for (i = 0; i < PCF_FANOUT; i++) {
1324 		t += p->pcf_count;
1325 		p++;
1326 	}
1327 	/*
1328 	 * We just calculated it, might as well set it.
1329 	 */
1330 	freemem = t;
1331 	return (t);
1332 }
1333 
1334 /*
1335  * Acquire all of the page cache & free (pcf) locks.
1336  */
1337 void
1338 pcf_acquire_all()
1339 {
1340 	struct pcf	*p;
1341 	uint_t		i;
1342 
1343 	p = pcf;
1344 	for (i = 0; i < PCF_FANOUT; i++) {
1345 		mutex_enter(&p->pcf_lock);
1346 		p++;
1347 	}
1348 }
1349 
1350 /*
1351  * Release all the pcf_locks.
1352  */
1353 void
1354 pcf_release_all()
1355 {
1356 	struct pcf	*p;
1357 	uint_t		i;
1358 
1359 	p = pcf;
1360 	for (i = 0; i < PCF_FANOUT; i++) {
1361 		mutex_exit(&p->pcf_lock);
1362 		p++;
1363 	}
1364 }
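
/*
 * The two routines above are used as a bracket around code that needs
 * a stable view of all the buckets at once (illustrative sketch):
 *
 *	pcf_acquire_all();
 *	(read or adjust pcf_count in every bucket; freemem is stable)
 *	pcf_release_all();
 */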
1365 
1366 /*
1367  * Inform the VM system that we need some pages freed up.
1368  * Calls must be symmetric, e.g.:
1369  *
1370  *	page_needfree(100);
1371  *	wait a bit;
1372  *	page_needfree(-100);
1373  */
1374 void
1375 page_needfree(spgcnt_t npages)
1376 {
1377 	mutex_enter(&new_freemem_lock);
1378 	needfree += npages;
1379 	mutex_exit(&new_freemem_lock);
1380 }
1381 
1382 /*
1383  * Throttle for page_create(): try to prevent freemem from dropping
1384  * below throttlefree.  We can't provide a 100% guarantee because
1385  * KM_NOSLEEP allocations, page_reclaim(), and various other things
1386  * nibble away at the freelist.  However, we can block all PG_WAIT
1387  * allocations until memory becomes available.  The motivation is
1388  * that several things can fall apart when there's no free memory:
1389  *
1390  * (1) If pageout() needs memory to push a page, the system deadlocks.
1391  *
1392  * (2) By (broken) specification, timeout(9F) can neither fail nor
1393  *     block, so it has no choice but to panic the system if it
1394  *     cannot allocate a callout structure.
1395  *
1396  * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
1397  *     it panics if it cannot allocate a callback structure.
1398  *
1399  * (4) Untold numbers of third-party drivers have not yet been hardened
1400  *     against KM_NOSLEEP and/or allocb() failures; they simply assume
1401  *     success and panic the system with a data fault on failure.
1402  *     (The long-term solution to this particular problem is to ship
1403  *     hostile fault-injecting DEBUG kernels with the DDK.)
1404  *
1405  * It is theoretically impossible to guarantee success of non-blocking
1406  * allocations, but in practice, this throttle is very hard to break.
1407  */
1408 static int
1409 page_create_throttle(pgcnt_t npages, int flags)
1410 {
1411 	ulong_t	fm;
1412 	uint_t	i;
1413 	pgcnt_t tf;	/* effective value of throttlefree */
1414 
1415 	/*
1416 	 * Never deny pages when:
1417 	 * - it's a thread that cannot block [NOMEMWAIT()]
1418 	 * - the allocation cannot block and must not fail
1419 	 * - the allocation cannot block and has pageout's dispensation
1420 	 *   (PG_PUSHPAGE)
1420 	 */
1421 	if (NOMEMWAIT() ||
1422 	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
1423 	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
1424 		return (1);
1425 
1426 	/*
1427 	 * If the allocation can't block, we look favorably upon it
1428 	 * unless we're below pageout_reserve.  In that case we fail
1429 	 * the allocation because we want to make sure there are a few
1430 	 * pages available for pageout.
1431 	 */
1432 	if ((flags & PG_WAIT) == 0)
1433 		return (freemem >= npages + pageout_reserve);
1434 
1435 	/* Calculate the effective throttlefree value */
1436 	tf = throttlefree -
1437 	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);
1438 
1439 	cv_signal(&proc_pageout->p_cv);
1440 
1441 	while (freemem < npages + tf) {
1442 		pcf_acquire_all();
1443 		mutex_enter(&new_freemem_lock);
1444 		fm = 0;
1445 		for (i = 0; i < PCF_FANOUT; i++) {
1446 			fm += pcf[i].pcf_count;
1447 			pcf[i].pcf_wait++;
1448 			mutex_exit(&pcf[i].pcf_lock);
1449 		}
1450 		freemem = fm;
1451 		needfree += npages;
1452 		freemem_wait++;
1453 		cv_wait(&freemem_cv, &new_freemem_lock);
1454 		freemem_wait--;
1455 		needfree -= npages;
1456 		mutex_exit(&new_freemem_lock);
1457 	}
1458 	return (1);
1459 }
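
/*
 * Worked example of the threshold arithmetic above (illustrative
 * numbers only): with throttlefree = 1000 and pageout_reserve = 250,
 * a plain PG_WAIT request for 100 pages sleeps until
 * freemem >= 100 + 1000, while the same request with PG_PUSHPAGE set
 * uses tf = 1000 - 250 and sleeps only until freemem >= 100 + 750.
 * PG_PUSHPAGE allocations may thus dip pageout_reserve pages deeper
 * into the throttlefree cushion.
 */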
1460 
1461 /*
1462  * page_create_wait() is called to either coalesce pages from the
1463  * different pcf buckets or to wait because there simply are not
1464  * enough pages to satisfy the caller's request.
1465  *
1466  * Sadly, this is called from platform/vm/vm_machdep.c
1467  */
1468 int
1469 page_create_wait(size_t npages, uint_t flags)
1470 {
1471 	pgcnt_t		total;
1472 	uint_t		i;
1473 	struct pcf	*p;
1474 
1475 	/*
1476 	 * Wait until there are enough free pages to satisfy our
1477 	 * entire request.
1478 	 * We set needfree += npages before prodding pageout, to make sure
1479 	 * it does real work when npages > lotsfree > freemem.
1480 	 */
1481 	VM_STAT_ADD(page_create_not_enough);
1482 
1483 	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
1484 checkagain:
1485 	if ((flags & PG_NORELOC) &&
1486 	    kcage_freemem < kcage_throttlefree + npages)
1487 		(void) kcage_create_throttle(npages, flags);
1488 
1489 	if (freemem < npages + throttlefree)
1490 		if (!page_create_throttle(npages, flags))
1491 			return (0);
1492 
1493 	/*
1494 	 * Since page_create_va() looked at every
1495 	 * bucket, assume we are going to have to wait.
1496 	 * Get all of the pcf locks.
1497 	 */
1498 	total = 0;
1499 	p = pcf;
1500 	for (i = 0; i < PCF_FANOUT; i++) {
1501 		mutex_enter(&p->pcf_lock);
1502 		total += p->pcf_count;
1503 		if (total >= npages) {
1504 			/*
1505 			 * Wow!  There are enough pages lying around
1506 			 * to satisfy the request.  Do the accounting,
1507 			 * drop the locks we acquired, and go back.
1508 			 *
1509 			 * freemem is not protected by any lock. So,
1510 			 * we cannot have any assertion containing
1511 			 * freemem.
1512 			 */
1513 			freemem -= npages;
1514 
1515 			while (p >= pcf) {
1516 				if (p->pcf_count <= npages) {
1517 					npages -= p->pcf_count;
1518 					p->pcf_count = 0;
1519 				} else {
1520 					p->pcf_count -= (uint_t)npages;
1521 					npages = 0;
1522 				}
1523 				mutex_exit(&p->pcf_lock);
1524 				p--;
1525 			}
1526 			ASSERT(npages == 0);
1527 			return (1);
1528 		}
1529 		p++;
1530 	}
1531 
1532 	/*
1533 	 * All of the pcf locks are held; there are not enough pages
1534 	 * to satisfy the request (total < npages).
1535 	 * Be sure to acquire the new_freemem_lock before dropping
1536 	 * the pcf locks.  This prevents dropping wakeups in page_free().
1537 	 * The order is always pcf_lock then new_freemem_lock.
1538 	 *
1539 	 * Since we hold all the pcf locks, it is a good time to set freemem.
1540 	 *
1541 	 * If the caller does not want to wait, return now.
1542 	 * Else turn the pageout daemon loose to find something
1543 	 * and wait till it does.
1544 	 *
1545 	 */
1546 	freemem = total;
1547 
1548 	if ((flags & PG_WAIT) == 0) {
1549 		pcf_release_all();
1550 
1551 		TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
1552 		"page_create_nomem:npages %ld freemem %ld", npages, freemem);
1553 		return (0);
1554 	}
1555 
1556 	ASSERT(proc_pageout != NULL);
1557 	cv_signal(&proc_pageout->p_cv);
1558 
1559 	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
1560 	    "page_create_sleep_start: freemem %ld needfree %ld",
1561 	    freemem, needfree);
1562 
1563 	/*
1564 	 * We are going to wait.
1565 	 * We currently hold all of the pcf_locks,
1566 	 * get the new_freemem_lock (it protects freemem_wait),
1567 	 * before dropping the pcf_locks.
1568 	 */
1569 	mutex_enter(&new_freemem_lock);
1570 
1571 	p = pcf;
1572 	for (i = 0; i < PCF_FANOUT; i++) {
1573 		p->pcf_wait++;
1574 		mutex_exit(&p->pcf_lock);
1575 		p++;
1576 	}
1577 
1578 	needfree += npages;
1579 	freemem_wait++;
1580 
1581 	cv_wait(&freemem_cv, &new_freemem_lock);
1582 
1583 	freemem_wait--;
1584 	needfree -= npages;
1585 
1586 	mutex_exit(&new_freemem_lock);
1587 
1588 	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
1589 	    "page_create_sleep_end: freemem %ld needfree %ld",
1590 	    freemem, needfree);
1591 
1592 	VM_STAT_ADD(page_create_not_enough_again);
1593 	goto checkagain;
1594 }
1595 
1596 /*
1597  * A routine to do the opposite of page_create_wait().
1598  */
1599 void
1600 page_create_putback(spgcnt_t npages)
1601 {
1602 	struct pcf	*p;
1603 	pgcnt_t		lump;
1604 	uint_t		*which;
1605 
1606 	/*
1607 	 * When a contiguous lump is broken up, we have to
1608 	 * deal with lots of pages (min 64) so let's spread
1609 	 * the wealth around.
1610 	 */
1611 	lump = roundup(npages, PCF_FANOUT) / PCF_FANOUT;
1612 	freemem += npages;
1613 
1614 	for (p = pcf; (npages > 0) && (p < &pcf[PCF_FANOUT]); p++) {
1615 		which = &p->pcf_count;
1616 
1617 		mutex_enter(&p->pcf_lock);
1618 
1619 		if (p->pcf_block) {
1620 			which = &p->pcf_reserve;
1621 		}
1622 
1623 		if (lump < npages) {
1624 			*which += (uint_t)lump;
1625 			npages -= lump;
1626 		} else {
1627 			*which += (uint_t)npages;
1628 			npages = 0;
1629 		}
1630 
1631 		if (p->pcf_wait) {
1632 			mutex_enter(&new_freemem_lock);
1633 			/*
1634 			 * Check to see if some other thread
1635 			 * is actually waiting.  Another bucket
1636 			 * may have woken it up by now.  If there
1637 			 * are no waiters, then set our pcf_wait
1638 			 * count to zero to avoid coming in here
1639 			 * next time.
1640 			 */
1641 			if (freemem_wait) {
1642 				if (npages > 1) {
1643 					cv_broadcast(&freemem_cv);
1644 				} else {
1645 					cv_signal(&freemem_cv);
1646 				}
1647 				p->pcf_wait--;
1648 			} else {
1649 				p->pcf_wait = 0;
1650 			}
1651 			mutex_exit(&new_freemem_lock);
1652 		}
1653 		mutex_exit(&p->pcf_lock);
1654 	}
1655 	ASSERT(npages == 0);
1656 }
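
/*
 * page_create_wait() and page_create_putback() are used as a pair, as
 * in this sketch of the pattern page_alloc_pages() uses below: reserve
 * the pages up front, then put back whatever portion could not
 * actually be taken from the freelists.  (npgs_short is a hypothetical
 * name for the shortfall.)
 *
 *	(void) page_create_wait(npgs, PG_WAIT);
 *	... allocate from the freelists ...
 *	if (npgs_short != 0)
 *		page_create_putback(npgs_short);
 */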
1657 
1658 /*
1659  * A helper routine for page_create_get_something.
1660  * The indenting got too deep down there.
1661  * Unblock the pcf counters.  Any pages freed after
1662  * pcf_block got set are moved to pcf_count and
1663  * wakeups (cv_broadcast() or cv_signal()) are done as needed.
1664  */
1665 static void
1666 pcgs_unblock(void)
1667 {
1668 	int		i;
1669 	struct pcf	*p;
1670 
1671 	/* Update freemem while we're here. */
1672 	freemem = 0;
1673 	p = pcf;
1674 	for (i = 0; i < PCF_FANOUT; i++) {
1675 		mutex_enter(&p->pcf_lock);
1676 		ASSERT(p->pcf_count == 0);
1677 		p->pcf_count = p->pcf_reserve;
1678 		p->pcf_block = 0;
1679 		freemem += p->pcf_count;
1680 		if (p->pcf_wait) {
1681 			mutex_enter(&new_freemem_lock);
1682 			if (freemem_wait) {
1683 				if (p->pcf_reserve > 1) {
1684 					cv_broadcast(&freemem_cv);
1685 					p->pcf_wait = 0;
1686 				} else {
1687 					cv_signal(&freemem_cv);
1688 					p->pcf_wait--;
1689 				}
1690 			} else {
1691 				p->pcf_wait = 0;
1692 			}
1693 			mutex_exit(&new_freemem_lock);
1694 		}
1695 		p->pcf_reserve = 0;
1696 		mutex_exit(&p->pcf_lock);
1697 		p++;
1698 	}
1699 }
1700 
1701 /*
1702  * Called from page_create_va() when both the cache and free lists
1703  * have been checked once.
1704  *
1705  * Either returns a page or panics since the accounting was done
1706  * way before we got here.
1707  *
1708  * We don't come here often, so leave the accounting on permanently.
1709  */
1710 
1711 #define	MAX_PCGS	100
1712 
1713 #ifdef	DEBUG
1714 #define	PCGS_TRIES	100
1715 #else	/* DEBUG */
1716 #define	PCGS_TRIES	10
1717 #endif	/* DEBUG */
1718 
1719 #ifdef	VM_STATS
1720 uint_t	pcgs_counts[PCGS_TRIES];
1721 uint_t	pcgs_too_many;
1722 uint_t	pcgs_entered;
1723 uint_t	pcgs_entered_noreloc;
1724 uint_t	pcgs_locked;
1725 uint_t	pcgs_cagelocked;
1726 #endif	/* VM_STATS */
1727 
1728 static page_t *
1729 page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
1730     caddr_t vaddr, uint_t flags)
1731 {
1732 	uint_t		count;
1733 	page_t		*pp;
1734 	uint_t		locked, i;
1735 	struct	pcf	*p;
1736 	lgrp_t		*lgrp;
1737 	int		cagelocked = 0;
1738 
1739 	VM_STAT_ADD(pcgs_entered);
1740 
1741 	/*
1742 	 * Tap any reserve freelists: if we fail now, we'll die
1743 	 * since the page(s) we're looking for have already been
1744 	 * accounted for.
1745 	 */
1746 	flags |= PG_PANIC;
1747 
1748 	if ((flags & PG_NORELOC) != 0) {
1749 		VM_STAT_ADD(pcgs_entered_noreloc);
1750 		/*
1751 		 * Requests for free pages from critical threads
1752 		 * such as pageout still won't throttle here, but
1753 		 * we must try again, to give the cageout thread
1754 		 * another chance to catch up. Since we already
1755 		 * accounted for the pages, we had better get them
1756 		 * this time.
1757 		 *
1758 		 * N.B. All non-critical threads acquire the pcgs_cagelock
1759 		 * to serialize access to the freelists. This implements a
1760 		 * turnstile-type synchronization to avoid starvation of
1761 		 * critical requests for PG_NORELOC memory by non-critical
1762 		 * threads: all non-critical threads must acquire a 'ticket'
1763 		 * before passing through, which entails making sure
1764 		 * kcage_freemem won't fall below minfree prior to grabbing
1765 		 * pages from the freelists.
1766 		 */
1767 		if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
1768 			mutex_enter(&pcgs_cagelock);
1769 			cagelocked = 1;
1770 			VM_STAT_ADD(pcgs_cagelocked);
1771 		}
1772 	}
1773 
1774 	/*
1775 	 * Time to get serious.
1776 	 * We failed to get a `correctly colored' page from both the
1777 	 * free and cache lists.
1778 	 * We escalate in stage.
1779 	 * We escalate in stages.
1780 	 *
1781 	 * First try both lists without worrying about color.
1782 	 * Then, grab all page accounting locks (ie. pcf[]) and
1783 	 * steal any pages that they have and set the pcf_block flag to
1784 	 * stop deletions from the lists.  This will help because
1785 	 * a page can get added to the free list while we are looking
1786 	 * at the cache list, then another page could be added to the cache
1787 	 * list allowing the page on the free list to be removed as we
1788 	 * move from looking at the cache list to the free list. This
1789 	 * could happen over and over. We would never find the page
1790 	 * we have accounted for.
1791 	 *
1792 	 * Noreloc pages are a subset of the global (relocatable) page pool.
1793 	 * They are not tracked separately in the pcf bins, so it is
1794 	 * impossible to know when doing pcf accounting if the available
1795 	 * page(s) are noreloc pages or not. When looking for a noreloc page
1796 	 * it is quite easy to end up here even if the global (relocatable)
1797 	 * page pool has plenty of free pages but the noreloc pool is empty.
1798 	 *
1799 	 * When the noreloc pool is empty (or low), additional noreloc pages
1800 	 * are created by converting pages from the global page pool. This
1801 	 * process will stall during pcf accounting if the pcf bins are
1802 	 * already locked. Such is the case when a noreloc allocation is
1803 	 * looping here in page_create_get_something waiting for more noreloc
1804 	 * pages to appear.
1805 	 *
1806 	 * Short of adding a new field to the pcf bins to accurately track
1807 	 * the number of free noreloc pages, we instead do not grab the
1808 	 * pcgs_lock, do not set the pcf blocks and do not timeout when
1809 	 * allocating a noreloc page. This allows noreloc allocations to
1810 	 * loop without blocking global page pool allocations.
1811 	 *
1812 	 * NOTE: the behaviour of page_create_get_something has not changed
1813 	 * for the case of global page pool allocations.
1814 	 */
1815 
1816 	flags &= ~PG_MATCH_COLOR;
1817 	locked = 0;
1818 #if defined(__i386) || defined(__amd64)
1819 	/*
1820 	 * page_create_get_something may be called because 4g memory may be
1821 	 * depleted. Set flags to allow for relocation of base page below
1822 	 * 4g if necessary.
1823 	 */
1824 	if (physmax4g)
1825 		flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
1826 #endif
1827 
1828 	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
1829 
1830 	for (count = 0; kcage_on || count < MAX_PCGS; count++) {
1831 		pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
1832 		    flags, lgrp);
1833 		if (pp == NULL) {
1834 			pp = page_get_cachelist(vp, off, seg, vaddr,
1835 			    flags, lgrp);
1836 		}
1837 		if (pp == NULL) {
1838 			/*
1839 			 * Serialize.  Don't fight with other pcgs().
1840 			 */
1841 			if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
1842 				mutex_enter(&pcgs_lock);
1843 				VM_STAT_ADD(pcgs_locked);
1844 				locked = 1;
1845 				p = pcf;
1846 				for (i = 0; i < PCF_FANOUT; i++) {
1847 					mutex_enter(&p->pcf_lock);
1848 					ASSERT(p->pcf_block == 0);
1849 					p->pcf_block = 1;
1850 					p->pcf_reserve = p->pcf_count;
1851 					p->pcf_count = 0;
1852 					mutex_exit(&p->pcf_lock);
1853 					p++;
1854 				}
1855 				freemem = 0;
1856 			}
1857 
1858 			if (count) {
1859 				/*
1860 				 * Since page_free() puts pages on
1861 				 * a list then accounts for it, we
1862 				 * just have to wait for page_free()
1863 				 * to unlock any page it was working
1864 				 * with. The page_lock()-page_reclaim()
1865 				 * path falls in the same boat.
1866 				 *
1867 				 * We don't need to check on the
1868 				 * PG_WAIT flag, we have already
1869 				 * accounted for the page we are
1870 				 * looking for in page_create_va().
1871 				 *
1872 				 * We just wait a moment to let any
1873 				 * locked pages on the lists free up,
1874 				 * then continue around and try again.
1875 				 *
1876 				 * Will be awakened by set_freemem().
1877 				 */
1878 				mutex_enter(&pcgs_wait_lock);
1879 				cv_wait(&pcgs_cv, &pcgs_wait_lock);
1880 				mutex_exit(&pcgs_wait_lock);
1881 			}
1882 		} else {
1883 #ifdef VM_STATS
1884 			if (count >= PCGS_TRIES) {
1885 				VM_STAT_ADD(pcgs_too_many);
1886 			} else {
1887 				VM_STAT_ADD(pcgs_counts[count]);
1888 			}
1889 #endif
1890 			if (locked) {
1891 				pcgs_unblock();
1892 				mutex_exit(&pcgs_lock);
1893 			}
1894 			if (cagelocked)
1895 				mutex_exit(&pcgs_cagelock);
1896 			return (pp);
1897 		}
1898 	}
1899 	/*
1900 	 * We go down holding the pcf locks.
1901 	 */
1902 	panic("no %spage found %d",
1903 	    ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
1904 	/*NOTREACHED*/
1905 }
1906 
1907 /*
1908  * Create enough pages for "bytes" worth of data starting at
1909  * "off" in "vp".
1910  *
1911  *	Where flag must be one of:
1912  *
1913  *		PG_EXCL:	Exclusive create (fail if any page already
1914  *				exists in the page cache) which does not
1915  *				wait for memory to become available.
1916  *
1917  *		PG_WAIT:	Non-exclusive create which can wait for
1918  *				memory to become available.
1919  *
1920  *		PG_PHYSCONTIG:	Allocate physically contiguous pages.
1921  *				(Not Supported)
1922  *
1923  * A doubly linked list of pages is returned to the caller.  Each page
1924  * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
1925  * lock.
1926  *
1927  * Unable to change the parameters to page_create() in a minor release,
1928  * we renamed page_create() to page_create_va(), changed all known calls
1929  * from page_create() to page_create_va(), and created this wrapper.
1930  *
 * Upon a major release, we should break compatibility by deleting this
 * wrapper and replacing all the strings "page_create_va" with "page_create".
1933  *
1934  * NOTE: There is a copy of this interface as page_create_io() in
1935  *	 i86/vm/vm_machdep.c. Any bugs fixed here should be applied
1936  *	 there.
1937  */
1938 page_t *
1939 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags)
1940 {
1941 	caddr_t random_vaddr;
1942 	struct seg kseg;
1943 
1944 #ifdef DEBUG
1945 	cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p",
1946 	    (void *)caller());
1947 #endif
1948 
1949 	random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^
1950 	    (uintptr_t)(off >> PAGESHIFT));
1951 	kseg.s_as = &kas;
1952 
1953 	return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr));
1954 }
1955 
1956 #ifdef DEBUG
1957 uint32_t pg_alloc_pgs_mtbf = 0;
1958 #endif
1959 
1960 /*
 * Used for large page support: attempt to allocate one or more large
 * pages off the freelist.
 *
 * Returns nonzero on failure.
1965  */
1966 int
1967 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr,
1968     page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz)
1969 {
1970 	pgcnt_t		npgs, curnpgs, totpgs;
1971 	size_t		pgsz;
1972 	page_t		*pplist = NULL, *pp;
1973 	int		err = 0;
1974 	lgrp_t		*lgrp;
1975 
1976 	ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));
1977 
1978 	VM_STAT_ADD(alloc_pages[0]);
1979 
1980 #ifdef DEBUG
1981 	if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) {
1982 		return (ENOMEM);
1983 	}
1984 #endif
1985 
1986 	pgsz = page_get_pagesize(szc);
1987 	totpgs = curnpgs = npgs = pgsz >> PAGESHIFT;
1988 
1989 	ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0);
1990 	/*
	 * Exactly one of basepp and ppa must be NULL;
	 * the other must be non-NULL.
1993 	 */
1994 	ASSERT(basepp != NULL || ppa != NULL);
1995 	ASSERT(basepp == NULL || ppa == NULL);
1996 
1997 	(void) page_create_wait(npgs, PG_WAIT);
1998 
1999 	while (npgs && szc) {
2000 		lgrp = lgrp_mem_choose(seg, addr, pgsz);
2001 		pp = page_get_freelist(vp, 0, seg, addr, pgsz, 0, lgrp);
2002 		if (pp != NULL) {
2003 			VM_STAT_ADD(alloc_pages[1]);
2004 			page_list_concat(&pplist, &pp);
2005 			ASSERT(npgs >= curnpgs);
2006 			npgs -= curnpgs;
2007 		} else if (anypgsz) {
2008 			VM_STAT_ADD(alloc_pages[2]);
2009 			szc--;
2010 			pgsz = page_get_pagesize(szc);
2011 			curnpgs = pgsz >> PAGESHIFT;
2012 		} else {
2013 			VM_STAT_ADD(alloc_pages[3]);
2014 			ASSERT(npgs == totpgs);
2015 			page_create_putback(npgs);
2016 			return (ENOMEM);
2017 		}
2018 	}
2019 	if (szc == 0) {
2020 		VM_STAT_ADD(alloc_pages[4]);
2021 		ASSERT(npgs != 0);
2022 		page_create_putback(npgs);
2023 		err = ENOMEM;
2024 	} else if (basepp != NULL) {
2025 		ASSERT(npgs == 0);
2026 		ASSERT(ppa == NULL);
2027 		*basepp = pplist;
2028 	}
2029 
2030 	npgs = totpgs - npgs;
2031 	pp = pplist;
2032 
2033 	/*
	 * Clear the free and age bits.  Also, if we were passed in a ppa,
	 * fill it in with all the constituent pages from the large page.
	 * But if we failed to allocate all the pages, just free what we got.
2037 	 */
2038 	while (npgs != 0) {
2039 		ASSERT(PP_ISFREE(pp));
2040 		ASSERT(PP_ISAGED(pp));
2041 		if (ppa != NULL || err != 0) {
2042 			if (err == 0) {
2043 				VM_STAT_ADD(alloc_pages[5]);
2044 				PP_CLRFREE(pp);
2045 				PP_CLRAGED(pp);
2046 				page_sub(&pplist, pp);
2047 				*ppa++ = pp;
2048 				npgs--;
2049 			} else {
2050 				VM_STAT_ADD(alloc_pages[6]);
2051 				ASSERT(pp->p_szc != 0);
2052 				curnpgs = page_get_pagecnt(pp->p_szc);
2053 				page_list_break(&pp, &pplist, curnpgs);
2054 				page_list_add_pages(pp, 0);
2055 				page_create_putback(curnpgs);
2056 				ASSERT(npgs >= curnpgs);
2057 				npgs -= curnpgs;
2058 			}
2059 			pp = pplist;
2060 		} else {
2061 			VM_STAT_ADD(alloc_pages[7]);
2062 			PP_CLRFREE(pp);
2063 			PP_CLRAGED(pp);
2064 			pp = pp->p_next;
2065 			npgs--;
2066 		}
2067 	}
2068 	return (err);
2069 }
2070 
2071 /*
 * Get a single large page off the freelists and set it up for use.
 * The number of bytes requested must be a supported page size.
 *
 * Note that this call may fail even if there is sufficient
 * memory available or PG_WAIT is set, so the caller must
 * be willing to fall back on page_create_va(), block and retry,
2078  * or fail the requester.
2079  */
2080 page_t *
2081 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
2082     struct seg *seg, caddr_t vaddr, void *arg)
2083 {
2084 	pgcnt_t		npages, pcftotal;
2085 	page_t		*pp;
2086 	page_t		*rootpp;
2087 	lgrp_t		*lgrp;
2088 	uint_t		enough;
2089 	uint_t		pcf_index;
2090 	uint_t		i;
2091 	struct pcf	*p;
2092 	struct pcf	*q;
2093 	lgrp_id_t	*lgrpid = (lgrp_id_t *)arg;
2094 
2095 	ASSERT(vp != NULL);
2096 
2097 	ASSERT((flags & ~(PG_EXCL | PG_WAIT |
	    PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0);
2099 	/* but no others */
2100 
2101 	ASSERT((flags & PG_EXCL) == PG_EXCL);
2102 
2103 	npages = btop(bytes);
2104 
2105 	if (!kcage_on || panicstr) {
2106 		/*
2107 		 * Cage is OFF, or we are single threaded in
2108 		 * panic, so make everything a RELOC request.
2109 		 */
2110 		flags &= ~PG_NORELOC;
2111 	}
2112 
2113 	/*
2114 	 * Make sure there's adequate physical memory available.
2115 	 * Note: PG_WAIT is ignored here.
2116 	 */
2117 	if (freemem <= throttlefree + npages) {
2118 		VM_STAT_ADD(page_create_large_cnt[1]);
2119 		return (NULL);
2120 	}
2121 
2122 	/*
2123 	 * If cage is on, dampen draw from cage when available
2124 	 * cage space is low.
2125 	 */
2126 	if ((flags & (PG_NORELOC | PG_WAIT)) ==  (PG_NORELOC | PG_WAIT) &&
2127 	    kcage_freemem < kcage_throttlefree + npages) {
2128 
2129 		/*
2130 		 * The cage is on, the caller wants PG_NORELOC
2131 		 * pages and available cage memory is very low.
2132 		 * Call kcage_create_throttle() to attempt to
2133 		 * control demand on the cage.
2134 		 */
2135 		if (kcage_create_throttle(npages, flags) == KCT_FAILURE) {
2136 			VM_STAT_ADD(page_create_large_cnt[2]);
2137 			return (NULL);
2138 		}
2139 	}
2140 
2141 	enough = 0;
2142 	pcf_index = PCF_INDEX();
2143 	p = &pcf[pcf_index];
2144 	q = &pcf[PCF_FANOUT];
2145 	for (pcftotal = 0, i = 0; i < PCF_FANOUT; i++) {
2146 		if (p->pcf_count > npages) {
2147 			/*
2148 			 * a good one to try.
2149 			 */
2150 			mutex_enter(&p->pcf_lock);
2151 			if (p->pcf_count > npages) {
2152 				p->pcf_count -= (uint_t)npages;
2153 				/*
2154 				 * freemem is not protected by any lock.
2155 				 * Thus, we cannot have any assertion
2156 				 * containing freemem here.
2157 				 */
2158 				freemem -= npages;
2159 				enough = 1;
2160 				mutex_exit(&p->pcf_lock);
2161 				break;
2162 			}
2163 			mutex_exit(&p->pcf_lock);
2164 		}
2165 		pcftotal += p->pcf_count;
2166 		p++;
2167 		if (p >= q) {
2168 			p = pcf;
2169 		}
2170 	}
2171 
2172 	if (!enough) {
2173 		/* If there isn't enough memory available, give up. */
2174 		if (pcftotal < npages) {
2175 			VM_STAT_ADD(page_create_large_cnt[3]);
2176 			return (NULL);
2177 		}
2178 
2179 		/* try to collect pages from several pcf bins */
2180 		for (p = pcf, pcftotal = 0, i = 0; i < PCF_FANOUT; i++) {
2181 			mutex_enter(&p->pcf_lock);
2182 			pcftotal += p->pcf_count;
2183 			if (pcftotal >= npages) {
2184 				/*
				 * Wow!  There are enough pages lying around
2186 				 * to satisfy the request.  Do the accounting,
2187 				 * drop the locks we acquired, and go back.
2188 				 *
2189 				 * freemem is not protected by any lock. So,
2190 				 * we cannot have any assertion containing
2191 				 * freemem.
2192 				 */
2193 				pgcnt_t	tpages = npages;
2194 				freemem -= npages;
2195 				while (p >= pcf) {
2196 					if (p->pcf_count <= tpages) {
2197 						tpages -= p->pcf_count;
2198 						p->pcf_count = 0;
2199 					} else {
2200 						p->pcf_count -= (uint_t)tpages;
2201 						tpages = 0;
2202 					}
2203 					mutex_exit(&p->pcf_lock);
2204 					p--;
2205 				}
2206 				ASSERT(tpages == 0);
2207 				break;
2208 			}
2209 			p++;
2210 		}
2211 		if (i == PCF_FANOUT) {
2212 			/* failed to collect pages - release the locks */
2213 			while (--p >= pcf) {
2214 				mutex_exit(&p->pcf_lock);
2215 			}
2216 			VM_STAT_ADD(page_create_large_cnt[4]);
2217 			return (NULL);
2218 		}
2219 	}
2220 
2221 	/*
2222 	 * This is where this function behaves fundamentally differently
2223 	 * than page_create_va(); since we're intending to map the page
2224 	 * with a single TTE, we have to get it as a physically contiguous
2225 	 * hardware pagesize chunk.  If we can't, we fail.
2226 	 */
2227 	if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max &&
	    LGRP_EXISTS(lgrp_table[*lgrpid]))
2229 		lgrp = lgrp_table[*lgrpid];
2230 	else
2231 		lgrp = lgrp_mem_choose(seg, vaddr, bytes);
2232 
2233 	if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr,
2234 	    bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) {
2235 		page_create_putback(npages);
2236 		VM_STAT_ADD(page_create_large_cnt[5]);
2237 		return (NULL);
2238 	}
2239 
2240 	/*
	 * If we got the page with the wrong mtype, give it back; this is a
	 * workaround for CR 6249718.  When CR 6249718 is fixed we will never
	 * get inside the "if" and the workaround becomes just a nop.
2244 	 */
2245 	if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) {
2246 		page_list_add_pages(rootpp, 0);
2247 		page_create_putback(npages);
2248 		VM_STAT_ADD(page_create_large_cnt[6]);
2249 		return (NULL);
2250 	}
2251 
2252 	/*
2253 	 * If satisfying this request has left us with too little
2254 	 * memory, start the wheels turning to get some back.  The
2255 	 * first clause of the test prevents waking up the pageout
2256 	 * daemon in situations where it would decide that there's
2257 	 * nothing to do.
2258 	 */
2259 	if (nscan < desscan && freemem < minfree) {
2260 		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
2261 		    "pageout_cv_signal:freemem %ld", freemem);
2262 		cv_signal(&proc_pageout->p_cv);
2263 	}
2264 
2265 	pp = rootpp;
2266 	while (npages--) {
2267 		ASSERT(PAGE_EXCL(pp));
2268 		ASSERT(pp->p_vnode == NULL);
2269 		ASSERT(!hat_page_is_mapped(pp));
2270 		PP_CLRFREE(pp);
2271 		PP_CLRAGED(pp);
2272 		if (!page_hashin(pp, vp, off, NULL))
2273 			panic("page_create_large: hashin failed: page %p",
2274 			    (void *)pp);
2275 		page_io_lock(pp);
2276 		off += PAGESIZE;
2277 		pp = pp->p_next;
2278 	}
2279 
2280 	VM_STAT_ADD(page_create_large_cnt[0]);
2281 	return (rootpp);
2282 }
2283 
2284 page_t *
2285 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
2286     struct seg *seg, caddr_t vaddr)
2287 {
2288 	page_t		*plist = NULL;
2289 	pgcnt_t		npages;
2290 	pgcnt_t		found_on_free = 0;
2291 	pgcnt_t		pages_req;
2292 	page_t		*npp = NULL;
2293 	uint_t		enough;
2294 	uint_t		i;
2295 	uint_t		pcf_index;
2296 	struct pcf	*p;
2297 	struct pcf	*q;
2298 	lgrp_t		*lgrp;
2299 
	TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
	    "page_create_start:vp %p off %llx bytes %lu flags %x",
	    vp, off, bytes, flags);
2303 
2304 	ASSERT(bytes != 0 && vp != NULL);
2305 
2306 	if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) {
2307 		panic("page_create: invalid flags");
2308 		/*NOTREACHED*/
2309 	}
2310 	ASSERT((flags & ~(PG_EXCL | PG_WAIT |
2311 	    PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == 0);
2312 	    /* but no others */
2313 
2314 	pages_req = npages = btopr(bytes);
2315 	/*
	 * Try to see whether the request is too large to *ever* be
2317 	 * satisfied, in order to prevent deadlock.  We arbitrarily
2318 	 * decide to limit maximum size requests to max_page_get.
2319 	 */
2320 	if (npages >= max_page_get) {
2321 		if ((flags & PG_WAIT) == 0) {
2322 			TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG,
2323 			    "page_create_toobig:vp %p off %llx npages "
2324 			    "%lu max_page_get %lu",
2325 			    vp, off, npages, max_page_get);
2326 			return (NULL);
2327 		} else {
2328 			cmn_err(CE_WARN,
2329 			    "Request for too much kernel memory "
2330 			    "(%lu bytes), will hang forever", bytes);
2331 			for (;;)
2332 				delay(1000000000);
2333 		}
2334 	}
2335 
2336 	if (!kcage_on || panicstr) {
2337 		/*
2338 		 * Cage is OFF, or we are single threaded in
2339 		 * panic, so make everything a RELOC request.
2340 		 */
2341 		flags &= ~PG_NORELOC;
2342 	}
2343 
2344 	if (freemem <= throttlefree + npages)
2345 		if (!page_create_throttle(npages, flags))
2346 			return (NULL);
2347 
2348 	/*
2349 	 * If cage is on, dampen draw from cage when available
2350 	 * cage space is low.
2351 	 */
2352 	if ((flags & PG_NORELOC) &&
	    kcage_freemem < kcage_throttlefree + npages) {
2354 
2355 		/*
2356 		 * The cage is on, the caller wants PG_NORELOC
2357 		 * pages and available cage memory is very low.
2358 		 * Call kcage_create_throttle() to attempt to
2359 		 * control demand on the cage.
2360 		 */
2361 		if (kcage_create_throttle(npages, flags) == KCT_FAILURE)
2362 			return (NULL);
2363 	}
2364 
2365 	VM_STAT_ADD(page_create_cnt[0]);
2366 
2367 	enough = 0;
2368 	pcf_index = PCF_INDEX();
2369 
2370 	p = &pcf[pcf_index];
2371 	q = &pcf[PCF_FANOUT];
2372 	for (i = 0; i < PCF_FANOUT; i++) {
2373 		if (p->pcf_count > npages) {
2374 			/*
2375 			 * a good one to try.
2376 			 */
2377 			mutex_enter(&p->pcf_lock);
2378 			if (p->pcf_count > npages) {
2379 				p->pcf_count -= (uint_t)npages;
2380 				/*
2381 				 * freemem is not protected by any lock.
2382 				 * Thus, we cannot have any assertion
2383 				 * containing freemem here.
2384 				 */
2385 				freemem -= npages;
2386 				enough = 1;
2387 				mutex_exit(&p->pcf_lock);
2388 				break;
2389 			}
2390 			mutex_exit(&p->pcf_lock);
2391 		}
2392 		p++;
2393 		if (p >= q) {
2394 			p = pcf;
2395 		}
2396 	}
2397 
2398 	if (!enough) {
2399 		/*
2400 		 * Have to look harder.  If npages is greater than
2401 		 * one, then we might have to coalecse the counters.
2402 		 *
2403 		 * Go wait.  We come back having accounted
2404 		 * for the memory.
2405 		 */
2406 		VM_STAT_ADD(page_create_cnt[1]);
2407 		if (!page_create_wait(npages, flags)) {
2408 			VM_STAT_ADD(page_create_cnt[2]);
2409 			return (NULL);
2410 		}
2411 	}
2412 
	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
	    "page_create_success:vp %p off %llx", vp, off);
2415 
2416 	/*
2417 	 * If satisfying this request has left us with too little
2418 	 * memory, start the wheels turning to get some back.  The
2419 	 * first clause of the test prevents waking up the pageout
2420 	 * daemon in situations where it would decide that there's
2421 	 * nothing to do.
2422 	 */
2423 	if (nscan < desscan && freemem < minfree) {
		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
		    "pageout_cv_signal:freemem %ld", freemem);
2426 		cv_signal(&proc_pageout->p_cv);
2427 	}
2428 
2429 	/*
2430 	 * Loop around collecting the requested number of pages.
2431 	 * Most of the time, we have to `create' a new page. With
2432 	 * this in mind, pull the page off the free list before
2433 	 * getting the hash lock.  This will minimize the hash
2434 	 * lock hold time, nesting, and the like.  If it turns
2435 	 * out we don't need the page, we put it back at the end.
2436 	 */
2437 	while (npages--) {
2438 		page_t		*pp;
2439 		kmutex_t	*phm = NULL;
2440 		ulong_t		index;
2441 
2442 		index = PAGE_HASH_FUNC(vp, off);
2443 top:
2444 		ASSERT(phm == NULL);
2445 		ASSERT(index == PAGE_HASH_FUNC(vp, off));
2446 		ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
2447 
2448 		if (npp == NULL) {
2449 			/*
2450 			 * Try to get a page from the freelist (ie,
2451 			 * a page with no [vp, off] tag).  If that
2452 			 * fails, use the cachelist.
2453 			 *
2454 			 * During the first attempt at both the free
2455 			 * and cache lists we try for the correct color.
2456 			 */
2457 			/*
			 * XXXX - how do we deal with virtually indexed
			 * caches and colors?
2460 			 */
2461 			VM_STAT_ADD(page_create_cnt[4]);
2462 			/*
2463 			 * Get lgroup to allocate next page of shared memory
2464 			 * from and use it to specify where to allocate
2465 			 * the physical memory
2466 			 */
2467 			lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
2468 			npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
2469 			    flags | PG_MATCH_COLOR, lgrp);
2470 			if (npp == NULL) {
2471 				npp = page_get_cachelist(vp, off, seg,
2472 				    vaddr, flags | PG_MATCH_COLOR, lgrp);
2473 				if (npp == NULL) {
2474 					npp = page_create_get_something(vp,
2475 					    off, seg, vaddr,
2476 					    flags & ~PG_MATCH_COLOR);
2477 				}
2478 
2479 				if (PP_ISAGED(npp) == 0) {
2480 					/*
2481 					 * Since this page came from the
2482 					 * cachelist, we must destroy the
2483 					 * old vnode association.
2484 					 */
2485 					page_hashout(npp, NULL);
2486 				}
2487 			}
2488 		}
2489 
2490 		/*
2491 		 * We own this page!
2492 		 */
2493 		ASSERT(PAGE_EXCL(npp));
2494 		ASSERT(npp->p_vnode == NULL);
2495 		ASSERT(!hat_page_is_mapped(npp));
2496 		PP_CLRFREE(npp);
2497 		PP_CLRAGED(npp);
2498 
2499 		/*
		 * Here we have a page in our hot little mitts and are
2501 		 * just waiting to stuff it on the appropriate lists.
2502 		 * Get the mutex and check to see if it really does
2503 		 * not exist.
2504 		 */
2505 		phm = PAGE_HASH_MUTEX(index);
2506 		mutex_enter(phm);
2507 		PAGE_HASH_SEARCH(index, pp, vp, off);
2508 		if (pp == NULL) {
2509 			VM_STAT_ADD(page_create_new);
2510 			pp = npp;
2511 			npp = NULL;
2512 			if (!page_hashin(pp, vp, off, phm)) {
2513 				/*
2514 				 * Since we hold the page hash mutex and
2515 				 * just searched for this page, page_hashin
2516 				 * had better not fail.  If it does, that
				 * means some thread did not follow the
2518 				 * page hash mutex rules.  Panic now and
2519 				 * get it over with.  As usual, go down
2520 				 * holding all the locks.
2521 				 */
2522 				ASSERT(MUTEX_HELD(phm));
2523 				panic("page_create: "
2524 				    "hashin failed %p %p %llx %p",
2525 				    (void *)pp, (void *)vp, off, (void *)phm);
2526 				/*NOTREACHED*/
2527 			}
2528 			ASSERT(MUTEX_HELD(phm));
2529 			mutex_exit(phm);
2530 			phm = NULL;
2531 
2532 			/*
2533 			 * Hat layer locking need not be done to set
2534 			 * the following bits since the page is not hashed
2535 			 * and was on the free list (i.e., had no mappings).
2536 			 *
2537 			 * Set the reference bit to protect
2538 			 * against immediate pageout
2539 			 *
2540 			 * XXXmh modify freelist code to set reference
2541 			 * bit so we don't have to do it here.
2542 			 */
2543 			page_set_props(pp, P_REF);
2544 			found_on_free++;
2545 		} else {
2546 			VM_STAT_ADD(page_create_exists);
2547 			if (flags & PG_EXCL) {
2548 				/*
2549 				 * Found an existing page, and the caller
2550 				 * wanted all new pages.  Undo all of the work
2551 				 * we have done.
2552 				 */
2553 				mutex_exit(phm);
2554 				phm = NULL;
2555 				while (plist != NULL) {
2556 					pp = plist;
2557 					page_sub(&plist, pp);
2558 					page_io_unlock(pp);
2559 					/* large pages should not end up here */
2560 					ASSERT(pp->p_szc == 0);
2561 					/*LINTED: constant in conditional ctx*/
2562 					VN_DISPOSE(pp, B_INVAL, 0, kcred);
2563 				}
2564 				VM_STAT_ADD(page_create_found_one);
2565 				goto fail;
2566 			}
2567 			ASSERT(flags & PG_WAIT);
2568 			if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) {
2569 				/*
2570 				 * Start all over again if we blocked trying
2571 				 * to lock the page.
2572 				 */
2573 				mutex_exit(phm);
2574 				VM_STAT_ADD(page_create_page_lock_failed);
2575 				phm = NULL;
2576 				goto top;
2577 			}
2578 			mutex_exit(phm);
2579 			phm = NULL;
2580 
2581 			if (PP_ISFREE(pp)) {
2582 				ASSERT(PP_ISAGED(pp) == 0);
2583 				VM_STAT_ADD(pagecnt.pc_get_cache);
2584 				page_list_sub(pp, PG_CACHE_LIST);
2585 				PP_CLRFREE(pp);
2586 				found_on_free++;
2587 			}
2588 		}
2589 
2590 		/*
2591 		 * Got a page!  It is locked.  Acquire the i/o
2592 		 * lock since we are going to use the p_next and
2593 		 * p_prev fields to link the requested pages together.
2594 		 */
2595 		page_io_lock(pp);
2596 		page_add(&plist, pp);
2597 		plist = plist->p_next;
2598 		off += PAGESIZE;
2599 		vaddr += PAGESIZE;
2600 	}
2601 
2602 	ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1);
2603 fail:
2604 	if (npp != NULL) {
2605 		/*
2606 		 * Did not need this page after all.
2607 		 * Put it back on the free list.
2608 		 */
2609 		VM_STAT_ADD(page_create_putbacks);
2610 		PP_SETFREE(npp);
2611 		PP_SETAGED(npp);
2612 		npp->p_offset = (u_offset_t)-1;
2613 		page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
2614 		page_unlock(npp);
2616 	}
2617 
2618 	ASSERT(pages_req >= found_on_free);
2619 
2620 	{
2621 		uint_t overshoot = (uint_t)(pages_req - found_on_free);
2622 
2623 		if (overshoot) {
2624 			VM_STAT_ADD(page_create_overshoot);
2625 			p = &pcf[pcf_index];
2626 			mutex_enter(&p->pcf_lock);
2627 			if (p->pcf_block) {
2628 				p->pcf_reserve += overshoot;
2629 			} else {
2630 				p->pcf_count += overshoot;
2631 				if (p->pcf_wait) {
2632 					mutex_enter(&new_freemem_lock);
2633 					if (freemem_wait) {
2634 						cv_signal(&freemem_cv);
2635 						p->pcf_wait--;
2636 					} else {
2637 						p->pcf_wait = 0;
2638 					}
2639 					mutex_exit(&new_freemem_lock);
2640 				}
2641 			}
2642 			mutex_exit(&p->pcf_lock);
			/* freemem is approximate, so this test is OK */
2644 			if (!p->pcf_block)
2645 				freemem += overshoot;
2646 		}
2647 	}
2648 
2649 	return (plist);
2650 }
2651 
2652 /*
 * One or more constituent pages of this large page have been marked
 * toxic. Simply demote the large page to PAGESIZE pages and let
 * page_free() handle it. This routine should only be called by
 * large page free routines (page_free_pages() and page_destroy_pages()).
2657  * All pages are locked SE_EXCL and have already been marked free.
2658  */
2659 static void
2660 page_free_toxic_pages(page_t *rootpp)
2661 {
2662 	page_t	*tpp;
2663 	pgcnt_t	i, pgcnt = page_get_pagecnt(rootpp->p_szc);
2664 	uint_t	szc = rootpp->p_szc;
2665 
2666 	for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) {
2667 		ASSERT(tpp->p_szc == szc);
2668 		ASSERT((PAGE_EXCL(tpp) &&
2669 		    !page_iolock_assert(tpp)) || panicstr);
2670 		tpp->p_szc = 0;
2671 	}
2672 
2673 	while (rootpp != NULL) {
2674 		tpp = rootpp;
2675 		page_sub(&rootpp, tpp);
2676 		ASSERT(PP_ISFREE(tpp));
2677 		PP_CLRFREE(tpp);
2678 		page_free(tpp, 1);
2679 	}
2680 }
2681 
2682 /*
2683  * Put page on the "free" list.
2684  * The free list is really two lists maintained by
2685  * the PSM of whatever machine we happen to be on.
2686  */
2687 void
2688 page_free(page_t *pp, int dontneed)
2689 {
2690 	struct pcf	*p;
2691 	uint_t		pcf_index;
2692 
2693 	ASSERT((PAGE_EXCL(pp) &&
2694 	    !page_iolock_assert(pp)) || panicstr);
2695 
2696 	if (PP_ISFREE(pp)) {
2697 		panic("page_free: page %p is free", (void *)pp);
2698 	}
2699 
2700 	if (pp->p_szc != 0) {
2701 		if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
2702 		    pp->p_vnode == &kvp) {
2703 			panic("page_free: anon or kernel "
2704 			    "or no vnode large page %p", (void *)pp);
2705 		}
2706 		page_demote_vp_pages(pp);
2707 		ASSERT(pp->p_szc == 0);
2708 	}
2709 
2710 	/*
2711 	 * The page_struct_lock need not be acquired to examine these
2712 	 * fields since the page has an "exclusive" lock.
2713 	 */
2714 	if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
2715 		panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d",
2716 		    pp, page_pptonum(pp), pp->p_lckcnt, pp->p_cowcnt);
2717 		/*NOTREACHED*/
2718 	}
2719 
2720 	ASSERT(!hat_page_getshare(pp));
2721 
2722 	PP_SETFREE(pp);
2723 	ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) ||
2724 	    !hat_ismod(pp));
2725 	page_clr_all_props(pp);
2726 	ASSERT(!hat_page_getshare(pp));
2727 
2728 	/*
2729 	 * Now we add the page to the head of the free list.
2730 	 * But if this page is associated with a paged vnode
2731 	 * then we adjust the head forward so that the page is
2732 	 * effectively at the end of the list.
2733 	 */
2734 	if (pp->p_vnode == NULL) {
2735 		/*
2736 		 * Page has no identity, put it on the free list.
2737 		 */
2738 		PP_SETAGED(pp);
2739 		pp->p_offset = (u_offset_t)-1;
2740 		page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
2741 		VM_STAT_ADD(pagecnt.pc_free_free);
2742 		TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
2743 		    "page_free_free:pp %p", pp);
2744 	} else {
2745 		PP_CLRAGED(pp);
2746 
2747 		if (!dontneed || nopageage) {
2748 			/* move it to the tail of the list */
2749 			page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL);
2750 
2751 			VM_STAT_ADD(pagecnt.pc_free_cache);
2752 			TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL,
2753 			    "page_free_cache_tail:pp %p", pp);
2754 		} else {
2755 			page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD);
2756 
2757 			VM_STAT_ADD(pagecnt.pc_free_dontneed);
2758 			TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD,
2759 			    "page_free_cache_head:pp %p", pp);
2760 		}
2761 	}
2762 	page_unlock(pp);
2763 
2764 	/*
2765 	 * Now do the `freemem' accounting.
2766 	 */
2767 	pcf_index = PCF_INDEX();
2768 	p = &pcf[pcf_index];
2769 
2770 	mutex_enter(&p->pcf_lock);
2771 	if (p->pcf_block) {
2772 		p->pcf_reserve += 1;
2773 	} else {
2774 		p->pcf_count += 1;
2775 		if (p->pcf_wait) {
2776 			mutex_enter(&new_freemem_lock);
2777 			/*
2778 			 * Check to see if some other thread
2779 			 * is actually waiting.  Another bucket
2780 			 * may have woken it up by now.  If there
2781 			 * are no waiters, then set our pcf_wait
2782 			 * count to zero to avoid coming in here
2783 			 * next time.  Also, since only one page
2784 			 * was put on the free list, just wake
2785 			 * up one waiter.
2786 			 */
2787 			if (freemem_wait) {
2788 				cv_signal(&freemem_cv);
2789 				p->pcf_wait--;
2790 			} else {
2791 				p->pcf_wait = 0;
2792 			}
2793 			mutex_exit(&new_freemem_lock);
2794 		}
2795 	}
2796 	mutex_exit(&p->pcf_lock);
2797 
	/* freemem is approximate, so this test is OK */
2799 	if (!p->pcf_block)
2800 		freemem += 1;
2801 }
2802 
2803 /*
 * Put the page on the "free" list during initial startup.
 * This happens during initial single-threaded execution.
2806  */
2807 void
2808 page_free_at_startup(page_t *pp)
2809 {
2810 	struct pcf	*p;
2811 	uint_t		pcf_index;
2812 
2813 	page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT);
2814 	VM_STAT_ADD(pagecnt.pc_free_free);
2815 
2816 	/*
2817 	 * Now do the `freemem' accounting.
2818 	 */
2819 	pcf_index = PCF_INDEX();
2820 	p = &pcf[pcf_index];
2821 
2822 	ASSERT(p->pcf_block == 0);
2823 	ASSERT(p->pcf_wait == 0);
2824 	p->pcf_count += 1;
2825 
2826 	/* freemem is approximate, so this is OK */
2827 	freemem += 1;
2828 }
2829 
2830 void
2831 page_free_pages(page_t *pp)
2832 {
2833 	page_t	*tpp, *rootpp = NULL;
2834 	pgcnt_t	pgcnt = page_get_pagecnt(pp->p_szc);
2835 	pgcnt_t	i;
2836 	uint_t	szc = pp->p_szc;
2837 
2838 	VM_STAT_ADD(pagecnt.pc_free_pages);
2839 	TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
2840 	    "page_free_free:pp %p", pp);
2841 
2842 	ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
2843 	if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
2844 		panic("page_free_pages: not root page %p", (void *)pp);
2845 		/*NOTREACHED*/
2846 	}
2847 
2848 	for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
2849 		ASSERT((PAGE_EXCL(tpp) &&
2850 		    !page_iolock_assert(tpp)) || panicstr);
2851 		if (PP_ISFREE(tpp)) {
2852 			panic("page_free_pages: page %p is free", (void *)tpp);
2853 			/*NOTREACHED*/
2854 		}
2855 		if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 ||
2856 		    tpp->p_cowcnt != 0) {
2857 			panic("page_free_pages %p", (void *)tpp);
2858 			/*NOTREACHED*/
2859 		}
2860 
2861 		ASSERT(!hat_page_getshare(tpp));
2862 		ASSERT(tpp->p_vnode == NULL);
2863 		ASSERT(tpp->p_szc == szc);
2864 
2865 		PP_SETFREE(tpp);
2866 		page_clr_all_props(tpp);
2867 		PP_SETAGED(tpp);
2868 		tpp->p_offset = (u_offset_t)-1;
2869 		ASSERT(tpp->p_next == tpp);
2870 		ASSERT(tpp->p_prev == tpp);
2871 		page_list_concat(&rootpp, &tpp);
2872 	}
2873 	ASSERT(rootpp == pp);
2874 
2875 	page_list_add_pages(rootpp, 0);
2876 	page_create_putback(pgcnt);
2877 }
2878 
2879 int free_pages = 1;
2880 
2881 /*
2882  * This routine attempts to return pages to the cachelist via page_release().
2883  * It does not *have* to be successful in all cases, since the pageout scanner
2884  * will catch any pages it misses.  It does need to be fast and not introduce
2885  * too much overhead.
2886  *
2887  * If a page isn't found on the unlocked sweep of the page_hash bucket, we
2888  * don't lock and retry.  This is ok, since the page scanner will eventually
2889  * find any page we miss in free_vp_pages().
2890  */
2891 void
2892 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len)
2893 {
2894 	page_t *pp;
2895 	u_offset_t eoff;
2896 	extern int swap_in_range(vnode_t *, u_offset_t, size_t);
2897 
2898 	eoff = off + len;
2899 
2900 	if (free_pages == 0)
2901 		return;
2902 	if (swap_in_range(vp, off, len))
2903 		return;
2904 
2905 	for (; off < eoff; off += PAGESIZE) {
2906 
2907 		/*
		 * Find the page using a fast but inexact search. It'll be OK
2909 		 * if a few pages slip through the cracks here.
2910 		 */
2911 		pp = page_exists(vp, off);
2912 
2913 		/*
		 * If we didn't find the page (it may not exist), or the
		 * page is free, still looks to be in use (shared), or we
		 * can't lock it, just give up.
2917 		 */
2918 		if (pp == NULL ||
2919 		    PP_ISFREE(pp) ||
2920 		    page_share_cnt(pp) > 0 ||
2921 		    !page_trylock(pp, SE_EXCL))
2922 			continue;
2923 
2924 		/*
2925 		 * Once we have locked pp, verify that it's still the
2926 		 * correct page and not already free
2927 		 */
2928 		ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL));
2929 		if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) {
2930 			page_unlock(pp);
2931 			continue;
2932 		}
2933 
2934 		/*
2935 		 * try to release the page...
2936 		 */
2937 		(void) page_release(pp, 1);
2938 	}
2939 }
2940 
2941 /*
2942  * Reclaim the given page from the free list.
2943  * Returns 1 on success or 0 on failure.
2944  *
2945  * The page is unlocked if it can't be reclaimed (when freemem == 0).
2946  * If `lock' is non-null, it will be dropped and re-acquired if
2947  * the routine must wait while freemem is 0.
2948  *
2949  * As it turns out, boot_getpages() does this.  It picks a page,
2950  * based on where OBP mapped in some address, gets its pfn, searches
2951  * the memsegs, locks the page, then pulls it off the free list!
2952  */
2953 int
2954 page_reclaim(page_t *pp, kmutex_t *lock)
2955 {
2956 	struct pcf	*p;
2957 	uint_t		pcf_index;
2958 	struct cpu	*cpup;
2959 	uint_t		i;
2960 	pgcnt_t		npgs, need;
2961 	pgcnt_t		collected = 0;
2962 
2963 	ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);
2964 	ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp));
2965 
2966 	npgs = page_get_pagecnt(pp->p_szc);
2967 
2968 	/*
2969 	 * If `freemem' is 0, we cannot reclaim this page from the
2970 	 * freelist, so release every lock we might hold: the page,
2971 	 * and the `lock' before blocking.
2972 	 *
2973 	 * The only way `freemem' can become 0 while there are pages
2974 	 * marked free (have their p->p_free bit set) is when the
	 * system is low on memory and doing a page_create().  Once
	 * page_create() starts acquiring pages, it must be guaranteed
	 * that it can get all that it needs, since `freemem' was
	 * decreased by the requested amount.  So we need to release
	 * this page and let page_create() have it.
2980 	 *
2981 	 * Since `freemem' being zero is not supposed to happen, just
2982 	 * use the usual hash stuff as a starting point.  If that bucket
2983 	 * is empty, then assume the worst, and start at the beginning
2984 	 * of the pcf array.  If we always start at the beginning
2985 	 * when acquiring more than one pcf lock, there won't be any
2986 	 * deadlock problems.
2987 	 */
2988 
2989 	/* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */
2990 
2991 	if (freemem <= throttlefree && !page_create_throttle(npgs, 0)) {
2992 		pcf_acquire_all();
2993 		goto page_reclaim_nomem;
2994 	}
2995 
2996 	pcf_index = PCF_INDEX();
2997 	p = &pcf[pcf_index];
2998 	mutex_enter(&p->pcf_lock);
2999 	if (p->pcf_count >= npgs) {
3000 		collected = npgs;
3001 		p->pcf_count -= npgs;
3002 	}
3003 	mutex_exit(&p->pcf_lock);
3004 	need = npgs - collected;
3005 
3006 	if (need > 0) {
3007 		VM_STAT_ADD(page_reclaim_zero);
3008 		/*
		 * Check again. It's possible that some other thread
3010 		 * could have been right behind us, and added one
3011 		 * to a list somewhere.  Acquire each of the pcf locks
3012 		 * until we find a page.
3013 		 */
3014 		p = pcf;
3015 		for (i = 0; i < PCF_FANOUT; i++) {
3016 			mutex_enter(&p->pcf_lock);
3017 			if (p->pcf_count) {
3018 				if (p->pcf_count >= need) {
3019 					p->pcf_count -= need;
3020 					collected += need;
3021 					need = 0;
3022 					break;
3023 				} else if (p->pcf_count) {
3024 					collected += p->pcf_count;
3025 					need -= p->pcf_count;
3026 					p->pcf_count = 0;
3027 				}
3028 			}
3029 			p++;
3030 		}
3031 
3032 		if (need > 0) {
3033 page_reclaim_nomem:
3034 			/*
3035 			 * We really can't have page `pp'.
3036 			 * Time for the no-memory dance with
3037 			 * page_free().  This is just like
3038 			 * page_create_wait().  Plus the added
3039 			 * attraction of releasing whatever mutex
3040 			 * we held when we were called with in `lock'.
3041 			 * Page_unlock() will wakeup any thread
3042 			 * waiting around for this page.
3043 			 */
3044 			if (lock) {
3045 				VM_STAT_ADD(page_reclaim_zero_locked);
3046 				mutex_exit(lock);
3047 			}
3048 			page_unlock(pp);
3049 
3050 			/*
3051 			 * get this before we drop all the pcf locks.
3052 			 */
3053 			mutex_enter(&new_freemem_lock);
3054 
3055 			p = pcf;
3056 			p->pcf_count += collected;
3057 			for (i = 0; i < PCF_FANOUT; i++) {
3058 				p->pcf_wait++;
3059 				mutex_exit(&p->pcf_lock);
3060 				p++;
3061 			}
3062 
3063 			freemem_wait++;
3064 			cv_wait(&freemem_cv, &new_freemem_lock);
3065 			freemem_wait--;
3066 
3067 			mutex_exit(&new_freemem_lock);
3068 
3069 			if (lock) {
3070 				mutex_enter(lock);
3071 			}
3072 			return (0);
3073 		}
3074 
3075 		/*
3076 		 * We beat the PCF bins over the head until
3077 		 * we got the memory that we wanted.
		 * The pcf accounting has been done and none of
		 * the pcf_wait flags were set, so drop the locks
		 * and continue on.
3081 		 */
3082 		ASSERT(collected == npgs);
3083 		while (p >= pcf) {
3084 			mutex_exit(&p->pcf_lock);
3085 			p--;
3086 		}
3087 	}
3088 
3089 	/*
3090 	 * freemem is not protected by any lock. Thus, we cannot
3091 	 * have any assertion containing freemem here.
3092 	 */
3093 	freemem -= npgs;
3094 
3095 	VM_STAT_ADD(pagecnt.pc_reclaim);
3096 	if (PP_ISAGED(pp)) {
3097 		if (npgs > 1) {
3098 			page_list_sub_pages(pp, pp->p_szc);
3099 		} else {
3100 			page_list_sub(pp, PG_FREE_LIST);
3101 		}
3102 		TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE,
3103 		    "page_reclaim_free:pp %p", pp);
3104 	} else {
3105 		ASSERT(npgs == 1);
3106 		page_list_sub(pp, PG_CACHE_LIST);
3107 		TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE,
3108 		    "page_reclaim_cache:pp %p", pp);
3109 	}
3110 
3111 	/*
	 * Clear the p_free & p_age bits since this page is no longer
	 * on the free list.  Notice that there was a brief time during
	 * which the page was marked free but was not on the list.
3115 	 *
3116 	 * Set the reference bit to protect against immediate pageout.
3117 	 */
3118 	for (i = 0; i < npgs; i++, pp++) {
3119 		PP_CLRFREE(pp);
3120 		PP_CLRAGED(pp);
3121 		page_set_props(pp, P_REF);
3122 	}
3123 
3124 	CPU_STATS_ENTER_K();
3125 	cpup = CPU;	/* get cpup now that CPU cannot change */
3126 	CPU_STATS_ADDQ(cpup, vm, pgrec, 1);
3127 	CPU_STATS_ADDQ(cpup, vm, pgfrec, 1);
3128 	CPU_STATS_EXIT_K();
3129 
3130 	return (1);
3131 }
3132 
3135 /*
3136  * Destroy identity of the page and put it back on
3137  * the page free list.  Assumes that the caller has
3138  * acquired the "exclusive" lock on the page.
3139  */
3140 void
3141 page_destroy(page_t *pp, int dontfree)
3142 {
3143 	ASSERT((PAGE_EXCL(pp) &&
3144 	    !page_iolock_assert(pp)) || panicstr);
3145 
3146 	if (pp->p_szc != 0) {
3147 		if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
3148 		    pp->p_vnode == &kvp) {
3149 			panic("page_destroy: anon or kernel or no vnode "
3150 			    "large page %p", (void *)pp);
3151 		}
3152 		page_demote_vp_pages(pp);
3153 		ASSERT(pp->p_szc == 0);
3154 	}
3155 
3156 	TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp);
3157 
3158 	/*
3159 	 * Unload translations, if any, then hash out the
3160 	 * page to erase its identity.
3161 	 */
3162 	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
3163 	page_hashout(pp, NULL);
3164 
3165 	if (!dontfree) {
3166 		/*
3167 		 * Acquire the "freemem_lock" for availrmem.
3168 		 * The page_struct_lock need not be acquired for lckcnt
3169 		 * and cowcnt since the page has an "exclusive" lock.
3170 		 */
3171 		if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) {
3172 			mutex_enter(&freemem_lock);
3173 			if (pp->p_lckcnt != 0) {
3174 				availrmem++;
3175 				pp->p_lckcnt = 0;
3176 			}
3177 			if (pp->p_cowcnt != 0) {
3178 				availrmem += pp->p_cowcnt;
3179 				pp->p_cowcnt = 0;
3180 			}
3181 			mutex_exit(&freemem_lock);
3182 		}
3183 		/*
3184 		 * Put the page on the "free" list.
3185 		 */
3186 		page_free(pp, 0);
3187 	}
3188 }
3189 
3190 void
3191 page_destroy_pages(page_t *pp)
3192 {
3194 	page_t	*tpp, *rootpp = NULL;
3195 	pgcnt_t	pgcnt = page_get_pagecnt(pp->p_szc);
3196 	pgcnt_t	i, pglcks = 0;
3197 	uint_t	szc = pp->p_szc;
3198 
3199 	ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
3200 
3201 	VM_STAT_ADD(pagecnt.pc_destroy_pages);
3202 
3203 	TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp);
3204 
3205 	if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
3206 		panic("page_destroy_pages: not root page %p", (void *)pp);
3207 		/*NOTREACHED*/
3208 	}
3209 
3210 	for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
3211 		ASSERT((PAGE_EXCL(tpp) &&
3212 		    !page_iolock_assert(tpp)) || panicstr);
3213 		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
3214 		page_hashout(tpp, NULL);
3215 		ASSERT(tpp->p_offset == (u_offset_t)-1);
3216 		if (tpp->p_lckcnt != 0) {
3217 			pglcks++;
3218 			tpp->p_lckcnt = 0;
3219 		} else if (tpp->p_cowcnt != 0) {
3220 			pglcks += tpp->p_cowcnt;
3221 			tpp->p_cowcnt = 0;
3222 		}
3223 		ASSERT(!hat_page_getshare(tpp));
3224 		ASSERT(tpp->p_vnode == NULL);
3225 		ASSERT(tpp->p_szc == szc);
3226 
3227 		PP_SETFREE(tpp);
3228 		page_clr_all_props(tpp);
3229 		PP_SETAGED(tpp);
3230 		ASSERT(tpp->p_next == tpp);
3231 		ASSERT(tpp->p_prev == tpp);
3232 		page_list_concat(&rootpp, &tpp);
3233 	}
3234 
3235 	ASSERT(rootpp == pp);
3236 	if (pglcks != 0) {
3237 		mutex_enter(&freemem_lock);
3238 		availrmem += pglcks;
3239 		mutex_exit(&freemem_lock);
3240 	}
3241 
3242 	page_list_add_pages(rootpp, 0);
3243 	page_create_putback(pgcnt);
3244 }
3245 
3246 /*
3247  * Similar to page_destroy(), but destroys pages which are
3248  * locked and known to be on the page free list.  Since
3249  * the page is known to be free and locked, no one can access
3250  * it.
3251  *
3252  * Also, the number of free pages does not change.
3253  */
3254 void
3255 page_destroy_free(page_t *pp)
3256 {
3257 	ASSERT(PAGE_EXCL(pp));
3258 	ASSERT(PP_ISFREE(pp));
3259 	ASSERT(pp->p_vnode);
3260 	ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0);
3261 	ASSERT(!hat_page_is_mapped(pp));
3262 	ASSERT(PP_ISAGED(pp) == 0);
3263 	ASSERT(pp->p_szc == 0);
3264 
3265 	VM_STAT_ADD(pagecnt.pc_destroy_free);
3266 	page_list_sub(pp, PG_CACHE_LIST);
3267 
3268 	page_hashout(pp, NULL);
3269 	ASSERT(pp->p_vnode == NULL);
3270 	ASSERT(pp->p_offset == (u_offset_t)-1);
3271 	ASSERT(pp->p_hash == NULL);
3272 
3273 	PP_SETAGED(pp);
3274 	page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
3275 	page_unlock(pp);
3276 
3277 	mutex_enter(&new_freemem_lock);
3278 	if (freemem_wait) {
3279 		cv_signal(&freemem_cv);
3280 	}
3281 	mutex_exit(&new_freemem_lock);
3282 }
3283 
3284 /*
3285  * Rename the page "opp" to have an identity specified
3286  * by [vp, off].  If a page already exists with this name
3287  * it is locked and destroyed.  Note that the page's
3288  * translations are not unloaded during the rename.
3289  *
3290  * This routine is used by the anon layer to "steal" the
3291  * original page and is not unlike destroying a page and
3292  * creating a new page using the same page frame.
3293  *
3294  * XXX -- Could deadlock if caller 1 tries to rename A to B while
3295  * caller 2 tries to rename B to A.
3296  */
3297 void
3298 page_rename(page_t *opp, vnode_t *vp, u_offset_t off)
3299 {
3300 	page_t		*pp;
3301 	int		olckcnt = 0;
3302 	int		ocowcnt = 0;
3303 	kmutex_t	*phm;
3304 	ulong_t		index;
3305 
3306 	ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp));
3307 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3308 	ASSERT(PP_ISFREE(opp) == 0);
3309 
3310 	VM_STAT_ADD(page_rename_count);
3311 
	TRACE_3(TR_FAC_VM, TR_PAGE_RENAME,
	    "page rename:pp %p vp %p off %llx", opp, vp, off);
3314 
3315 	/*
3316 	 * CacheFS may call page_rename for a large NFS page
3317 	 * when both CacheFS and NFS mount points are used
3318 	 * by applications. Demote this large page before
3319 	 * renaming it, to ensure that there are no "partial"
3320 	 * large pages left lying around.
3321 	 */
3322 	if (opp->p_szc != 0) {
3323 		vnode_t *ovp = opp->p_vnode;
3324 		ASSERT(ovp != NULL);
3325 		ASSERT(!IS_SWAPFSVP(ovp));
3326 		ASSERT(ovp != &kvp);
3327 		page_demote_vp_pages(opp);
3328 		ASSERT(opp->p_szc == 0);
3329 	}
3330 
3331 	page_hashout(opp, NULL);
3332 	PP_CLRAGED(opp);
3333 
3334 	/*
3335 	 * Acquire the appropriate page hash lock, since
3336 	 * we're going to rename the page.
3337 	 */
3338 	index = PAGE_HASH_FUNC(vp, off);
3339 	phm = PAGE_HASH_MUTEX(index);
3340 	mutex_enter(phm);
3341 top:
3342 	/*
3343 	 * Look for an existing page with this name and destroy it if found.
3344 	 * By holding the page hash lock all the way to the page_hashin()
3345 	 * call, we are assured that no page can be created with this
3346 	 * identity.  In the case when the phm lock is dropped to undo any
3347 	 * hat layer mappings, the existing page is held with an "exclusive"
3348 	 * lock, again preventing another page from being created with
3349 	 * this identity.
3350 	 */
3351 	PAGE_HASH_SEARCH(index, pp, vp, off);
3352 	if (pp != NULL) {
3353 		VM_STAT_ADD(page_rename_exists);
3354 
3355 		/*
3356 		 * As it turns out, this is one of only two places where
		 * page_lock() needs to hold the passed-in lock in the
		 * successful case.  In all of the others, the lock could
		 * be dropped as soon as the attempt is made to lock
		 * the page.  It is tempting to add yet another argument,
3361 		 * PL_KEEP or PL_DROP, to let page_lock know what to do.
3362 		 */
3363 		if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) {
3364 			/*
3365 			 * Went to sleep because the page could not
3366 			 * be locked.  We were woken up when the page
3367 			 * was unlocked, or when the page was destroyed.
3368 			 * In either case, `phm' was dropped while we
3369 			 * slept.  Hence we should not just roar through
3370 			 * this loop.
3371 			 */
3372 			goto top;
3373 		}
3374 
3375 		/*
3376 		 * If an existing page is a large page, then demote
3377 		 * it to ensure that no "partial" large pages are
3378 		 * "created" after page_rename. An existing page
3379 		 * can be a CacheFS page, and can't belong to swapfs.
3380 		 */
3381 		if (hat_page_is_mapped(pp)) {
3382 			/*
3383 			 * Unload translations.  Since we hold the
3384 			 * exclusive lock on this page, the page
			 * cannot be changed while we drop phm.
3386 			 * This is also not a lock protocol violation,
3387 			 * but rather the proper way to do things.
3388 			 */
3389 			mutex_exit(phm);
3390 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
3391 			if (pp->p_szc != 0) {
3392 				ASSERT(!IS_SWAPFSVP(vp));
3393 				ASSERT(vp != &kvp);
3394 				page_demote_vp_pages(pp);
3395 				ASSERT(pp->p_szc == 0);
3396 			}
3397 			mutex_enter(phm);
3398 		} else if (pp->p_szc != 0) {
3399 			ASSERT(!IS_SWAPFSVP(vp));
3400 			ASSERT(vp != &kvp);
3401 			mutex_exit(phm);
3402 			page_demote_vp_pages(pp);
3403 			ASSERT(pp->p_szc == 0);
3404 			mutex_enter(phm);
3405 		}
3406 		page_hashout(pp, phm);
3407 	}
3408 	/*
3409 	 * Hash in the page with the new identity.
3410 	 */
3411 	if (!page_hashin(opp, vp, off, phm)) {
3412 		/*
3413 		 * We were holding phm while we searched for [vp, off]
3414 		 * and only dropped phm if we found and locked a page.
		 * If we can't create this page now, then something
3416 		 * is really broken.
3417 		 */
3418 		panic("page_rename: Can't hash in page: %p", (void *)pp);
3419 		/*NOTREACHED*/
3420 	}
3421 
3422 	ASSERT(MUTEX_HELD(phm));
3423 	mutex_exit(phm);
3424 
3425 	/*
	 * Now that we have dropped phm, let's get around to finishing up
3427 	 * with pp.
3428 	 */
3429 	if (pp != NULL) {
3430 		ASSERT(!hat_page_is_mapped(pp));
3431 		/* for now large pages should not end up here */
3432 		ASSERT(pp->p_szc == 0);
3433 		/*
3434 		 * Save the locks for transfer to the new page and then
3435 		 * clear them so page_free doesn't think they're important.
3436 		 * The page_struct_lock need not be acquired for lckcnt and
3437 		 * cowcnt since the page has an "exclusive" lock.
3438 		 */
3439 		olckcnt = pp->p_lckcnt;
3440 		ocowcnt = pp->p_cowcnt;
3441 		pp->p_lckcnt = pp->p_cowcnt = 0;
3442 
3443 		/*
3444 		 * Put the page on the "free" list after we drop
3445 		 * the lock.  The less work under the lock the better.
3446 		 */
3447 		/*LINTED: constant in conditional context*/
3448 		VN_DISPOSE(pp, B_FREE, 0, kcred);
3449 	}
3450 
3451 	/*
3452 	 * Transfer the lock count from the old page (if any).
3453 	 * The page_struct_lock need not be acquired for lckcnt and
3454 	 * cowcnt since the page has an "exclusive" lock.
3455 	 */
3456 	opp->p_lckcnt += olckcnt;
3457 	opp->p_cowcnt += ocowcnt;
3458 }
3459 
3460 /*
 * Low-level routine to add page `pp' to the hash and vp chains for
 * [vp, offset].
3462  *
3463  * Pages are normally inserted at the start of a vnode's v_pages list.
3464  * If the vnode is VMODSORT and the page is modified, it goes at the end.
3465  * This can happen when a modified page is relocated for DR.
3466  *
3467  * Returns 1 on success and 0 on failure.
3468  */
3469 static int
3470 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset)
3471 {
3472 	page_t		**listp;
3473 	page_t		*tp;
3474 	ulong_t		index;
3475 
3476 	ASSERT(PAGE_EXCL(pp));
3477 	ASSERT(vp != NULL);
3478 	ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
3479 
3480 	/*
3481 	 * Be sure to set these up before the page is inserted on the hash
3482 	 * list.  As soon as the page is placed on the list some other
3483 	 * thread might get confused and wonder how this page could
3484 	 * possibly hash to this list.
3485 	 */
3486 	pp->p_vnode = vp;
3487 	pp->p_offset = offset;
3488 
3489 	/*
	 * Record whether this page is on a swap vnode.
3491 	 */
3492 	if ((vp->v_flag & VISSWAP) != 0)
3493 		PP_SETSWAP(pp);
3494 
3495 	index = PAGE_HASH_FUNC(vp, offset);
3496 	ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index)));
3497 	listp = &page_hash[index];
3498 
3499 	/*
3500 	 * If this page is already hashed in, fail this attempt to add it.
3501 	 */
3502 	for (tp = *listp; tp != NULL; tp = tp->p_hash) {
3503 		if (tp->p_vnode == vp && tp->p_offset == offset) {
3504 			pp->p_vnode = NULL;
3505 			pp->p_offset = (u_offset_t)(-1);
3506 			return (0);
3507 		}
3508 	}
3509 	pp->p_hash = *listp;
3510 	*listp = pp;
3511 
3512 	/*
3513 	 * Add the page to the vnode's list of pages
3514 	 */
3515 	if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp))
3516 		listp = &vp->v_pages->p_vpprev->p_vpnext;
3517 	else
3518 		listp = &vp->v_pages;
3519 
3520 	page_vpadd(listp, pp);
3521 
3522 	return (1);
3523 }
3524 
3525 /*
3526  * Add page `pp' to both the hash and vp chains for [vp, offset].
3527  *
3528  * Returns 1 on success and 0 on failure.
3529  * If hold is passed in, it is not dropped.
3530  */
3531 int
3532 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold)
3533 {
3534 	kmutex_t	*phm = NULL;
3535 	kmutex_t	*vphm;
3536 	int		rc;
3537 
3538 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3539 
	TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN,
	    "page_hashin:pp %p vp %p offset %llx",
	    pp, vp, offset);
3543 
3544 	VM_STAT_ADD(hashin_count);
3545 
3546 	if (hold != NULL)
3547 		phm = hold;
3548 	else {
3549 		VM_STAT_ADD(hashin_not_held);
3550 		phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset));
3551 		mutex_enter(phm);
3552 	}
3553 
3554 	vphm = page_vnode_mutex(vp);
3555 	mutex_enter(vphm);
3556 	rc = page_do_hashin(pp, vp, offset);
3557 	mutex_exit(vphm);
3558 	if (hold == NULL)
3559 		mutex_exit(phm);
3560 	if (rc == 0)
3561 		VM_STAT_ADD(hashin_already);
3562 	return (rc);
3563 }
3564 
3565 /*
3566  * Remove page ``pp'' from the hash and vp chains and remove vp association.
 * All mutexes must be held.
3568  */
3569 static void
3570 page_do_hashout(page_t *pp)
3571 {
3572 	page_t	**hpp;
3573 	page_t	*hp;
3574 	vnode_t	*vp = pp->p_vnode;
3575 
3576 	ASSERT(vp != NULL);
3577 	ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
3578 
3579 	/*
3580 	 * First, take pp off of its hash chain.
3581 	 */
3582 	hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)];
3583 
3584 	for (;;) {
3585 		hp = *hpp;
3586 		if (hp == pp)
3587 			break;
3588 		if (hp == NULL) {
3589 			panic("page_do_hashout");
3590 			/*NOTREACHED*/
3591 		}
3592 		hpp = &hp->p_hash;
3593 	}
3594 	*hpp = pp->p_hash;
3595 
3596 	/*
3597 	 * Now remove it from its associated vnode.
3598 	 */
3599 	if (vp->v_pages)
3600 		page_vpsub(&vp->v_pages, pp);
3601 
3602 	pp->p_hash = NULL;
3603 	page_clr_all_props(pp);
3604 	PP_CLRSWAP(pp);
3605 	pp->p_vnode = NULL;
3606 	pp->p_offset = (u_offset_t)-1;
3607 }
3608 
3609 /*
3610  * Remove page ``pp'' from the hash and vp chains and remove vp association.
3611  *
3612  * When `phm' is non-NULL it contains the address of the mutex protecting the
3613  * hash list pp is on.  It is not dropped.
3614  */
3615 void
3616 page_hashout(page_t *pp, kmutex_t *phm)
3617 {
3618 	vnode_t		*vp;
3619 	ulong_t		index;
3620 	kmutex_t	*nphm;
3621 	kmutex_t	*vphm;
3622 	kmutex_t	*sep;
3623 
3624 	ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1);
3625 	ASSERT(pp->p_vnode != NULL);
3626 	ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);
3627 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode)));
3628 
3629 	vp = pp->p_vnode;
3630 
	TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT,
	    "page_hashout:pp %p vp %p", pp, vp);
3633 
3634 	/* Kernel probe */
3635 	TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */,
3636 	    tnf_opaque, vnode, vp,
3637 	    tnf_offset, offset, pp->p_offset);
3638 
3639 	/*
3640 	 *
3641 	 */
3642 	VM_STAT_ADD(hashout_count);
3643 	index = PAGE_HASH_FUNC(vp, pp->p_offset);
3644 	if (phm == NULL) {
3645 		VM_STAT_ADD(hashout_not_held);
3646 		nphm = PAGE_HASH_MUTEX(index);
3647 		mutex_enter(nphm);
3648 	}
3649 	ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1);
3650 
3652 	/*
	 * Grab the page vnode mutex and remove the page...
3654 	 */
3655 	vphm = page_vnode_mutex(vp);
3656 	mutex_enter(vphm);
3657 
3658 	page_do_hashout(pp);
3659 
3660 	mutex_exit(vphm);
3661 	if (phm == NULL)
3662 		mutex_exit(nphm);
3663 
3664 	/*
3665 	 * Wake up processes waiting for this page.  The page's
3666 	 * identity has been changed, and is probably not the
3667 	 * desired page any longer.
3668 	 */
3669 	sep = page_se_mutex(pp);
3670 	mutex_enter(sep);
3671 	pp->p_selock &= ~SE_EWANTED;
3672 	if (CV_HAS_WAITERS(&pp->p_cv))
3673 		cv_broadcast(&pp->p_cv);
3674 	mutex_exit(sep);
3675 }
3676 
3677 /*
3678  * Add the page to the front of a linked list of pages
3679  * using the p_next & p_prev pointers for the list.
3680  * The caller is responsible for protecting the list pointers.
3681  */
3682 void
3683 page_add(page_t **ppp, page_t *pp)
3684 {
3685 	ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3686 
3687 	page_add_common(ppp, pp);
3688 }
3689 
3690 
3692 /*
3693  *  Common code for page_add() and mach_page_add()
3694  */
3695 void
3696 page_add_common(page_t **ppp, page_t *pp)
3697 {
3698 	if (*ppp == NULL) {
3699 		pp->p_next = pp->p_prev = pp;
3700 	} else {
3701 		pp->p_next = *ppp;
3702 		pp->p_prev = (*ppp)->p_prev;
3703 		(*ppp)->p_prev = pp;
3704 		pp->p_prev->p_next = pp;
3705 	}
3706 	*ppp = pp;
3707 }
3708 
3710 /*
3711  * Remove this page from a linked list of pages
3712  * using the p_next & p_prev pointers for the list.
3713  *
3714  * The caller is responsible for protecting the list pointers.
3715  */
3716 void
3717 page_sub(page_t **ppp, page_t *pp)
3718 {
3719 	ASSERT((PP_ISFREE(pp)) ? 1 :
3720 	    (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3721 
3722 	if (*ppp == NULL || pp == NULL) {
3723 		panic("page_sub: bad arg(s): pp %p, *ppp %p",
3724 		    (void *)pp, (void *)(*ppp));
3725 		/*NOTREACHED*/
3726 	}
3727 
3728 	page_sub_common(ppp, pp);
3729 }
3730 
3732 /*
3733  *  Common code for page_sub() and mach_page_sub()
3734  */
3735 void
3736 page_sub_common(page_t **ppp, page_t *pp)
3737 {
3738 	if (*ppp == pp)
3739 		*ppp = pp->p_next;		/* go to next page */
3740 
3741 	if (*ppp == pp)
3742 		*ppp = NULL;			/* page list is gone */
3743 	else {
3744 		pp->p_prev->p_next = pp->p_next;
3745 		pp->p_next->p_prev = pp->p_prev;
3746 	}
3747 	pp->p_prev = pp->p_next = pp;		/* make pp a list of one */
3748 }
3749 
3750 
3751 /*
 * Break page list oppp into two lists with npages in the first list.
3753  * The tail is returned in nppp.
3754  */
3755 void
3756 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages)
3757 {
3758 	page_t *s1pp = *oppp;
3759 	page_t *s2pp;
3760 	page_t *e1pp, *e2pp;
3761 	long n = 0;
3762 
3763 	if (s1pp == NULL) {
3764 		*nppp = NULL;
3765 		return;
3766 	}
3767 	if (npages == 0) {
3768 		*nppp = s1pp;
3769 		*oppp = NULL;
3770 		return;
3771 	}
3772 	for (n = 0, s2pp = *oppp; n < npages; n++) {
3773 		s2pp = s2pp->p_next;
3774 	}
3775 	/* Fix head and tail of new lists */
3776 	e1pp = s2pp->p_prev;
3777 	e2pp = s1pp->p_prev;
3778 	s1pp->p_prev = e1pp;
3779 	e1pp->p_next = s1pp;
3780 	s2pp->p_prev = e2pp;
3781 	e2pp->p_next = s2pp;
3782 
3783 	/* second list empty */
3784 	if (s2pp == s1pp) {
3785 		*oppp = s1pp;
3786 		*nppp = NULL;
3787 	} else {
3788 		*oppp = s1pp;
3789 		*nppp = s2pp;
3790 	}
3791 }
3792 
3793 /*
3794  * Concatenate page list nppp onto the end of list ppp.
3795  */
3796 void
3797 page_list_concat(page_t **ppp, page_t **nppp)
3798 {
3799 	page_t *s1pp, *s2pp, *e1pp, *e2pp;
3800 
3801 	if (*nppp == NULL) {
3802 		return;
3803 	}
3804 	if (*ppp == NULL) {
3805 		*ppp = *nppp;
3806 		return;
3807 	}
3808 	s1pp = *ppp;
3809 	e1pp =  s1pp->p_prev;
3810 	s2pp = *nppp;
3811 	e2pp = s2pp->p_prev;
3812 	s1pp->p_prev = e2pp;
3813 	e2pp->p_next = s1pp;
3814 	e1pp->p_next = s2pp;
3815 	s2pp->p_prev = e1pp;
3816 }
3817 
3818 /*
 * Return the next page in the page list.
3820  */
3821 page_t *
3822 page_list_next(page_t *pp)
3823 {
3824 	return (pp->p_next);
3825 }
3826 
3827 
3828 /*
3829  * Add the page to the front of the linked list of pages
3830  * using p_vpnext/p_vpprev pointers for the list.
3831  *
3832  * The caller is responsible for protecting the lists.
3833  */
3834 void
3835 page_vpadd(page_t **ppp, page_t *pp)
3836 {
3837 	if (*ppp == NULL) {
3838 		pp->p_vpnext = pp->p_vpprev = pp;
3839 	} else {
3840 		pp->p_vpnext = *ppp;
3841 		pp->p_vpprev = (*ppp)->p_vpprev;
3842 		(*ppp)->p_vpprev = pp;
3843 		pp->p_vpprev->p_vpnext = pp;
3844 	}
3845 	*ppp = pp;
3846 }
3847 
3848 /*
3849  * Remove this page from the linked list of pages
3850  * using p_vpnext/p_vpprev pointers for the list.
3851  *
3852  * The caller is responsible for protecting the lists.
3853  */
3854 void
3855 page_vpsub(page_t **ppp, page_t *pp)
3856 {
3857 	if (*ppp == NULL || pp == NULL) {
3858 		panic("page_vpsub: bad arg(s): pp %p, *ppp %p",
3859 		    (void *)pp, (void *)(*ppp));
3860 		/*NOTREACHED*/
3861 	}
3862 
3863 	if (*ppp == pp)
3864 		*ppp = pp->p_vpnext;		/* go to next page */
3865 
3866 	if (*ppp == pp)
3867 		*ppp = NULL;			/* page list is gone */
3868 	else {
3869 		pp->p_vpprev->p_vpnext = pp->p_vpnext;
3870 		pp->p_vpnext->p_vpprev = pp->p_vpprev;
3871 	}
3872 	pp->p_vpprev = pp->p_vpnext = pp;	/* make pp a list of one */
3873 }
3874 
3875 /*
3876  * Lock a physical page into memory "long term".  Used to support "lock
3877  * in memory" functions.  Accepts the page to be locked, and a cow variable
3878  * to indicate whether the lock will travel to the new page during
3879  * a potential copy-on-write.
3880  */
3881 int
3882 page_pp_lock(
3883 	page_t *pp,			/* page to be locked */
3884 	int cow,			/* cow lock */
3885 	int kernel)			/* must succeed -- ignore checking */
3886 {
3887 	int r = 0;			/* result -- assume failure */
3888 
3889 	ASSERT(PAGE_LOCKED(pp));
3890 
3891 	page_struct_lock(pp);
3892 	/*
3893 	 * Acquire the "freemem_lock" for availrmem.
3894 	 */
3895 	if (cow) {
3896 		mutex_enter(&freemem_lock);
3897 		if ((availrmem > pages_pp_maximum) &&
3898 		    (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
3899 			availrmem--;
3900 			pages_locked++;
3901 			mutex_exit(&freemem_lock);
3902 			r = 1;
3903 			if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3904 				cmn_err(CE_WARN,
3905 				    "COW lock limit reached on pfn 0x%lx",
3906 				    page_pptonum(pp));
3907 			}
3908 		} else
3909 			mutex_exit(&freemem_lock);
3910 	} else {
3911 		if (pp->p_lckcnt) {
3912 			if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
3913 				r = 1;
3914 				if (++pp->p_lckcnt ==
3915 				    (ushort_t)PAGE_LOCK_MAXIMUM) {
3916 					cmn_err(CE_WARN, "Page lock limit "
3917 					    "reached on pfn 0x%lx",
3918 					    page_pptonum(pp));
3919 				}
3920 			}
3921 		} else {
3922 			if (kernel) {
3923 				/* availrmem accounting done by caller */
3924 				++pp->p_lckcnt;
3925 				r = 1;
3926 			} else {
3927 				mutex_enter(&freemem_lock);
3928 				if (availrmem > pages_pp_maximum) {
3929 					availrmem--;
3930 					pages_locked++;
3931 					++pp->p_lckcnt;
3932 					r = 1;
3933 				}
3934 				mutex_exit(&freemem_lock);
3935 			}
3936 		}
3937 	}
3938 	page_struct_unlock(pp);
3939 	return (r);
3940 }
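
/*
 * Illustrative sketch (not part of the build): a typical pairing of
 * page_pp_lock() and page_pp_unlock().  The caller must already hold
 * the page locked (PAGE_LOCKED) and must pass the same cow/kernel
 * arguments on unlock that it passed on lock.  The EAGAIN policy
 * shown is hypothetical.
 *
 *	if (page_pp_lock(pp, 0, 0) == 0)
 *		return (EAGAIN);
 *	(the page is now locked in memory)
 *	page_pp_unlock(pp, 0, 0);
 */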
3941 
3942 /*
3943  * Decommit a lock on a physical page frame.  Account for cow locks if
3944  * appropriate.
3945  */
3946 void
3947 page_pp_unlock(
3948 	page_t *pp,			/* page to be unlocked */
3949 	int cow,			/* expect cow lock */
3950 	int kernel)			/* this was a kernel lock */
3951 {
3952 	ASSERT(PAGE_LOCKED(pp));
3953 
3954 	page_struct_lock(pp);
3955 	/*
3956 	 * Acquire the "freemem_lock" for availrmem.
3957 	 * If cowcnt or lckcnt is already 0 do nothing; i.e., we
3958 	 * could be called to unlock even if nothing is locked. This could
3959 	 * happen if locked file pages were truncated (removing the lock)
3960 	 * and the file was grown again and new pages faulted in; the new
3961 	 * pages are unlocked but the segment still thinks they're locked.
3962 	 */
3963 	if (cow) {
3964 		if (pp->p_cowcnt) {
3965 			mutex_enter(&freemem_lock);
3966 			pp->p_cowcnt--;
3967 			availrmem++;
3968 			pages_locked--;
3969 			mutex_exit(&freemem_lock);
3970 		}
3971 	} else {
3972 		if (pp->p_lckcnt && --pp->p_lckcnt == 0) {
3973 			if (!kernel) {
3974 				mutex_enter(&freemem_lock);
3975 				availrmem++;
3976 				pages_locked--;
3977 				mutex_exit(&freemem_lock);
3978 			}
3979 		}
3980 	}
3981 	page_struct_unlock(pp);
3982 }
3983 
3984 /*
3985  * This routine reserves availrmem for npages;
3986  * 	flags: KM_NOSLEEP or KM_SLEEP
3987  * 	returns 1 on success or 0 on failure
3988  */
3989 int
3990 page_resv(pgcnt_t npages, uint_t flags)
3991 {
3992 	mutex_enter(&freemem_lock);
3993 	while (availrmem < tune.t_minarmem + npages) {
3994 		if (flags & KM_NOSLEEP) {
3995 			mutex_exit(&freemem_lock);
3996 			return (0);
3997 		}
3998 		mutex_exit(&freemem_lock);
3999 		page_needfree(npages);
4000 		kmem_reap();
4001 		delay(hz >> 2);
4002 		page_needfree(-(spgcnt_t)npages);
4003 		mutex_enter(&freemem_lock);
4004 	}
4005 	availrmem -= npages;
4006 	mutex_exit(&freemem_lock);
4007 	return (1);
4008 }
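
/*
 * Illustrative sketch (not part of the build): callers that need
 * npages of reserved memory bracket the use with page_resv() and
 * page_unresv().  KM_NOSLEEP makes the reservation fail rather than
 * block when availrmem is short.
 *
 *	if (page_resv(npages, KM_NOSLEEP) == 0)
 *		return (ENOMEM);
 *	(use the reserved memory)
 *	page_unresv(npages);
 */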
4009 
4010 /*
4011  * This routine unreserves availrmem for npages;
4012  */
4013 void
4014 page_unresv(pgcnt_t npages)
4015 {
4016 	mutex_enter(&freemem_lock);
4017 	availrmem += npages;
4018 	mutex_exit(&freemem_lock);
4019 }
4020 
4021 /*
4022  * See Statement at the beginning of segvn_lockop() regarding
4023  * the way we handle cowcnts and lckcnts.
4024  *
4025  * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage
4026  * that breaks COW has PROT_WRITE.
4027  *
4028  * Note that we may also break COW in case we are softlocking
4029  * on read access during physio;
4030  * in this softlock case, the vpage may not have PROT_WRITE.
4031  * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp'
4032  * if the vpage doesn't have PROT_WRITE.
4033  *
4034  * This routine is never called if we are stealing a page
4035  * in anon_private.
4036  *
4037  * The caller subtracted from availrmem for a read-only mapping;
4038  * if lckcnt is 1, increment availrmem.
4039  */
4040 void
4041 page_pp_useclaim(
4042 	page_t *opp,		/* original page frame losing lock */
4043 	page_t *npp,		/* new page frame gaining lock */
4044 	uint_t	write_perm) 	/* set if vpage has PROT_WRITE */
4045 {
4046 	int payback = 0;
4047 
4048 	ASSERT(PAGE_LOCKED(opp));
4049 	ASSERT(PAGE_LOCKED(npp));
4050 
4051 	page_struct_lock(opp);
4052 
4053 	ASSERT(npp->p_cowcnt == 0);
4054 	ASSERT(npp->p_lckcnt == 0);
4055 
4056 	/* Don't use claim if nothing is locked (see page_pp_unlock above) */
4057 	if ((write_perm && opp->p_cowcnt != 0) ||
4058 	    (!write_perm && opp->p_lckcnt != 0)) {
4059 
4060 		if (write_perm) {
4061 			npp->p_cowcnt++;
4062 			ASSERT(opp->p_cowcnt != 0);
4063 			opp->p_cowcnt--;
4064 		} else {
4065 
4066 			ASSERT(opp->p_lckcnt != 0);
4067 
4068 			/*
4069 			 * We didn't need availrmem decremented if p_lckcnt on
4070 			 * the original page is 1. Here, we are unlocking the
4071 			 * read-only copy belonging to the original page and
4072 			 * locking a copy belonging to the new page.
4073 			 */
4074 			if (opp->p_lckcnt == 1)
4075 				payback = 1;
4076 
4077 			npp->p_lckcnt++;
4078 			opp->p_lckcnt--;
4079 		}
4080 	}
4081 	if (payback) {
4082 		mutex_enter(&freemem_lock);
4083 		availrmem++;
4084 		pages_useclaim--;
4085 		mutex_exit(&freemem_lock);
4086 	}
4087 	page_struct_unlock(opp);
4088 }
4089 
4090 /*
4091  * Simple claim adjust functions -- used to support changes in
4092  * claims due to changes in access permissions.  Used by segvn_setprot().
4093  */
4094 int
4095 page_addclaim(page_t *pp)
4096 {
4097 	int r = 0;			/* result */
4098 
4099 	ASSERT(PAGE_LOCKED(pp));
4100 
4101 	page_struct_lock(pp);
4102 	ASSERT(pp->p_lckcnt != 0);
4103 
4104 	if (pp->p_lckcnt == 1) {
4105 		if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
4106 			--pp->p_lckcnt;
4107 			r = 1;
4108 			if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4109 				cmn_err(CE_WARN,
4110 				    "COW lock limit reached on pfn 0x%lx",
4111 				    page_pptonum(pp));
4112 			}
4113 		}
4114 	} else {
4115 		mutex_enter(&freemem_lock);
4116 		if ((availrmem > pages_pp_maximum) &&
4117 		    (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
4118 			--availrmem;
4119 			++pages_claimed;
4120 			mutex_exit(&freemem_lock);
4121 			--pp->p_lckcnt;
4122 			r = 1;
4123 			if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4124 				cmn_err(CE_WARN,
4125 				    "COW lock limit reached on pfn 0x%lx",
4126 				    page_pptonum(pp));
4127 			}
4128 		} else
4129 			mutex_exit(&freemem_lock);
4130 	}
4131 	page_struct_unlock(pp);
4132 	return (r);
4133 }
4134 
4135 int
4136 page_subclaim(page_t *pp)
4137 {
4138 	int r = 0;
4139 
4140 	ASSERT(PAGE_LOCKED(pp));
4141 
4142 	page_struct_lock(pp);
4143 	ASSERT(pp->p_cowcnt != 0);
4144 
4145 	if (pp->p_lckcnt) {
4146 		if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
4147 			r = 1;
4148 			/*
4149 			 * for availrmem
4150 			 */
4151 			mutex_enter(&freemem_lock);
4152 			availrmem++;
4153 			pages_claimed--;
4154 			mutex_exit(&freemem_lock);
4155 
4156 			pp->p_cowcnt--;
4157 
4158 			if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4159 				cmn_err(CE_WARN,
4160 				    "Page lock limit reached on pfn 0x%lx",
4161 				    page_pptonum(pp));
4162 			}
4163 		}
4164 	} else {
4165 		r = 1;
4166 		pp->p_cowcnt--;
4167 		pp->p_lckcnt++;
4168 	}
4169 	page_struct_unlock(pp);
4170 	return (r);
4171 }
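
/*
 * Illustrative sketch (not part of the build): how a protection
 * change might move a claim between the two counters.  Making a
 * locked mapping writable converts a lock claim (p_lckcnt) into a
 * cow claim (p_cowcnt) via page_addclaim(); removing write permission
 * converts it back via page_subclaim().  The "becoming_writable"
 * condition and the EAGAIN policy are hypothetical.
 *
 *	if (becoming_writable) {
 *		if (!page_addclaim(pp))
 *			return (EAGAIN);
 *	} else {
 *		if (!page_subclaim(pp))
 *			return (EAGAIN);
 *	}
 */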
4172 
4173 int
4174 page_addclaim_pages(page_t  **ppa)
4175 {
4176 
4177 	pgcnt_t	lckpgs = 0, pg_idx;
4178 
4179 	VM_STAT_ADD(pagecnt.pc_addclaim_pages);
4180 
4181 	mutex_enter(&page_llock);
4182 	for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
4183 
4184 		ASSERT(PAGE_LOCKED(ppa[pg_idx]));
4185 		ASSERT(ppa[pg_idx]->p_lckcnt != 0);
4186 		if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4187 			mutex_exit(&page_llock);
4188 			return (0);
4189 		}
4190 		if (ppa[pg_idx]->p_lckcnt > 1)
4191 			lckpgs++;
4192 	}
4193 
4194 	if (lckpgs != 0) {
4195 		mutex_enter(&freemem_lock);
4196 		if (availrmem >= pages_pp_maximum + lckpgs) {
4197 			availrmem -= lckpgs;
4198 			pages_claimed += lckpgs;
4199 		} else {
4200 			mutex_exit(&freemem_lock);
4201 			mutex_exit(&page_llock);
4202 			return (0);
4203 		}
4204 		mutex_exit(&freemem_lock);
4205 	}
4206 
4207 	for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
4208 		ppa[pg_idx]->p_lckcnt--;
4209 		ppa[pg_idx]->p_cowcnt++;
4210 	}
4211 	mutex_exit(&page_llock);
4212 	return (1);
4213 }
4214 
4215 int
4216 page_subclaim_pages(page_t  **ppa)
4217 {
4218 	pgcnt_t	ulckpgs = 0, pg_idx;
4219 
4220 	VM_STAT_ADD(pagecnt.pc_subclaim_pages);
4221 
4222 	mutex_enter(&page_llock);
4223 	for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
4224 
4225 		ASSERT(PAGE_LOCKED(ppa[pg_idx]));
4226 		ASSERT(ppa[pg_idx]->p_cowcnt != 0);
4227 		if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4228 			mutex_exit(&page_llock);
4229 			return (0);
4230 		}
4231 		if (ppa[pg_idx]->p_lckcnt != 0)
4232 			ulckpgs++;
4233 	}
4234 
4235 	if (ulckpgs != 0) {
4236 		mutex_enter(&freemem_lock);
4237 		availrmem += ulckpgs;
4238 		pages_claimed -= ulckpgs;
4239 		mutex_exit(&freemem_lock);
4240 	}
4241 
4242 	for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
4243 		ppa[pg_idx]->p_cowcnt--;
4244 		ppa[pg_idx]->p_lckcnt++;
4245 
4246 	}
4247 	mutex_exit(&page_llock);
4248 	return (1);
4249 }
4250 
4251 page_t *
4252 page_numtopp(pfn_t pfnum, se_t se)
4253 {
4254 	page_t *pp;
4255 
4256 retry:
4257 	pp = page_numtopp_nolock(pfnum);
4258 	if (pp == NULL) {
4259 		return ((page_t *)NULL);
4260 	}
4261 
4262 	/*
4263 	 * Acquire the appropriate lock on the page.
4264 	 */
4265 	while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) {
4266 		if (page_pptonum(pp) != pfnum)
4267 			goto retry;
4268 		continue;
4269 	}
4270 
4271 	if (page_pptonum(pp) != pfnum) {
4272 		page_unlock(pp);
4273 		goto retry;
4274 	}
4275 
4276 	return (pp);
4277 }
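
/*
 * Illustrative sketch (not part of the build): looking up and locking
 * a page by pfn.  page_numtopp() re-checks the page's identity after
 * acquiring the lock, so the caller only has to drop the lock when
 * done.  The ENXIO policy is hypothetical.
 *
 *	page_t *pp = page_numtopp(pfnum, SE_SHARED);
 *	if (pp == NULL)
 *		return (ENXIO);
 *	(examine the locked page)
 *	page_unlock(pp);
 */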
4278 
4279 page_t *
4280 page_numtopp_noreclaim(pfn_t pfnum, se_t se)
4281 {
4282 	page_t *pp;
4283 
4284 retry:
4285 	pp = page_numtopp_nolock(pfnum);
4286 	if (pp == NULL) {
4287 		return ((page_t *)NULL);
4288 	}
4289 
4290 	/*
4291 	 * Acquire the appropriate lock on the page.
4292 	 */
4293 	while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) {
4294 		if (page_pptonum(pp) != pfnum)
4295 			goto retry;
4296 		continue;
4297 	}
4298 
4299 	if (page_pptonum(pp) != pfnum) {
4300 		page_unlock(pp);
4301 		goto retry;
4302 	}
4303 
4304 	return (pp);
4305 }
4306 
4307 /*
4308  * This routine is like page_numtopp, but will only return page structs
4309  * for pages which are ok for loading into hardware using the page struct.
4310  */
4311 page_t *
4312 page_numtopp_nowait(pfn_t pfnum, se_t se)
4313 {
4314 	page_t *pp;
4315 
4316 retry:
4317 	pp = page_numtopp_nolock(pfnum);
4318 	if (pp == NULL) {
4319 		return ((page_t *)NULL);
4320 	}
4321 
4322 	/*
4323 	 * Try to acquire the appropriate lock on the page.
4324 	 */
4325 	if (PP_ISFREE(pp))
4326 		pp = NULL;
4327 	else {
4328 		if (!page_trylock(pp, se))
4329 			pp = NULL;
4330 		else {
4331 			if (page_pptonum(pp) != pfnum) {
4332 				page_unlock(pp);
4333 				goto retry;
4334 			}
4335 			if (PP_ISFREE(pp)) {
4336 				page_unlock(pp);
4337 				pp = NULL;
4338 			}
4339 		}
4340 	}
4341 	return (pp);
4342 }
4343 
4344 /*
4345  * Returns a count of dirty pages that are in the process
4346  * of being written out.  If 'cleanit' is set, try to push the page.
4347  */
4348 pgcnt_t
4349 page_busy(int cleanit)
4350 {
4351 	page_t *page0 = page_first();
4352 	page_t *pp = page0;
4353 	pgcnt_t nppbusy = 0;
4354 	u_offset_t off;
4355 
4356 	do {
4357 		vnode_t *vp = pp->p_vnode;
4358 
4359 		/*
4360 		 * A page is a candidate for syncing if it is:
4361 		 *
4362 		 * (a)	On neither the freelist nor the cachelist
4363 		 * (b)	Hashed onto a vnode
4364 		 * (c)	Not a kernel page
4365 		 * (d)	Dirty
4366 		 * (e)	Not part of a swapfile
4367 		 * (f)	A page which belongs to a real vnode; e.g. has a non-null
4368 		 *	v_vfsp pointer.
4369 		 * (g)	Backed by a filesystem which doesn't have a
4370 		 *	stubbed-out sync operation
4371 		 */
4372 		if (!PP_ISFREE(pp) && vp != NULL && vp != &kvp &&
4373 		    hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
4374 		    vfs_can_sync(vp->v_vfsp)) {
4375 			nppbusy++;
4376 			vfs_syncprogress();
4377 
4378 			if (!cleanit)
4379 				continue;
4380 			if (!page_trylock(pp, SE_EXCL))
4381 				continue;
4382 
4383 			if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
4384 			    pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
4385 			    !(hat_pagesync(pp,
4386 			    HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) {
4387 				page_unlock(pp);
4388 				continue;
4389 			}
4390 			off = pp->p_offset;
4391 			VN_HOLD(vp);
4392 			page_unlock(pp);
4393 			(void) VOP_PUTPAGE(vp, off, PAGESIZE,
4394 			    B_ASYNC | B_FREE, kcred);
4395 			VN_RELE(vp);
4396 		}
4397 	} while ((pp = page_next(pp)) != page0);
4398 
4399 	return (nppbusy);
4400 }
4401 
4402 void page_invalidate_pages(void);
4403 
4404 /*
4405  * callback handler to vm sub-system
4406  *
4407  * Callers must ensure there are no recursive entries to this function.
4408  */
4409 /*ARGSUSED*/
4410 boolean_t
4411 callb_vm_cpr(void *arg, int code)
4412 {
4413 	if (code == CB_CODE_CPR_CHKPT)
4414 		page_invalidate_pages();
4415 	return (B_TRUE);
4416 }
4417 
4418 /*
4419  * Invalidate all pages of the system.
4420  * It shouldn't be called until all user page activities have stopped.
4421  */
4422 void
4423 page_invalidate_pages()
4424 {
4425 	page_t *pp;
4426 	page_t *page0;
4427 	pgcnt_t nbusypages;
4428 	int retry = 0;
4429 	const int MAXRETRIES = 4;
4430 #if defined(__sparc)
4431 	extern struct vnode prom_ppages;
4432 #endif /* __sparc */
4433 
4434 top:
4435 	/*
4436 	 * Flush dirty pages and destroy the clean ones.
4437 	 */
4438 	nbusypages = 0;
4439 
4440 	pp = page0 = page_first();
4441 	do {
4442 		struct vnode	*vp;
4443 		u_offset_t	offset;
4444 		int		mod;
4445 
4446 		/*
4447 		 * skip the page if it has no vnode or if it is associated
4448 		 * with the kernel vnode or prom-allocated kernel memory.
4449 		 */
4450 #if defined(__sparc)
4451 		if ((vp = pp->p_vnode) == NULL || vp == &kvp ||
4452 		    vp == &prom_ppages)
4453 #else /* x86 doesn't have prom or prom_ppage */
4454 		if ((vp = pp->p_vnode) == NULL || vp == &kvp)
4455 #endif /* __sparc */
4456 			continue;
4457 
4458 		/*
4459 		 * skip the page if it has already been freed and invalidated.
4460 		 */
4461 		if (PP_ISFREE(pp) && PP_ISAGED(pp))
4462 			continue;
4463 
4464 		/*
4465 		 * skip pages that are already locked or can't be "exclusively"
4466 		 * locked or are already free.  After we lock the page, check
4467 		 * the free and age bits again to be sure it hasn't been
4468 		 * destroyed yet.
4469 		 * To achieve max. parallelization, we use page_trylock instead
4470 		 * of page_lock so that we don't get blocked on individual pages
4471 		 * while we have thousands of other pages to process.
4472 		 */
4473 		if (!page_trylock(pp, SE_EXCL)) {
4474 			nbusypages++;
4475 			continue;
4476 		} else if (PP_ISFREE(pp)) {
4477 			if (!PP_ISAGED(pp)) {
4478 				page_destroy_free(pp);
4479 			} else {
4480 				page_unlock(pp);
4481 			}
4482 			continue;
4483 		}
4484 		/*
4485 		 * Is this page involved in some I/O? shared?
4486 		 *
4487 		 * The page_struct_lock need not be acquired to
4488 		 * examine these fields since the page has an
4489 		 * "exclusive" lock.
4490 		 */
4491 		if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
4492 			page_unlock(pp);
4493 			continue;
4494 		}
4495 
4496 		if (vp->v_type == VCHR) {
4497 			panic("vp->v_type == VCHR");
4498 			/*NOTREACHED*/
4499 		}
4500 
4501 		if (!page_try_demote_pages(pp)) {
4502 			page_unlock(pp);
4503 			continue;
4504 		}
4505 
4506 		/*
4507 		 * Check the modified bit. Leave the bits alone in hardware
4508 		 * (they will be modified if we do the putpage).
4509 		 */
4510 		mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
4511 			& P_MOD);
4512 		if (mod) {
4513 			offset = pp->p_offset;
4514 			/*
4515 			 * Hold the vnode before releasing the page lock
4516 			 * to prevent it from being freed and re-used by
4517 			 * some other thread.
4518 			 */
4519 			VN_HOLD(vp);
4520 			page_unlock(pp);
4521 			/*
4522 			 * No error return is checked here. Callers such as
4523 			 * cpr deal with the dirty pages at dump time
4524 			 * if this putpage fails.
4525 			 */
4526 			(void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL,
4527 			    kcred);
4528 			VN_RELE(vp);
4529 		} else {
4530 			page_destroy(pp, 0);
4531 		}
4532 	} while ((pp = page_next(pp)) != page0);
4533 	if (nbusypages && retry++ < MAXRETRIES) {
4534 		delay(1);
4535 		goto top;
4536 	}
4537 }
4538 
4539 /*
4540  * Replace the page "old" with the page "new" on the page hash and vnode lists
4541  *
4542  * the replacement must be done in place, i.e. the equivalent sequence:
4543  *
4544  *	vp = old->p_vnode;
4545  *	off = old->p_offset;
4546  *	page_do_hashout(old)
4547  *	page_do_hashin(new, vp, off)
4548  *
4549  * doesn't work, since
4550  *  1) if old is the only page on the vnode, the v_pages list has a window
4551  *     where it looks empty. This will break file system assumptions.
4552  * and
4553  *  2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
4554  */
4555 static void
4556 page_do_relocate_hash(page_t *new, page_t *old)
4557 {
4558 	page_t	**hash_list;
4559 	vnode_t	*vp = old->p_vnode;
4560 	kmutex_t *sep;
4561 
4562 	ASSERT(PAGE_EXCL(old));
4563 	ASSERT(PAGE_EXCL(new));
4564 	ASSERT(vp != NULL);
4565 	ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
4566 	ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset))));
4567 
4568 	/*
4569 	 * First find old page on the page hash list
4570 	 */
4571 	hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)];
4572 
4573 	for (;;) {
4574 		if (*hash_list == old)
4575 			break;
4576 		if (*hash_list == NULL) {
4577 			panic("page_do_hashout");
4578 			/*NOTREACHED*/
4579 		}
4580 		hash_list = &(*hash_list)->p_hash;
4581 	}
4582 
4583 	/*
4584 	 * update new and replace old with new on the page hash list
4585 	 */
4586 	new->p_vnode = old->p_vnode;
4587 	new->p_offset = old->p_offset;
4588 	new->p_hash = old->p_hash;
4589 	*hash_list = new;
4590 
4591 	if ((new->p_vnode->v_flag & VISSWAP) != 0)
4592 		PP_SETSWAP(new);
4593 
4594 	/*
4595 	 * replace old with new on the vnode's page list
4596 	 */
4597 	if (old->p_vpnext == old) {
4598 		new->p_vpnext = new;
4599 		new->p_vpprev = new;
4600 	} else {
4601 		new->p_vpnext = old->p_vpnext;
4602 		new->p_vpprev = old->p_vpprev;
4603 		new->p_vpnext->p_vpprev = new;
4604 		new->p_vpprev->p_vpnext = new;
4605 	}
4606 	if (vp->v_pages == old)
4607 		vp->v_pages = new;
4608 
4609 	/*
4610 	 * clear out the old page
4611 	 */
4612 	old->p_hash = NULL;
4613 	old->p_vpnext = NULL;
4614 	old->p_vpprev = NULL;
4615 	old->p_vnode = NULL;
4616 	PP_CLRSWAP(old);
4617 	old->p_offset = (u_offset_t)-1;
4618 	page_clr_all_props(old);
4619 
4620 	/*
4621 	 * Wake up processes waiting for this page.  The page's
4622 	 * identity has been changed, and is probably not the
4623 	 * desired page any longer.
4624 	 */
4625 	sep = page_se_mutex(old);
4626 	mutex_enter(sep);
4627 	old->p_selock &= ~SE_EWANTED;
4628 	if (CV_HAS_WAITERS(&old->p_cv))
4629 		cv_broadcast(&old->p_cv);
4630 	mutex_exit(sep);
4631 }
4632 
4633 /*
4634  * This function moves the identity of page "pp_old" to page "pp_new".
4635  * Both pages must be locked on entry.  "pp_new" is free, has no identity,
4636  * and need not be hashed out from anywhere.
4637  */
4638 void
4639 page_relocate_hash(page_t *pp_new, page_t *pp_old)
4640 {
4641 	vnode_t *vp = pp_old->p_vnode;
4642 	u_offset_t off = pp_old->p_offset;
4643 	kmutex_t *phm, *vphm;
4644 
4645 	/*
4646 	 * Rehash two pages
4647 	 */
4648 	ASSERT(PAGE_EXCL(pp_old));
4649 	ASSERT(PAGE_EXCL(pp_new));
4650 	ASSERT(vp != NULL);
4651 	ASSERT(pp_new->p_vnode == NULL);
4652 
4653 	/*
4654 	 * hashout then hashin while holding the mutexes
4655 	 */
4656 	phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off));
4657 	mutex_enter(phm);
4658 	vphm = page_vnode_mutex(vp);
4659 	mutex_enter(vphm);
4660 
4661 	page_do_relocate_hash(pp_new, pp_old);
4662 
4663 	mutex_exit(vphm);
4664 	mutex_exit(phm);
4665 
4666 	/*
4667 	 * The page_struct_lock need not be acquired for lckcnt and
4668 	 * cowcnt since the page has an "exclusive" lock.
4669 	 */
4670 	ASSERT(pp_new->p_lckcnt == 0);
4671 	ASSERT(pp_new->p_cowcnt == 0);
4672 	pp_new->p_lckcnt = pp_old->p_lckcnt;
4673 	pp_new->p_cowcnt = pp_old->p_cowcnt;
4674 	pp_old->p_lckcnt = pp_old->p_cowcnt = 0;
4675 
4676 	/* The following comment preserved from page_flip(). */
4677 	/* XXX - Do we need to protect fsdata? */
4678 	pp_new->p_fsdata = pp_old->p_fsdata;
4679 }
4680 
4681 /*
4682  * Helper routine used to lock all remaining members of a
4683  * large page. The caller is responsible for passing in a locked
4684  * pp. If pp is a large page, then it succeeds in locking all the
4685  * remaining constituent pages or it returns with only the
4686  * original page locked.
4687  *
4688  * Returns 1 on success, 0 on failure.
4689  *
4690  * If success is returned this routine guarantees p_szc for all constituent
4691  * pages of a large page pp belongs to can't change. To achieve this we
4692  * recheck szc of pp after locking all constituent pages and retry if szc
4693  * changed (it could only decrease). Since hat_page_demote() needs an EXCL
4694  * lock on one of constituent pages it can't be running after all constituent
4695  * pages are locked.  hat_page_demote() with a lock on a constituent page
4696  * outside of this large page (i.e. pp belonged to a larger large page) is
4697  * already done with all constituent pages of pp since the root's p_szc is
4698  * changed last. Therefore there is no need to synchronize with hat_page_demote() that
4699  * locked a constituent page outside of pp's current large page.
4700  */
4701 #ifdef DEBUG
4702 uint32_t gpg_trylock_mtbf = 0;
4703 #endif
4704 
4705 int
4706 group_page_trylock(page_t *pp, se_t se)
4707 {
4708 	page_t  *tpp;
4709 	pgcnt_t	npgs, i, j;
4710 	uint_t pszc = pp->p_szc;
4711 
4712 #ifdef DEBUG
4713 	if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) {
4714 		return (0);
4715 	}
4716 #endif
4717 
4718 	if (pp != PP_GROUPLEADER(pp, pszc)) {
4719 		return (0);
4720 	}
4721 
4722 retry:
4723 	ASSERT(PAGE_LOCKED_SE(pp, se));
4724 	ASSERT(!PP_ISFREE(pp));
4725 	if (pszc == 0) {
4726 		return (1);
4727 	}
4728 	npgs = page_get_pagecnt(pszc);
4729 	tpp = pp + 1;
4730 	for (i = 1; i < npgs; i++, tpp++) {
4731 		if (!page_trylock(tpp, se)) {
4732 			tpp = pp + 1;
4733 			for (j = 1; j < i; j++, tpp++) {
4734 				page_unlock(tpp);
4735 			}
4736 			return (0);
4737 		}
4738 	}
4739 	if (pp->p_szc != pszc) {
4740 		ASSERT(pp->p_szc < pszc);
4741 		ASSERT(pp->p_vnode != NULL && pp->p_vnode != &kvp &&
4742 		    !IS_SWAPFSVP(pp->p_vnode));
4743 		tpp = pp + 1;
4744 		for (i = 1; i < npgs; i++, tpp++) {
4745 			page_unlock(tpp);
4746 		}
4747 		pszc = pp->p_szc;
4748 		goto retry;
4749 	}
4750 	return (1);
4751 }
4752 
4753 void
4754 group_page_unlock(page_t *pp)
4755 {
4756 	page_t *tpp;
4757 	pgcnt_t	npgs, i;
4758 
4759 	ASSERT(PAGE_LOCKED(pp));
4760 	ASSERT(!PP_ISFREE(pp));
4761 	ASSERT(pp == PP_PAGEROOT(pp));
4762 	npgs = page_get_pagecnt(pp->p_szc);
4763 	for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) {
4764 		page_unlock(tpp);
4765 	}
4766 }
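
/*
 * Illustrative sketch (not part of the build): pairing
 * group_page_trylock() with group_page_unlock().  The caller must
 * already hold pp locked with the same se mode, and pp must be the
 * group leader.  group_page_unlock() drops the locks on all
 * constituent pages except pp itself.
 *
 *	if (group_page_trylock(pp, SE_EXCL)) {
 *		(all constituent pages are now SE_EXCL locked)
 *		group_page_unlock(pp);
 *	}
 */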
4767 
4768 /*
4769  * returns
4770  * 0 		: on success and *nrelocp is number of relocated PAGESIZE pages
4771  * ERANGE	: this is not a base page
4772  * EBUSY	: failure to get locks on the page/pages
4773  * ENOMEM	: failure to obtain replacement pages
4774  * EAGAIN	: OBP has not yet completed its boot-time handoff to the kernel
4775  *
4776  * Return with all constituent members of target and replacement
4777  * SE_EXCL locked. It is the caller's responsibility to drop the
4778  * locks.
4779  */
4780 int
4781 do_page_relocate(
4782 	page_t **target,
4783 	page_t **replacement,
4784 	int grouplock,
4785 	spgcnt_t *nrelocp,
4786 	lgrp_t *lgrp)
4787 {
4788 #ifdef DEBUG
4789 	page_t *first_repl;
4790 #endif /* DEBUG */
4791 	page_t *repl;
4792 	page_t *targ;
4793 	page_t *pl = NULL;
4794 	uint_t ppattr;
4795 	pfn_t   pfn, repl_pfn;
4796 	uint_t	szc;
4797 	spgcnt_t npgs, i;
4798 	int repl_contig = 0;
4799 	uint_t flags = 0;
4800 	spgcnt_t dofree = 0;
4801 
4802 	*nrelocp = 0;
4803 
4804 #if defined(__sparc)
4805 	/*
4806 	 * We need to wait till OBP has completed
4807 	 * its boot-time handoff of its resources to the kernel
4808 	 * before we allow page relocation
4809 	 */
4810 	if (page_relocate_ready == 0) {
4811 		return (EAGAIN);
4812 	}
4813 #endif
4814 
4815 	/*
4816 	 * If this is not a base page,
4817 	 * just return with 0x0 pages relocated.
4818 	 */
4819 	targ = *target;
4820 	ASSERT(PAGE_EXCL(targ));
4821 	ASSERT(!PP_ISFREE(targ));
4822 	szc = targ->p_szc;
4823 	ASSERT(szc < mmu_page_sizes);
4824 	VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
4825 	pfn = targ->p_pagenum;
4826 	if (pfn != PFN_BASE(pfn, szc)) {
4827 		VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]);
4828 		return (ERANGE);
4829 	}
4830 
4831 	if ((repl = *replacement) != NULL && repl->p_szc >= szc) {
4832 		repl_pfn = repl->p_pagenum;
4833 		if (repl_pfn != PFN_BASE(repl_pfn, szc)) {
4834 			VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]);
4835 			return (ERANGE);
4836 		}
4837 		repl_contig = 1;
4838 	}
4839 
4840 	/*
4841 	 * We must lock all members of this large page or we cannot
4842 	 * relocate any part of it.
4843 	 */
4844 	if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) {
4845 		VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]);
4846 		return (EBUSY);
4847 	}
4848 
4849 	/*
4850 	 * Reread szc; it could have been decreased before
4851 	 * group_page_trylock() was done.
4852 	 */
4853 	szc = targ->p_szc;
4854 	ASSERT(szc < mmu_page_sizes);
4855 	VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
4856 	ASSERT(pfn == PFN_BASE(pfn, szc));
4857 
4858 	npgs = page_get_pagecnt(targ->p_szc);
4859 
4860 	if (repl == NULL) {
4861 		dofree = npgs;		/* Size of target page in MMU pages */
4862 		if (!page_create_wait(dofree, 0)) {
4863 			if (grouplock != 0) {
4864 				group_page_unlock(targ);
4865 			}
4866 			VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
4867 			return (ENOMEM);
4868 		}
4869 
4870 		/*
4871 		 * seg kmem pages require that the target and replacement
4872 		 * page be the same pagesize.
4873 		 */
4874 		flags = (targ->p_vnode == &kvp) ? PGR_SAMESZC : 0;
4875 		repl = page_get_replacement_page(targ, lgrp, flags);
4876 		if (repl == NULL) {
4877 			if (grouplock != 0) {
4878 				group_page_unlock(targ);
4879 			}
4880 			page_create_putback(dofree);
4881 			VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
4882 			return (ENOMEM);
4883 		}
4884 	}
4885 #ifdef DEBUG
4886 	else {
4887 		ASSERT(PAGE_LOCKED(repl));
4888 	}
4889 #endif /* DEBUG */
4890 
4891 #if defined(__sparc)
4892 	/*
4893 	 * Let hat_page_relocate() complete the relocation if it's kernel page
4894 	 */
4895 	if (targ->p_vnode == &kvp) {
4896 		*replacement = repl;
4897 		if (hat_page_relocate(target, replacement, nrelocp) != 0) {
4898 			if (grouplock != 0) {
4899 				group_page_unlock(targ);
4900 			}
4901 			if (dofree) {
4902 				*replacement = NULL;
4903 				page_free_replacement_page(repl);
4904 				page_create_putback(dofree);
4905 			}
4906 			VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]);
4907 			return (EAGAIN);
4908 		}
4909 		VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
4910 		return (0);
4911 	}
4912 #else
4913 #if defined(lint)
4914 	dofree = dofree;
4915 #endif
4916 #endif
4917 
4918 #ifdef DEBUG
4919 	first_repl = repl;
4920 #endif /* DEBUG */
4921 
4922 	for (i = 0; i < npgs; i++) {
4923 		ASSERT(PAGE_EXCL(targ));
4924 
4925 		(void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD);
4926 
4927 		ASSERT(hat_page_getshare(targ) == 0);
4928 		ASSERT(!PP_ISFREE(targ));
4929 		ASSERT(targ->p_pagenum == (pfn + i));
4930 		ASSERT(repl_contig == 0 ||
4931 		    repl->p_pagenum == (repl_pfn + i));
4932 
4933 		/*
4934 		 * Copy the page contents and attributes then
4935 		 * relocate the page in the page hash.
4936 		 */
4937 		ppcopy(targ, repl);
4938 		ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO));
4939 		page_clr_all_props(repl);
4940 		page_set_props(repl, ppattr);
4941 		page_relocate_hash(repl, targ);
4942 
4943 		ASSERT(hat_page_getshare(targ) == 0);
4944 		ASSERT(hat_page_getshare(repl) == 0);
4945 		/*
4946 		 * Now clear the props on targ, after the
4947 		 * page_relocate_hash(), they no longer
4948 		 * have any meaning.
4949 		 */
4950 		page_clr_all_props(targ);
4951 		ASSERT(targ->p_next == targ);
4952 		ASSERT(targ->p_prev == targ);
4953 		page_list_concat(&pl, &targ);
4954 
4955 		targ++;
4956 		if (repl_contig != 0) {
4957 			repl++;
4958 		} else {
4959 			repl = repl->p_next;
4960 		}
4961 	}
4962 	/* assert that we have come full circle with repl */
4963 	ASSERT(repl_contig == 1 || first_repl == repl);
4964 
4965 	*target = pl;
4966 	if (*replacement == NULL) {
4967 		ASSERT(first_repl == repl);
4968 		*replacement = repl;
4969 	}
4970 	VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
4971 	*nrelocp = npgs;
4972 	return (0);
4973 }
4974 /*
4975  * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated.
4976  */
4977 int
4978 page_relocate(
4979 	page_t **target,
4980 	page_t **replacement,
4981 	int grouplock,
4982 	int freetarget,
4983 	spgcnt_t *nrelocp,
4984 	lgrp_t *lgrp)
4985 {
4986 	spgcnt_t ret;
4987 
4988 	/* do_page_relocate returns 0 on success or errno value */
4989 	ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp);
4990 
4991 	if (ret != 0 || freetarget == 0) {
4992 		return (ret);
4993 	}
4994 	if (*nrelocp == 1) {
4995 		ASSERT(*target != NULL);
4996 		page_free(*target, 1);
4997 	} else {
4998 		page_t *tpp = *target;
4999 		uint_t szc = tpp->p_szc;
5000 		pgcnt_t npgs = page_get_pagecnt(szc);
5001 		ASSERT(npgs > 1);
5002 		ASSERT(szc != 0);
5003 		do {
5004 			ASSERT(PAGE_EXCL(tpp));
5005 			ASSERT(!hat_page_is_mapped(tpp));
5006 			ASSERT(tpp->p_szc == szc);
5007 			PP_SETFREE(tpp);
5008 			PP_SETAGED(tpp);
5009 			npgs--;
5010 		} while ((tpp = tpp->p_next) != *target);
5011 		ASSERT(npgs == 0);
5012 		page_list_add_pages(*target, 0);
5013 		npgs = page_get_pagecnt(szc);
5014 		page_create_putback(npgs);
5015 	}
5016 	return (ret);
5017 }
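
/*
 * Illustrative sketch (not part of the build): a caller that holds
 * the target SE_EXCL locked, lets page_relocate() allocate the
 * replacement, and frees the target on success.  EBUSY and ENOMEM
 * are the failures a caller would typically retry; the retry policy
 * shown is hypothetical.
 *
 *	page_t *repl = NULL;
 *	spgcnt_t nreloc;
 *
 *	switch (page_relocate(&targ, &repl, 1, 1, &nreloc, NULL)) {
 *	case 0:
 *		(nreloc PAGESIZE pages in repl, all SE_EXCL locked)
 *		break;
 *	case EBUSY:
 *	case ENOMEM:
 *		(drop locks and retry later)
 *		break;
 *	}
 */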
5018 
5019 /*
5020  * it is up to the caller to deal with pcf accounting.
5021  */
5022 void
5023 page_free_replacement_page(page_t *pplist)
5024 {
5025 	page_t *pp;
5026 
5027 	while (pplist != NULL) {
5028 		/*
5029 		 * pp_targ is a linked list.
5030 		 */
5031 		pp = pplist;
5032 		if (pp->p_szc == 0) {
5033 			page_sub(&pplist, pp);
5034 			page_clr_all_props(pp);
5035 			PP_SETFREE(pp);
5036 			PP_SETAGED(pp);
5037 			page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
5038 			page_unlock(pp);
5039 			VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]);
5040 		} else {
5041 			spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc);
5042 			page_t *tpp;
5043 			page_list_break(&pp, &pplist, curnpgs);
5044 			tpp = pp;
5045 			do {
5046 				ASSERT(PAGE_EXCL(tpp));
5047 				ASSERT(!hat_page_is_mapped(tpp));
5048 				page_clr_all_props(tpp);
5049 				PP_SETFREE(tpp);
5050 				PP_SETAGED(tpp);
5051 			} while ((tpp = tpp->p_next) != pp);
5052 			page_list_add_pages(pp, 0);
5053 			VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]);
5054 		}
5055 	}
5056 }
5057 
5058 /*
5059  * Relocate target to non-relocatable replacement page.
5060  */
5061 int
5062 page_relocate_cage(page_t **target, page_t **replacement)
5063 {
5064 	page_t *tpp, *rpp;
5065 	spgcnt_t pgcnt, npgs;
5066 	int result;
5067 
5068 	tpp = *target;
5069 
5070 	ASSERT(PAGE_EXCL(tpp));
5071 	ASSERT(tpp->p_szc == 0);
5072 
5073 	pgcnt = btop(page_get_pagesize(tpp->p_szc));
5074 
5075 	do {
5076 		(void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC);
5077 		rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC);
5078 		if (rpp == NULL) {
5079 			page_create_putback(pgcnt);
5080 			kcage_cageout_wakeup();
5081 		}
5082 	} while (rpp == NULL);
5083 
5084 	ASSERT(PP_ISNORELOC(rpp));
5085 
5086 	result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL);
5087 
5088 	if (result == 0) {
5089 		*replacement = rpp;
5090 		if (pgcnt != npgs)
5091 			panic("page_relocate_cage: partial relocation");
5092 	}
5093 
5094 	return (result);
5095 }
5096 
5097 /*
5098  * Release the page lock on a page, place on cachelist
5099  * tail if no longer mapped. Caller can let us know if
5100  * the page is known to be clean.
5101  */
5102 int
5103 page_release(page_t *pp, int checkmod)
5104 {
5105 	int status;
5106 
5107 	ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) &&
5108 		(pp->p_vnode != NULL));
5109 
5110 	if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) &&
5111 	    ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) &&
5112 	    pp->p_lckcnt == 0 && pp->p_cowcnt == 0 &&
5113 	    !hat_page_is_mapped(pp)) {
5114 
5115 		/*
5116 		 * If page is modified, unlock it
5117 		 *
5118 		 * The (p_nrm & P_MOD) bit has the latest state because:
5119 		 * (1) We found that this page doesn't have any mappings
5120 		 *	_after_ holding SE_EXCL and
5121 		 * (2) We didn't drop SE_EXCL lock after the check in (1)
5122 		 */
5123 		if (checkmod && hat_ismod(pp)) {
5124 			page_unlock(pp);
5125 			status = PGREL_MOD;
5126 		} else {
5127 			/*LINTED: constant in conditional context*/
5128 			VN_DISPOSE(pp, B_FREE, 0, kcred);
5129 			status = PGREL_CLEAN;
5130 		}
5131 	} else {
5132 		page_unlock(pp);
5133 		status = PGREL_NOTREL;
5134 	}
5135 	return (status);
5136 }
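
/*
 * Illustrative sketch (not part of the build): a pageout-style caller
 * handling the three page_release() outcomes.  The page lock is
 * dropped in every case.
 *
 *	switch (page_release(pp, 1)) {
 *	case PGREL_CLEAN:
 *		(page was freed to the cachelist)
 *		break;
 *	case PGREL_MOD:
 *		(page was dirty; caller may schedule a putpage)
 *		break;
 *	case PGREL_NOTREL:
 *		(page is still mapped or locked; nothing was freed)
 *		break;
 *	}
 */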
5137 
5138 /*
5139  * Given a constituent page, try to demote the large page on the freelist.
5140  *
5141  * Returns nonzero if the page could be demoted successfully. Returns with
5142  * the constituent page still locked.
5143  */
5144 int
5145 page_try_demote_free_pages(page_t *pp)
5146 {
5147 	page_t *rootpp = pp;
5148 	pfn_t	pfn = page_pptonum(pp);
5149 	spgcnt_t npgs;
5150 	uint_t	szc = pp->p_szc;
5151 
5152 	ASSERT(PP_ISFREE(pp));
5153 	ASSERT(PAGE_EXCL(pp));
5154 
5155 	/*
5156 	 * Adjust rootpp and lock it, if `pp' is not the base
5157 	 * constituent page.
5158 	 */
5159 	npgs = page_get_pagecnt(pp->p_szc);
5160 	if (npgs == 1) {
5161 		return (0);
5162 	}
5163 
5164 	if (!IS_P2ALIGNED(pfn, npgs)) {
5165 		pfn = P2ALIGN(pfn, npgs);
5166 		rootpp = page_numtopp_nolock(pfn);
5167 	}
5168 
5169 	if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) {
5170 		return (0);
5171 	}
5172 
5173 	if (rootpp->p_szc != szc) {
5174 		if (pp != rootpp)
5175 			page_unlock(rootpp);
5176 		return (0);
5177 	}
5178 
5179 	page_demote_free_pages(rootpp);
5180 
5181 	if (pp != rootpp)
5182 		page_unlock(rootpp);
5183 
5184 	ASSERT(PP_ISFREE(pp));
5185 	ASSERT(PAGE_EXCL(pp));
5186 	return (1);
5187 }
5188 
5189 /*
5190  * Given a constituent page, try to demote the large page.
5191  *
5192  * Returns nonzero if the page could be demoted successfully. Returns with
5193  * the constituent page still locked.
5194  */
5195 int
5196 page_try_demote_pages(page_t *pp)
5197 {
5198 	page_t *tpp, *rootpp = pp;
5199 	pfn_t	pfn = page_pptonum(pp);
5200 	spgcnt_t i, npgs;
5201 	uint_t	szc = pp->p_szc;
5202 	vnode_t *vp = pp->p_vnode;
5203 
5204 	ASSERT(PAGE_EXCL(pp));
5205 
5206 	VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]);
5207 
5208 	if (pp->p_szc == 0) {
5209 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]);
5210 		return (1);
5211 	}
5212 
5213 	if (vp != NULL && !IS_SWAPFSVP(vp) && vp != &kvp) {
5214 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]);
5215 		page_demote_vp_pages(pp);
5216 		ASSERT(pp->p_szc == 0);
5217 		return (1);
5218 	}
5219 
5220 	/*
5221 	 * Adjust rootpp if passed in is not the base
5222 	 * constituent page.
5223 	 */
5224 	npgs = page_get_pagecnt(pp->p_szc);
5225 	ASSERT(npgs > 1);
5226 	if (!IS_P2ALIGNED(pfn, npgs)) {
5227 		pfn = P2ALIGN(pfn, npgs);
5228 		rootpp = page_numtopp_nolock(pfn);
5229 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]);
5230 		ASSERT(rootpp->p_vnode != NULL);
5231 		ASSERT(rootpp->p_szc == szc);
5232 	}
5233 
5234 	/*
5235 	 * We can't demote kernel pages since we can't hat_unload()
5236 	 * the mappings.
5237 	 */
5238 	if (rootpp->p_vnode == &kvp)
5239 		return (0);
5240 
5241 	/*
5242 	 * Attempt to lock all constituent pages except the page passed
5243 	 * in since it's already locked.
5244 	 */
5245 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5246 		ASSERT(!PP_ISFREE(tpp));
5247 		ASSERT(tpp->p_vnode != NULL);
5248 
5249 		if (tpp != pp && !page_trylock(tpp, SE_EXCL))
5250 			break;
5251 		ASSERT(tpp->p_szc == rootpp->p_szc);
5252 		ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
5253 	}
5254 
5255 	/*
5256 	 * If we failed to lock them all then unlock what we have
5257 	 * locked so far and bail.
5258 	 */
5259 	if (i < npgs) {
5260 		tpp = rootpp;
5261 		while (i-- > 0) {
5262 			if (tpp != pp)
5263 				page_unlock(tpp);
5264 			tpp++;
5265 		}
5266 		VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
5267 		return (0);
5268 	}
5269 
5270 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5271 		ASSERT(PAGE_EXCL(tpp));
5272 		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
5273 		tpp->p_szc = 0;
5274 	}
5275 
5276 	/*
5277 	 * Unlock all pages except the page passed in.
5278 	 */
5279 	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
5280 		ASSERT(!hat_page_is_mapped(tpp));
5281 		if (tpp != pp)
5282 			page_unlock(tpp);
5283 	}
5284 
5285 	VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
5286 	return (1);
5287 }
5288 
5289 /*
5290  * Called by page_free() and page_destroy() to demote the page size code
5291  * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero
5292  * p_szc on free list, neither can we just clear p_szc of a single page_t
5293  * within a large page since it will break other code that relies on p_szc
5294  * being the same for all page_t's of a large page). Anonymous pages should
5295  * never end up here because anon_map_getpages() cannot deal with p_szc
5296  * changes after a single constituent page is locked.  While anonymous or
5297  * kernel large pages are demoted or freed the entire large page at a time
5298  * with all constituent pages locked EXCL for the file system pages we
5299  * have to be able to demote a large page (i.e. decrease all constituent pages
5300  * p_szc) with only just an EXCL lock on one of constituent pages. The reason
5301  * we can easily deal with anonymous page demotion the entire large page at a
5302  * time is that those operation originate at address space level and concern
5303  * the entire large page region with actual demotion only done when pages are
5304  * not shared with any other processes (therefore we can always get EXCL lock
5305  * on all anonymous constituent pages after clearing segment page
5306  * cache). However file system pages can be truncated or invalidated at a
5307  * PAGESIZE level from the file system side and end up in page_free() or
5308  * page_destroy() (we also allow only part of the large page to be SOFTLOCKed
5309  * and therefore pageout should be able to demote a large page by EXCL locking
5310  * any constituent page that is not under SOFTLOCK). In those cases we cannot
5311  * rely on being able to lock EXCL all constituent pages.
5312  *
5313  * To prevent szc changes on file system pages one has to lock all constituent
5314  * pages at least SHARED (or call page_szc_lock()). The only subsystem that
5315  * doesn't rely on locking all constituent pages (or using page_szc_lock()) to
5316  * prevent szc changes is hat layer that uses its own page level mlist
5317  * locks. hat assumes that szc doesn't change after mlist lock for a page is
5318  * taken. Therefore we need to change szc under hat level locks if we only
5319  * have an EXCL lock on a single constituent page and hat still references any
5320  * of constituent pages.  (Note we can't "ignore" hat layer by simply
5321  * hat_pageunload() all constituent pages without having EXCL locks on all of
5322  * constituent pages). We use hat_page_demote() call to safely demote szc of
5323  * all constituent pages under hat locks when we only have an EXCL lock on one
5324  * of constituent pages.
5325  *
5326  * This routine calls page_szc_lock() before calling hat_page_demote() to
5327  * allow segvn in one special case not to lock all constituent pages SHARED
5328  * before calling hat_memload_array() that relies on p_szc not changing even
5329  * before hat level mlist lock is taken.  In that case segvn uses
5330  * page_szc_lock() to prevent hat_page_demote() changing p_szc values.
5331  *
5332  * Anonymous or kernel page demotion still has to lock all pages exclusively
5333  * and do hat_pageunload() on all constituent pages before demoting the page
5334  * therefore there's no need for anonymous or kernel page demotion to use
5335  * hat_page_demote() mechanism.
5336  *
5337  * hat_page_demote() removes all large mappings that map pp and then decreases
5338  * p_szc starting from the last constituent page of the large page. Working
5339  * from the tail of a large page in decreasing pfn order allows one looking at
5340  * the root page to know that hat_page_demote() is done for the root's szc area.
5341  * e.g. if a root page has szc 1 one knows it only has to lock all constituent
5342  * pages within szc 1 area to prevent szc changes because hat_page_demote()
5343  * that started on this page when it had szc > 1 is done for this szc 1 area.
5344  *
5345  * We are guaranteed that all constituent pages of pp's large page belong to
5346  * the same vnode with the consecutive offsets increasing in the direction of
5347  * the pfn i.e. the identity of constituent pages can't change until their
5348  * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
5349  * large mappings to pp even though we don't lock any constituent page except
5350  * pp (i.e. we won't unload e.g. kernel locked page).
5351  */
5352 static void
5353 page_demote_vp_pages(page_t *pp)
5354 {
5355 	kmutex_t *mtx;
5356 
5357 	ASSERT(PAGE_EXCL(pp));
5358 	ASSERT(!PP_ISFREE(pp));
5359 	ASSERT(pp->p_vnode != NULL);
5360 	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
5361 	ASSERT(pp->p_vnode != &kvp);
5362 
5363 	VM_STAT_ADD(pagecnt.pc_demote_pages[0]);
5364 
5365 	mtx = page_szc_lock(pp);
5366 	if (mtx != NULL) {
5367 		hat_page_demote(pp);
5368 		mutex_exit(mtx);
5369 	}
5370 	ASSERT(pp->p_szc == 0);
5371 }
5372 
5373 /*
5374  * Mark any existing pages for migration in the given range
5375  */
5376 void
5377 page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
5378     struct anon_map *amp, ulong_t anon_index, vnode_t *vp,
5379     u_offset_t vnoff, int rflag)
5380 {
5381 	struct anon	*ap;
5382 	vnode_t		*curvp;
5383 	lgrp_t		*from;
5384 	pgcnt_t		i;
5385 	pgcnt_t		nlocked;
5386 	u_offset_t	off;
5387 	pfn_t		pfn;
5388 	size_t		pgsz;
5389 	size_t		segpgsz;
5390 	pgcnt_t		pages;
5391 	uint_t		pszc;
5392 	page_t		**ppa;
5393 	pgcnt_t		ppa_nentries;
5394 	page_t		*pp;
5395 	caddr_t		va;
5396 	ulong_t		an_idx;
5397 	anon_sync_obj_t	cookie;
5398 
5399 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5400 
5401 	/*
5402 	 * Don't do anything if we don't need to do lgroup optimizations
5403 	 * on this system.
5404 	 */
5405 	if (!lgrp_optimizations())
5406 		return;
5407 
5408 	/*
5409 	 * Align address and length to (potentially large) page boundary
5410 	 */
5411 	segpgsz = page_get_pagesize(seg->s_szc);
5412 	addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz);
5413 	if (rflag)
5414 		len = P2ROUNDUP(len, segpgsz);
5415 
5416 	/*
5417 	 * Allocate page array to accommodate the largest page size
5418 	 */
5419 	pgsz = page_get_pagesize(page_num_pagesizes() - 1);
5420 	ppa_nentries = btop(pgsz);
5421 	ppa = kmem_zalloc(ppa_nentries * sizeof (page_t *), KM_SLEEP);
5422 
5423 	/*
5424 	 * Do one (large) page at a time
5425 	 */
5426 	va = addr;
5427 	while (va < addr + len) {
5428 		/*
5429 		 * Lookup (root) page for vnode and offset corresponding to
5430 		 * this virtual address
5431 		 * Try anonmap first since there may be copy-on-write
5432 		 * pages, but initialize vnode pointer and offset using
5433 		 * vnode arguments just in case there isn't an amp.
5434 		 */
5435 		curvp = vp;
5436 		off = vnoff + va - seg->s_base;
5437 		if (amp) {
5438 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5439 			an_idx = anon_index + seg_page(seg, va);
5440 			anon_array_enter(amp, an_idx, &cookie);
5441 			ap = anon_get_ptr(amp->ahp, an_idx);
5442 			if (ap)
5443 				swap_xlate(ap, &curvp, &off);
5444 			anon_array_exit(&cookie);
5445 			ANON_LOCK_EXIT(&amp->a_rwlock);
5446 		}
5447 
5448 		pp = NULL;
5449 		if (curvp)
5450 			pp = page_lookup(curvp, off, SE_SHARED);
5451 
5452 		/*
5453 		 * If there isn't a page at this virtual address,
5454 		 * skip to next page
5455 		 */
5456 		if (pp == NULL) {
5457 			va += PAGESIZE;
5458 			continue;
5459 		}
5460 
5461 		/*
5462 		 * Figure out which lgroup this page is in for kstats
5463 		 */
5464 		pfn = page_pptonum(pp);
5465 		from = lgrp_pfn_to_lgrp(pfn);
5466 
5467 		/*
5468 		 * Get page size, and round up and skip to next page boundary
5469 		 * if unaligned address
5470 		 */
5471 		pszc = pp->p_szc;
5472 		pgsz = page_get_pagesize(pszc);
5473 		pages = btop(pgsz);
5474 		if (!IS_P2ALIGNED(va, pgsz) ||
5475 		    !IS_P2ALIGNED(pfn, pages) ||
5476 		    pgsz > segpgsz) {
5477 			pgsz = MIN(pgsz, segpgsz);
5478 			page_unlock(pp);
5479 			i = btop(P2END((uintptr_t)va, pgsz) -
5480 			    (uintptr_t)va);
5481 			va = (caddr_t)P2END((uintptr_t)va, pgsz);
5482 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, i);
5483 			continue;
5484 		}
5485 
5486 		/*
5487 		 * Upgrade to exclusive lock on page
5488 		 */
5489 		if (!page_tryupgrade(pp)) {
5490 			page_unlock(pp);
5491 			va += pgsz;
5492 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5493 			    btop(pgsz));
5494 			continue;
5495 		}
5496 
5497 		/*
5498 		 * Remember pages locked exclusively and how many
5499 		 */
5500 		ppa[0] = pp;
5501 		nlocked = 1;
5502 
5503 		/*
5504 		 * Lock constituent pages if this is large page
5505 		 */
5506 		if (pages > 1) {
5507 			/*
5508 			 * Lock all constituents except root page, since it
5509 			 * should be locked already.
5510 			 */
5511 			for (i = 1; i < pages; i++) {
5512 				pp++;
5513 				if (!page_trylock(pp, SE_EXCL)) {
5514 					break;
5515 				}
5516 				if (PP_ISFREE(pp) ||
5517 				    pp->p_szc != pszc) {
5518 					/*
5519 					 * hat_page_demote() raced in with us.
5520 					 */
5521 					ASSERT(!IS_SWAPFSVP(curvp));
5522 					page_unlock(pp);
5523 					break;
5524 				}
5525 				ppa[nlocked] = pp;
5526 				nlocked++;
5527 			}
5528 		}
5529 
5530 		/*
5531 		 * If all constituent pages couldn't be locked,
5532 		 * unlock pages locked so far and skip to next page.
5533 		 */
5534 		if (nlocked != pages) {
5535 			for (i = 0; i < nlocked; i++)
5536 				page_unlock(ppa[i]);
5537 			va += pgsz;
5538 			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
5539 			    btop(pgsz));
5540 			continue;
5541 		}
5542 
5543 		/*
5544 		 * hat_page_demote() can no longer happen
5545 		 * since the last constituent page had the right p_szc after
5546 		 * all constituent pages were locked. All constituent pages
5547 		 * should now have the same p_szc.
5548 		 */
5549 
5550 		/*
5551 		 * All constituent pages locked successfully, so mark
5552 		 * large page for migration and unload the mappings of
5553 		 * constituent pages, so a fault will occur on any part of the
5554 		 * large page
5555 		 */
5556 		PP_SETMIGRATE(ppa[0]);
5557 		for (i = 0; i < nlocked; i++) {
5558 			pp = ppa[i];
5559 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
5560 			ASSERT(hat_page_getshare(pp) == 0);
5561 			page_unlock(pp);
5562 		}
5563 		lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked);
5564 
5565 		va += pgsz;
5566 	}
5567 	kmem_free(ppa, ppa_nentries * sizeof (page_t *));
5568 }
5569 
5570 /*
5571  * Migrate any pages that have been marked for migration in the given range
5572  */
5573 void
5574 page_migrate(
5575 	struct seg	*seg,
5576 	caddr_t		addr,
5577 	page_t		**ppa,
5578 	pgcnt_t		npages)
5579 {
5580 	lgrp_t		*from;
5581 	lgrp_t		*to;
5582 	page_t		*newpp;
5583 	page_t		*pp;
5584 	pfn_t		pfn;
5585 	size_t		pgsz;
5586 	spgcnt_t	page_cnt;
5587 	spgcnt_t	i;
5588 	uint_t		pszc;
5589 
5590 	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5591 
5592 	while (npages > 0) {
5593 		pp = *ppa;
5594 		pszc = pp->p_szc;
5595 		pgsz = page_get_pagesize(pszc);
5596 		page_cnt = btop(pgsz);
5597 
5598 		/*
5599 		 * Check to see whether this page is marked for migration
5600 		 *
5601 		 * Assume that root page of large page is marked for
5602 		 * migration and none of the other constituent pages
5603 		 * are marked.  This really simplifies clearing the
5604 		 * migrate bit by not having to clear it from each
5605 		 * constituent page.
5606 		 *
5607 		 * Note that we don't want to relocate an entire large page if
5608 		 * someone is only using one subpage.
5609 		 */
5610 		if (npages < page_cnt)
5611 			break;
5612 
5613 		/*
5614 		 * Is it marked for migration?
5615 		 */
5616 		if (!PP_ISMIGRATE(pp))
5617 			goto next;
5618 
5619 		/*
5620 		 * Determine lgroups that page is being migrated between
5621 		 */
5622 		pfn = page_pptonum(pp);
5623 		if (!IS_P2ALIGNED(pfn, page_cnt)) {
5624 			break;
5625 		}
5626 		from = lgrp_pfn_to_lgrp(pfn);
5627 		to = lgrp_mem_choose(seg, addr, pgsz);
5628 
5629 		/*
5630 		 * Check to see whether we are trying to migrate page to lgroup
5631 		 * where it is allocated already
5632 		 */
5633 		if (to == from) {
5634 			PP_CLRMIGRATE(pp);
5635 			goto next;
5636 		}
5637 
5638 		/*
5639 		 * Need to get exclusive locks to migrate
5640 		 */
5641 		for (i = 0; i < page_cnt; i++) {
5642 			ASSERT(PAGE_LOCKED(ppa[i]));
5643 			if (page_pptonum(ppa[i]) != pfn + i ||
5644 			    ppa[i]->p_szc != pszc) {
5645 				break;
5646 			}
5647 			if (!page_tryupgrade(ppa[i])) {
5648 				lgrp_stat_add(from->lgrp_id,
5649 				    LGRP_PM_FAIL_LOCK_PGS,
5650 				    page_cnt);
5651 				break;
5652 			}
5653 		}
5654 		if (i != page_cnt) {
5655 			while (--i != -1) {
5656 				page_downgrade(ppa[i]);
5657 			}
5658 			goto next;
5659 		}
5660 
5661 		(void) page_create_wait(page_cnt, PG_WAIT);
5662 		newpp = page_get_replacement_page(pp, to, PGR_SAMESZC);
5663 		if (newpp == NULL) {
5664 			page_create_putback(page_cnt);
5665 			for (i = 0; i < page_cnt; i++) {
5666 				page_downgrade(ppa[i]);
5667 			}
5668 			lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS,
5669 			    page_cnt);
5670 			goto next;
5671 		}
5672 		ASSERT(newpp->p_szc == pszc);
5673 		/*
5674 		 * Clear migrate bit and relocate page
5675 		 */
5676 		PP_CLRMIGRATE(pp);
5677 		if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
5678 			panic("page_migrate: page_relocate failed");
5679 		}
5680 		ASSERT(page_cnt * PAGESIZE == pgsz);
5681 
5682 		/*
5683 		 * Keep stats for number of pages migrated from and to
5684 		 * each lgroup
5685 		 */
5686 		lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt);
5687 		lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt);
5688 		/*
5689 		 * update the page_t array we were passed in and
5690 		 * unlink constituent pages of a large page.
5691 		 */
5692 		for (i = 0; i < page_cnt; ++i, ++pp) {
5693 			ASSERT(PAGE_EXCL(newpp));
5694 			ASSERT(newpp->p_szc == pszc);
5695 			ppa[i] = newpp;
5696 			pp = newpp;
5697 			page_sub(&newpp, pp);
5698 			page_downgrade(pp);
5699 		}
5700 		ASSERT(newpp == NULL);
5701 next:
5702 		addr += pgsz;
5703 		ppa += page_cnt;
5704 		npages -= page_cnt;
5705 	}
5706 }
5707 
5708 ulong_t mem_waiters 	= 0;
5709 ulong_t	max_count 	= 20;
5710 #define	MAX_DELAY	0x1ff
5711 
5712 /*
5713  * Check if enough memory is available to proceed.
5714  * Depending on system configuration and how much memory is
5715  * reserved for swap we need to check against two variables.
5716  * e.g. on systems with little physical swap availrmem can be
5717  * a more reliable indicator of how much memory is available.
5718  * On systems with large physical swap freemem can be a better indicator.
5719  * If freemem drops below the threshold level don't return an error
5720  * immediately but wake up pageout to free memory and block.
5721  * This is done a number of times. If pageout is not able to free
5722  * memory within a certain time return an error.
5723  * The same applies for availrmem but kmem_reap is used to
5724  * free memory.
5725  */
5726 int
5727 page_mem_avail(pgcnt_t npages)
5728 {
5729 	ulong_t count;
5730 
5731 #if defined(__i386)
5732 	if (freemem > desfree + npages &&
5733 	    availrmem > swapfs_reserve + npages &&
5734 	    btop(vmem_size(heap_arena, VMEM_FREE)) > tune.t_minarmem +
5735 	    npages)
5736 		return (1);
5737 #else
5738 	if (freemem > desfree + npages &&
5739 	    availrmem > swapfs_reserve + npages)
5740 		return (1);
5741 #endif
5742 
5743 	count = max_count;
5744 	atomic_add_long(&mem_waiters, 1);
5745 
5746 	while (freemem < desfree + npages && --count) {
5747 		cv_signal(&proc_pageout->p_cv);
5748 		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
5749 			atomic_add_long(&mem_waiters, -1);
5750 			return (0);
5751 		}
5752 	}
5753 	if (count == 0) {
5754 		atomic_add_long(&mem_waiters, -1);
5755 		return (0);
5756 	}
5757 
5758 	count = max_count;
5759 	while (availrmem < swapfs_reserve + npages && --count) {
5760 		kmem_reap();
5761 		if (delay_sig(hz + (mem_waiters & MAX_DELAY))) {
5762 			atomic_add_long(&mem_waiters, -1);
5763 			return (0);
5764 		}
5765 	}
5766 	atomic_add_long(&mem_waiters, -1);
5767 	if (count == 0)
5768 		return (0);
5769 
5770 #if defined(__i386)
5771 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
5772 	    tune.t_minarmem + npages)
5773 		return (0);
5774 #endif
5775 	return (1);
5776 }
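
/*
 * Illustrative sketch (not part of the build): callers on an
 * allocation path gate their work on page_mem_avail(), which may
 * block briefly while pageout and kmem_reap() try to free memory.
 * The EAGAIN policy is hypothetical.
 *
 *	if (!page_mem_avail(npages))
 *		return (EAGAIN);
 *	(proceed with the allocation)
 */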
5777 
5778 #define	MAX_CNT	60	/* max num of iterations */
5779 /*
5780  * Reclaim/reserve availrmem for npages.
5781  * If there is not enough memory start reaping seg, kmem caches.
5782  * Start pageout scanner (via page_needfree()).
5783  * Exit after ~MAX_CNT seconds regardless of how much memory has been released.
5784  * Note: There is no guarantee that any availrmem will be freed as
5785  * this memory typically is locked (kernel heap) or reserved for swap.
5786  * Also, due to memory fragmentation, the kmem allocator may not be able
5787  * to free any memory (a single user-allocated buffer will prevent
5788  * freeing a slab or a page).
5789  */
5790 int
5791 page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust)
5792 {
5793 	int	i = 0;
5794 	int	ret = 0;
5795 	pgcnt_t	deficit;
5796 	pgcnt_t old_availrmem;
5797 
5798 	mutex_enter(&freemem_lock);
5799 	old_availrmem = availrmem - 1;
5800 	while ((availrmem < tune.t_minarmem + npages + epages) &&
5801 	    (old_availrmem < availrmem) && (i++ < MAX_CNT)) {
5802 		old_availrmem = availrmem;
5803 		deficit = tune.t_minarmem + npages + epages - availrmem;
5804 		mutex_exit(&freemem_lock);
5805 		page_needfree(deficit);
5806 		seg_preap();
5807 		kmem_reap();
5808 		delay(hz);
5809 		page_needfree(-(spgcnt_t)deficit);
5810 		mutex_enter(&freemem_lock);
5811 	}
5812 
5813 	if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) {
5814 		availrmem -= npages;
5815 		ret = 1;
5816 	}
5817 
5818 	mutex_exit(&freemem_lock);
5819 
5820 	return (ret);
5821 }
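
/*
 * Illustrative sketch (not part of the build): reserving npages of
 * availrmem with epages of extra headroom.  With adjust set, a
 * successful call has already decremented availrmem by npages; the
 * assumption here is that the caller returns them with page_unresv()
 * when done.
 *
 *	if (!page_reclaim_mem(npages, epages, 1))
 *		return (ENOMEM);
 *	(use the memory)
 *	page_unresv(npages);
 */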
5822 
5823 /*
5824  * Search the memory segments to locate the desired page.  Within a
5825  * segment, pages increase linearly with one page structure per
5826  * physical page frame (size PAGESIZE).  The search begins
5827  * with the segment that was accessed last, to take advantage of locality.
 * If the hint misses, we try the pfn hash next, and finally fall back
 * to a linear search of the sorted memseg list.
5829  */
5830 
5832 /*
5833  * Some data structures for pfn to pp lookup.
5834  */
5835 ulong_t mhash_per_slot;
5836 struct memseg *memseg_hash[N_MEM_SLOTS];
5837 
5838 page_t *
5839 page_numtopp_nolock(pfn_t pfnum)
5840 {
5841 	struct memseg *seg;
5842 	page_t *pp;
5843 	vm_cpu_data_t *vc = CPU->cpu_vm_data;
5844 
5845 	ASSERT(vc != NULL);
5846 
5847 	MEMSEG_STAT_INCR(nsearch);
5848 
5849 	/* Try last winner first */
5850 	if (((seg = vc->vc_pnum_memseg) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5852 		MEMSEG_STAT_INCR(nlastwon);
5853 		pp = seg->pages + (pfnum - seg->pages_base);
5854 		if (pp->p_pagenum == pfnum)
5855 			return ((page_t *)pp);
5856 	}
5857 
5858 	/* Else Try hash */
5859 	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5861 		MEMSEG_STAT_INCR(nhashwon);
5862 		vc->vc_pnum_memseg = seg;
5863 		pp = seg->pages + (pfnum - seg->pages_base);
5864 		if (pp->p_pagenum == pfnum)
5865 			return ((page_t *)pp);
5866 	}
5867 
5868 	/* Else Brute force */
5869 	for (seg = memsegs; seg != NULL; seg = seg->next) {
5870 		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5871 			vc->vc_pnum_memseg = seg;
5872 			pp = seg->pages + (pfnum - seg->pages_base);
5873 			return ((page_t *)pp);
5874 		}
5875 	}
5876 	vc->vc_pnum_memseg = NULL;
5877 	MEMSEG_STAT_INCR(nnotfound);
5878 	return ((page_t *)NULL);
5880 }
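
/*
 * Illustrative sketch, not part of the original source: pfn-to-page_t
 * translation must tolerate a NULL return for pfns that fall outside
 * every memseg (e.g. MMIO ranges).  example_pfn_is_managed() is a
 * hypothetical helper.
 */
#ifdef	EXAMPLE_CODE
static int
example_pfn_is_managed(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	return (pp != NULL && page_pptonum(pp) == pfn);
}
#endif	/* EXAMPLE_CODE */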
5881 
5882 struct memseg *
5883 page_numtomemseg_nolock(pfn_t pfnum)
5884 {
5885 	struct memseg *seg;
5886 	page_t *pp;
5887 
5888 	/* Try hash */
5889 	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
5891 		pp = seg->pages + (pfnum - seg->pages_base);
5892 		if (pp->p_pagenum == pfnum)
5893 			return (seg);
5894 	}
5895 
5896 	/* Else Brute force */
5897 	for (seg = memsegs; seg != NULL; seg = seg->next) {
5898 		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5899 			return (seg);
5900 		}
5901 	}
5902 	return ((struct memseg *)NULL);
5903 }
5904 
5905 /*
 * Given a page and a count, return the page struct that is
5907  * n structs away from the current one in the global page
5908  * list.
5909  *
5910  * This function wraps to the first page upon
5911  * reaching the end of the memseg list.
5912  */
5913 page_t *
5914 page_nextn(page_t *pp, ulong_t n)
5915 {
5916 	struct memseg *seg;
5917 	page_t *ppn;
5918 	vm_cpu_data_t *vc = (vm_cpu_data_t *)CPU->cpu_vm_data;
5919 
5920 	ASSERT(vc != NULL);
5921 
5922 	if (((seg = vc->vc_pnext_memseg) == NULL) ||
5923 	    (seg->pages_base == seg->pages_end) ||
5924 	    !(pp >= seg->pages && pp < seg->epages)) {
5925 
5926 		for (seg = memsegs; seg; seg = seg->next) {
5927 			if (pp >= seg->pages && pp < seg->epages)
5928 				break;
5929 		}
5930 
5931 		if (seg == NULL) {
5932 			/* Memory delete got in, return something valid. */
5933 			/* TODO: fix me. */
5934 			seg = memsegs;
5935 			pp = seg->pages;
5936 		}
5937 	}
5938 
5939 	/* check for wraparound - possible if n is large */
5940 	while ((ppn = (pp + n)) >= seg->epages || ppn < pp) {
5941 		n -= seg->epages - pp;
5942 		seg = seg->next;
5943 		if (seg == NULL)
5944 			seg = memsegs;
5945 		pp = seg->pages;
5946 	}
5947 	vc->vc_pnext_memseg = seg;
5948 	return (ppn);
5949 }
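
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * sampler visits every stride'th page (stride > 0 assumed).  Because
 * page_nextn() wraps to the start of the memseg list, the walk is
 * bounded by a count derived from total_pages, not by a NULL return.
 */
#ifdef	EXAMPLE_CODE
static void
example_stride_walk(ulong_t stride)
{
	page_t *pp = page_first();
	pgcnt_t steps = total_pages / stride;

	while (steps-- != 0) {
		/* ... sample *pp; no lock is held, treat it as a hint ... */
		pp = page_nextn(pp, stride);
	}
}
#endif	/* EXAMPLE_CODE */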
5950 
5951 /*
5952  * Initialize for a loop using page_next_scan_large().
5953  */
5954 page_t *
5955 page_next_scan_init(void **cookie)
5956 {
5957 	ASSERT(cookie != NULL);
5958 	*cookie = (void *)memsegs;
5959 	return ((page_t *)memsegs->pages);
5960 }
5961 
5962 /*
5963  * Return the next page in a scan of page_t's, assuming we want
5964  * to skip over sub-pages within larger page sizes.
5965  *
5966  * The cookie is used to keep track of the current memseg.
5967  */
5968 page_t *
5969 page_next_scan_large(
5970 	page_t		*pp,
5971 	ulong_t		*n,
5972 	void		**cookie)
5973 {
5974 	struct memseg	*seg = (struct memseg *)*cookie;
5975 	page_t		*new_pp;
5976 	ulong_t		cnt;
5977 	pfn_t		pfn;
5978 
5980 	/*
5981 	 * get the count of page_t's to skip based on the page size
5982 	 */
5983 	ASSERT(pp != NULL);
5984 	if (pp->p_szc == 0) {
5985 		cnt = 1;
5986 	} else {
5987 		pfn = page_pptonum(pp);
5988 		cnt = page_get_pagecnt(pp->p_szc);
5989 		cnt -= pfn & (cnt - 1);
5990 	}
5991 	*n += cnt;
5992 	new_pp = pp + cnt;
5993 
5994 	/*
5995 	 * Catch if we went past the end of the current memory segment. If so,
5996 	 * just move to the next segment with pages.
5997 	 */
5998 	if (new_pp >= seg->epages) {
5999 		do {
6000 			seg = seg->next;
6001 			if (seg == NULL)
6002 				seg = memsegs;
6003 		} while (seg->pages == seg->epages);
6004 		new_pp = seg->pages;
6005 		*cookie = (void *)seg;
6006 	}
6007 
6008 	return (new_pp);
6009 }
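
/*
 * Illustrative sketch, not part of the original source: the loop shape
 * page_next_scan_init()/page_next_scan_large() are designed for.  The
 * counter n accumulates the page_t's skipped, so one full pass ends
 * once it reaches total_pages.
 */
#ifdef	EXAMPLE_CODE
static void
example_large_page_scan(void)
{
	void *cookie;
	ulong_t n = 0;
	page_t *pp = page_next_scan_init(&cookie);

	while (n < total_pages) {
		/* ... examine the base page of each large page ... */
		pp = page_next_scan_large(pp, &n, &cookie);
	}
}
#endif	/* EXAMPLE_CODE */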
6010 
6012 /*
 * Returns the next page in the list.  Note: this function wraps
 * to the first page in the list upon reaching the end
 * of the list.  Callers should be aware of this fact.
6016  */
6017 
/* We should change this to be a #define. */
6019 
6020 page_t *
6021 page_next(page_t *pp)
6022 {
6023 	return (page_nextn(pp, 1));
6024 }
6025 
6026 page_t *
page_first(void)
6028 {
6029 	return ((page_t *)memsegs->pages);
6030 }
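
/*
 * Illustrative sketch, not part of the original source: since
 * page_next() wraps rather than returning NULL, a hypothetical full
 * walk of the page list terminates after total_pages iterations.
 */
#ifdef	EXAMPLE_CODE
static void
example_full_walk(void)
{
	page_t *pp = page_first();
	pgcnt_t i;

	for (i = 0; i < total_pages; i++) {
		/* ... visit pp ... */
		pp = page_next(pp);
	}
}
#endif	/* EXAMPLE_CODE */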
6031 
6033 /*
6034  * This routine is called at boot with the initial memory configuration
6035  * and when memory is added or removed.
6036  */
6037 void
build_pfn_hash(void)
6039 {
6040 	pfn_t cur;
6041 	pgcnt_t index;
6042 	struct memseg *pseg;
6043 	int	i;
6044 
6045 	/*
6046 	 * Clear memseg_hash array.
6047 	 * Since memory add/delete is designed to operate concurrently
6048 	 * with normal operation, the hash rebuild must be able to run
6049 	 * concurrently with page_numtopp_nolock(). To support this
6050 	 * functionality, assignments to memseg_hash array members must
6051 	 * be done atomically.
6052 	 *
6053 	 * NOTE: bzero() does not currently guarantee this for kernel
6054 	 * threads, and cannot be used here.
6055 	 */
6056 	for (i = 0; i < N_MEM_SLOTS; i++)
6057 		memseg_hash[i] = NULL;
6058 
6059 	hat_kpm_mseghash_clear(N_MEM_SLOTS);
6060 
6061 	/*
6062 	 * Physmax is the last valid pfn.
6063 	 */
6064 	mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
6065 	for (pseg = memsegs; pseg != NULL; pseg = pseg->next) {
6066 		index = MEMSEG_PFN_HASH(pseg->pages_base);
6067 		cur = pseg->pages_base;
6068 		do {
6069 			if (index >= N_MEM_SLOTS)
6070 				index = MEMSEG_PFN_HASH(cur);
6071 
6072 			if (memseg_hash[index] == NULL ||
6073 			    memseg_hash[index]->pages_base > pseg->pages_base) {
6074 				memseg_hash[index] = pseg;
6075 				hat_kpm_mseghash_update(index, pseg);
6076 			}
6077 			cur += mhash_per_slot;
6078 			index++;
6079 		} while (cur < pseg->pages_end);
6080 	}
6081 }
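
/*
 * Worked sketch of the hash layout, not part of the original source,
 * and assuming the header pairs MEM_HASH_SHIFT with a power-of-two
 * N_MEM_SLOTS (e.g. 9 and 512): with physmax = 0x3ffff (1 GB of 4K
 * pages), mhash_per_slot = 0x40000 >> 9 = 0x200, so slot i covers
 * pfns [i * 0x200, (i + 1) * 0x200).  Each slot ends up pointing at
 * the memseg with the lowest pages_base that overlaps the slot's pfn
 * range; page_numtopp_nolock() then range-checks that candidate and
 * falls back to the brute-force memseg walk on a miss.
 */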
6082 
6083 /*
 * Return the pagenum (pfn) for the given pp.
6085  */
6086 pfn_t
6087 page_pptonum(page_t *pp)
6088 {
6089 	return (pp->p_pagenum);
6090 }
6091 
6092 /*
 * Interface to the referenced, modified, and other PSM bits
 * in the page struct, for use when no locking is desired.
6096  */
6097 void
6098 page_set_props(page_t *pp, uint_t flags)
6099 {
6100 	ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0);
6101 	pp->p_nrm |= (uchar_t)flags;
6102 }
6103 
6104 void
6105 page_clr_all_props(page_t *pp)
6106 {
6107 	pp->p_nrm = 0;
6108 }
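
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller that already has exclusive access to the page marks it
 * referenced and modified without hat-layer locking.
 */
#ifdef	EXAMPLE_CODE
static void
example_mark_ref_mod(page_t *pp)
{
	page_set_props(pp, P_REF | P_MOD);
}
#endif	/* EXAMPLE_CODE */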
6109 
6110 /*
6111  * Clear p_lckcnt and p_cowcnt, adjusting freemem if required.
6112  */
6113 int
6114 page_clear_lck_cow(page_t *pp, int adjust)
6115 {
6116 	int	f_amount;
6117 
6118 	ASSERT(PAGE_EXCL(pp));
6119 
6120 	/*
6121 	 * The page_struct_lock need not be acquired here since
	 * we require that the caller hold the page exclusively locked.
6123 	 */
6124 	f_amount = 0;
6125 	if (pp->p_lckcnt) {
6126 		f_amount = 1;
6127 		pp->p_lckcnt = 0;
6128 	}
6129 	if (pp->p_cowcnt) {
6130 		f_amount += pp->p_cowcnt;
6131 		pp->p_cowcnt = 0;
6132 	}
6133 
6134 	if (adjust && f_amount) {
6135 		mutex_enter(&freemem_lock);
6136 		availrmem += f_amount;
6137 		mutex_exit(&freemem_lock);
6138 	}
6139 
6140 	return (f_amount);
6141 }
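
/*
 * Illustrative sketch, not part of the original source: the exclusive
 * lock requirement means a hypothetical caller looks like this.
 */
#ifdef	EXAMPLE_CODE
static void
example_drop_lck_cow(page_t *pp)
{
	ASSERT(PAGE_EXCL(pp));
	/* adjust != 0: return the unlocked pages to availrmem */
	(void) page_clear_lck_cow(pp, 1);
}
#endif	/* EXAMPLE_CODE */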
6142 
6143 /*
 * The following function is called from free_vp_pages()
 * to get an inexact share-count estimate for a newly freed page...
6146  */
6147 ulong_t
6148 page_share_cnt(page_t *pp)
6149 {
6150 	return (hat_page_getshare(pp));
6151 }
6152 
6153 int
6154 page_isshared(page_t *pp)
6155 {
6156 	return (hat_page_getshare(pp) > 1);
6157 }
6158 
6159 int
6160 page_isfree(page_t *pp)
6161 {
6162 	return (PP_ISFREE(pp));
6163 }
6164 
6165 int
6166 page_isref(page_t *pp)
6167 {
6168 	return (hat_page_getattr(pp, P_REF));
6169 }
6170 
6171 int
6172 page_ismod(page_t *pp)
6173 {
6174 	return (hat_page_getattr(pp, P_MOD));
6175 }
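
/*
 * Illustrative sketch, not part of the original source: the predicates
 * above read unlocked state, so a hypothetical scanner treats their
 * answers as hints only.
 */
#ifdef	EXAMPLE_CODE
static int
example_page_is_cold(page_t *pp)
{
	return (!page_isfree(pp) && !page_isref(pp) && !page_ismod(pp));
}
#endif	/* EXAMPLE_CODE */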
6176