xref: /titanic_44/usr/src/uts/common/os/mem_cage.c (revision 67e3a03ed4a2813074d36330f062ed6e593a4937)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/thread.h>
31 #include <sys/proc.h>
32 #include <sys/callb.h>
33 #include <sys/vnode.h>
34 #include <sys/debug.h>
35 #include <sys/systm.h>		/* for bzero */
36 #include <sys/memlist.h>
37 #include <sys/cmn_err.h>
38 #include <sys/sysmacros.h>
39 #include <sys/vmsystm.h>	/* for NOMEMWAIT() */
40 #include <sys/atomic.h>		/* used to update kcage_freemem */
41 #include <sys/kmem.h>		/* for kmem_reap */
42 #include <sys/errno.h>
43 #include <sys/mem_cage.h>
44 #include <vm/seg_kmem.h>
45 #include <vm/page.h>
46 #include <vm/hat.h>
47 #include <vm/vm_dep.h>
48 #include <sys/mem_config.h>
49 #include <sys/lgrp.h>
50 #include <sys/rwlock.h>
51 #include <sys/cpupart.h>
52 
53 extern pri_t maxclsyspri;
54 
55 #ifdef DEBUG
56 #define	KCAGE_STATS
57 #endif
58 
59 #ifdef KCAGE_STATS
60 
61 #define	KCAGE_STATS_VERSION 9	/* can help report generators */
62 #define	KCAGE_STATS_NSCANS 256	/* depth of scan statistics buffer */
63 
64 struct kcage_stats_scan {
65 	/* managed by KCAGE_STAT_* macros */
66 	clock_t	scan_lbolt;
67 	uint_t	scan_id;
68 
69 	/* set in kcage_cageout() */
70 	uint_t	kt_passes;
71 	clock_t	kt_ticks;
72 	pgcnt_t	kt_kcage_freemem_start;
73 	pgcnt_t	kt_kcage_freemem_end;
74 	pgcnt_t kt_freemem_start;
75 	pgcnt_t kt_freemem_end;
76 	uint_t	kt_examined;
77 	uint_t	kt_cantlock;
78 	uint_t	kt_gotone;
79 	uint_t	kt_gotonefree;
80 	uint_t	kt_skiplevel;
81 	uint_t	kt_skipshared;
82 	uint_t	kt_skiprefd;
83 	uint_t	kt_destroy;
84 
85 	/* set in kcage_invalidate_page() */
86 	uint_t	kip_reloclocked;
87 	uint_t	kip_relocmod;
88 	uint_t	kip_destroy;
89 	uint_t	kip_nomem;
90 	uint_t	kip_demotefailed;
91 
92 	/* set in kcage_expand() */
93 	uint_t	ke_wanted;
94 	uint_t	ke_examined;
95 	uint_t	ke_lefthole;
96 	uint_t	ke_gotone;
97 	uint_t	ke_gotonefree;
98 };
99 
100 struct kcage_stats {
101 	/* managed by KCAGE_STAT_* macros */
102 	uint_t	version;
103 	uint_t	size;
104 
105 	/* set in kcage_cageout */
106 	uint_t	kt_wakeups;
107 	uint_t	kt_scans;
108 	uint_t	kt_cageout_break;
109 
110 	/* set in kcage_expand */
111 	uint_t	ke_calls;
112 	uint_t	ke_nopfn;
113 	uint_t	ke_nopaget;
114 	uint_t	ke_isnoreloc;
115 	uint_t	ke_deleting;
116 	uint_t	ke_lowfreemem;
117 	uint_t	ke_terminate;
118 
119 	/* set in kcage_freemem_add() */
120 	uint_t	kfa_throttlewake;
121 
122 	/* set in kcage_freemem_sub() */
123 	uint_t	kfs_cagewake;
124 
125 	/* set in kcage_create_throttle */
126 	uint_t	kct_calls;
127 	uint_t	kct_cageout;
128 	uint_t	kct_critical;
129 	uint_t	kct_exempt;
130 	uint_t	kct_cagewake;
131 	uint_t	kct_wait;
132 	uint_t	kct_progress;
133 	uint_t	kct_noprogress;
134 	uint_t	kct_timeout;
135 
136 	/* set in kcage_cageout_wakeup */
137 	uint_t	kcw_expandearly;
138 
139 	/* managed by KCAGE_STAT_* macros */
140 	uint_t	scan_array_size;
141 	uint_t	scan_index;
142 	struct kcage_stats_scan scans[KCAGE_STATS_NSCANS];
143 };
144 
145 static struct kcage_stats kcage_stats;
146 static struct kcage_stats_scan kcage_stats_scan_zero;
147 
148 /*
149  * No real need for atomics here. For the most part the incs and sets are
150  * done by the kernel cage thread. There are a few that are done by any
151  * number of other threads. Those cases are noted by comments.
152  */
153 #define	KCAGE_STAT_INCR(m)	kcage_stats.m++
154 
155 #define	KCAGE_STAT_NINCR(m, v) kcage_stats.m += (v)
156 
157 #define	KCAGE_STAT_INCR_SCAN(m)	\
158 	KCAGE_STAT_INCR(scans[kcage_stats.scan_index].m)
159 
160 #define	KCAGE_STAT_NINCR_SCAN(m, v) \
161 	KCAGE_STAT_NINCR(scans[kcage_stats.scan_index].m, v)
162 
163 #define	KCAGE_STAT_SET(m, v)	kcage_stats.m = (v)
164 
165 #define	KCAGE_STAT_SETZ(m, v)	\
166 	if (kcage_stats.m == 0) kcage_stats.m = (v)
167 
168 #define	KCAGE_STAT_SET_SCAN(m, v)	\
169 	KCAGE_STAT_SET(scans[kcage_stats.scan_index].m, v)
170 
171 #define	KCAGE_STAT_SETZ_SCAN(m, v)	\
172 	KCAGE_STAT_SETZ(scans[kcage_stats.scan_index].m, v)
173 
174 #define	KCAGE_STAT_INC_SCAN_INDEX \
175 	KCAGE_STAT_SET_SCAN(scan_lbolt, lbolt); \
176 	KCAGE_STAT_SET_SCAN(scan_id, kcage_stats.scan_index); \
177 	kcage_stats.scan_index = \
178 	(kcage_stats.scan_index + 1) % KCAGE_STATS_NSCANS; \
179 	kcage_stats.scans[kcage_stats.scan_index] = kcage_stats_scan_zero
180 
181 #define	KCAGE_STAT_INIT_SCAN_INDEX \
182 	kcage_stats.version = KCAGE_STATS_VERSION; \
183 	kcage_stats.size = sizeof (kcage_stats); \
184 	kcage_stats.scan_array_size = KCAGE_STATS_NSCANS; \
185 	kcage_stats.scan_index = 0
186 
187 #else /* KCAGE_STATS */
188 
189 #define	KCAGE_STAT_INCR(v)
190 #define	KCAGE_STAT_NINCR(m, v)
191 #define	KCAGE_STAT_INCR_SCAN(v)
192 #define	KCAGE_STAT_NINCR_SCAN(m, v)
193 #define	KCAGE_STAT_SET(m, v)
194 #define	KCAGE_STAT_SETZ(m, v)
195 #define	KCAGE_STAT_SET_SCAN(m, v)
196 #define	KCAGE_STAT_SETZ_SCAN(m, v)
197 #define	KCAGE_STAT_INC_SCAN_INDEX
198 #define	KCAGE_STAT_INIT_SCAN_INDEX
199 
200 #endif /* KCAGE_STATS */
201 
202 static kmutex_t kcage_throttle_mutex;	/* protects kcage_throttle_cv */
203 static kcondvar_t kcage_throttle_cv;
204 
205 static kmutex_t kcage_cageout_mutex;	/* protects cv and ready flag */
206 static kcondvar_t kcage_cageout_cv;	/* cageout thread naps here */
207 static int kcage_cageout_ready;		/* nonzero when cageout thread ready */
208 kthread_id_t kcage_cageout_thread;	/* to aid debugging */
209 
210 static krwlock_t kcage_range_rwlock;	/* protects kcage_glist elements */
211 
212 /*
213  * Cage expansion happens within a range.
214  */
215 struct kcage_glist {
216 	struct kcage_glist	*next;
217 	pfn_t			base;
218 	pfn_t			lim;
219 	pfn_t			curr;
220 	int			decr;
221 };
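
/*
 * Each element describes one physical range [base, lim) into which the
 * cage may grow.  curr marks the boundary between caged and
 * not-yet-caged pfns: for an upward-growing range (decr == 0) the
 * caged portion is [base, curr) and kcage_get_pfn() hands out curr++;
 * for a downward-growing range (decr != 0) the caged portion is
 * [curr, lim) and kcage_get_pfn() hands out --curr.
 */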
222 
223 static struct kcage_glist *kcage_glist;
224 static struct kcage_glist *kcage_current_glist;
225 
226 /*
227  * The firstfree element is provided so that kmem_alloc can be avoided
228  * until the cage has somewhere to go. This is not currently a problem
229  * as early kmem_allocs use BOP_ALLOC instead of page_create_va.
230  */
231 static vmem_t *kcage_arena;
232 static struct kcage_glist kcage_glist_firstfree;
233 static struct kcage_glist *kcage_glist_freelist = &kcage_glist_firstfree;
234 
235 /*
236  * Miscellaneous forward references
237  */
238 static struct kcage_glist *kcage_glist_alloc(void);
239 static int kcage_glist_delete(pfn_t, pfn_t, struct kcage_glist **);
240 static void kcage_cageout(void);
241 static int kcage_invalidate_page(page_t *, pgcnt_t *);
242 static int kcage_setnoreloc_pages(page_t *, se_t);
243 static int kcage_range_add_internal(pfn_t base, pgcnt_t npgs, kcage_dir_t);
244 static void kcage_init(pgcnt_t preferred_size);
245 static int kcage_range_delete_internal(pfn_t base, pgcnt_t npgs);
246 
247 /*
248  * Kernel Memory Cage counters and thresholds.
249  */
250 int kcage_on = 0;
251 pgcnt_t kcage_freemem;
252 pgcnt_t kcage_needfree;
253 pgcnt_t kcage_lotsfree;
254 pgcnt_t kcage_desfree;
255 pgcnt_t kcage_minfree;
256 pgcnt_t kcage_throttlefree;
257 pgcnt_t	kcage_reserve;
258 int kcage_maxwait = 10;	/* in seconds */
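
/*
 * Thresholds left at zero are given defaults by
 * kcage_recalc_thresholds(), derived from total_pages (all values in
 * pages):
 *
 *	kcage_lotsfree     = MAX(32, total_pages / 256)
 *	kcage_minfree      = MAX(32, kcage_lotsfree / 2)
 *	kcage_desfree      = MAX(32, kcage_minfree)
 *	kcage_throttlefree = MAX(32, kcage_minfree / 2)
 *	kcage_reserve      = MIN(32, kcage_throttlefree / 2)
 *
 * kcage_freemem_sub() wakes the cageout thread once kcage_freemem
 * drops below kcage_desfree, and kcage_create_throttle() blocks
 * allocations once it drops below kcage_throttlefree + npages.
 */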
259 
260 /* when we use lp for kmem we start the cage at a higher initial value */
261 pgcnt_t kcage_kmemlp_mincage;
262 
263 #ifdef DEBUG
264 pgcnt_t	kcage_pagets;
265 #define	KCAGEPAGETS_INC()	kcage_pagets++
266 #else
267 #define	KCAGEPAGETS_INC()
268 #endif
269 
270 /* kstats to export what pages are currently caged */
271 kmutex_t kcage_kstat_lock;
272 static int kcage_kstat_update(kstat_t *ksp, int rw);
273 static int kcage_kstat_snapshot(kstat_t *ksp, void *buf, int rw);
274 
275 /*
276  * Startup and Dynamic Reconfiguration interfaces.
277  * kcage_range_add()
278  * kcage_range_del()
279  * kcage_range_delete_post_mem_del()
280  * kcage_range_init()
281  * kcage_set_thresholds()
282  */
283 
284 /*
285  * Called from page_get_contig_pages to get the approximate kcage pfn range
286  * for exclusion from search for contiguous pages. This routine is called
287  * without kcage_range lock (kcage routines can call page_get_contig_pages
288  * through page_relocate) and with the assumption, based on kcage_range_add,
289  * that kcage_current_glist always contains a valid pointer.
290  */
291 
292 int
293 kcage_current_pfn(pfn_t *pfncur)
294 {
295 	struct kcage_glist *lp = kcage_current_glist;
296 
297 	ASSERT(kcage_on);
298 
299 	ASSERT(lp != NULL);
300 
301 	*pfncur = lp->curr;
302 
303 	return (lp->decr);
304 }
305 
306 /*
307  * Called from vm_pagelist.c during coalesce to find kernel cage regions
308  * within an mnode. Looks for the lowest range between lo and hi.
309  *
310  * Kernel cage memory is defined between kcage_glist and kcage_current_glist.
311  * Non-cage memory is defined between kcage_current_glist and list end.
312  *
313  * If incage is set, returns the lowest kcage range. Otherwise returns lowest
314  * non-cage range.
315  *
316  * Returns zero on success and nlo, nhi:
317  * 	lo <= nlo < nhi <= hi
318  * Returns non-zero if no overlapping range is found.
319  */
320 int
321 kcage_next_range(int incage, pfn_t lo, pfn_t hi,
322     pfn_t *nlo, pfn_t *nhi)
323 {
324 	struct kcage_glist *lp;
325 	pfn_t tlo = hi;
326 	pfn_t thi = hi;
327 
328 	ASSERT(lo <= hi);
329 
330 	/*
331 	 * Reader lock protects the list, but kcage_get_pfn
332 	 * running concurrently may advance kcage_current_glist
333 	 * and also update kcage_current_glist->curr. Page
334 	 * coalesce can handle this race condition.
335 	 */
336 	rw_enter(&kcage_range_rwlock, RW_READER);
337 
338 	for (lp = incage ? kcage_glist : kcage_current_glist;
339 	    lp != NULL; lp = lp->next) {
340 
341 		pfn_t klo, khi;
342 
343 		/* find the range limits in this element */
344 		if ((incage && lp->decr) || (!incage && !lp->decr)) {
345 			klo = lp->curr;
346 			khi = lp->lim;
347 		} else {
348 			klo = lp->base;
349 			khi = lp->curr;
350 		}
351 
352 		/* handle overlap */
353 		if (klo < tlo && klo < khi && lo < khi && klo < hi) {
354 			tlo = MAX(lo, klo);
355 			thi = MIN(hi, khi);
356 			if (tlo == lo)
357 				break;
358 		}
359 
360 		/* check end of kcage */
361 		if (incage && lp == kcage_current_glist) {
362 			break;
363 		}
364 	}
365 
366 	rw_exit(&kcage_range_rwlock);
367 
368 	/* return non-zero if no overlapping range found */
369 	if (tlo == thi)
370 		return (1);
371 
372 	ASSERT(lo <= tlo && tlo < thi && thi <= hi);
373 
374 	/* return overlapping range */
375 	*nlo = tlo;
376 	*nhi = thi;
377 	return (0);
378 }
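
/*
 * A hypothetical example of the above: given a single upward-growing
 * element with base = 0x100, curr = 0x140, lim = 0x200, a caller
 * asking for the caged overlap of [0x0, 0x1000) gets back
 * nlo = 0x100, nhi = 0x140, while the non-caged overlap is
 * nlo = 0x140, nhi = 0x200.
 */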
379 
380 void
381 kcage_range_init(struct memlist *ml, kcage_dir_t d, pgcnt_t preferred_size)
382 {
383 	int ret = 0;
384 
385 	ASSERT(kcage_arena == NULL);
386 	kcage_arena = vmem_create("kcage_arena", NULL, 0, sizeof (uint64_t),
387 	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
388 	ASSERT(kcage_arena != NULL);
389 
390 	if (d == KCAGE_DOWN) {
391 		while (ml->next != NULL)
392 			ml = ml->next;
393 	}
394 
395 	rw_enter(&kcage_range_rwlock, RW_WRITER);
396 
397 	while (ml != NULL) {
398 		ret = kcage_range_add_internal(btop(ml->address),
399 		    btop(ml->size), d);
400 		if (ret)
401 			panic("kcage_range_add_internal failed: "
402 			    "ml=%p, ret=0x%x\n", ml, ret);
403 
404 		ml = (d == KCAGE_DOWN ? ml->prev : ml->next);
405 	}
406 
407 	rw_exit(&kcage_range_rwlock);
408 
409 	if (ret == 0)
410 		kcage_init(preferred_size);
411 }
412 
413 /*
414  * Third arg controls direction of growth: 0: increasing pfns,
415  * 1: decreasing.
416  */
417 static int
418 kcage_range_add_internal(pfn_t base, pgcnt_t npgs, kcage_dir_t d)
419 {
420 	struct kcage_glist *new, **lpp;
421 	pfn_t lim;
422 
423 	ASSERT(rw_write_held(&kcage_range_rwlock));
424 
425 	ASSERT(npgs != 0);
426 	if (npgs == 0)
427 		return (EINVAL);
428 
429 	lim = base + npgs;
430 
431 	ASSERT(lim > base);
432 	if (lim <= base)
433 		return (EINVAL);
434 
435 	new = kcage_glist_alloc();
436 	if (new == NULL) {
437 		return (ENOMEM);
438 	}
439 
440 	new->base = base;
441 	new->lim = lim;
442 	new->decr = (d == KCAGE_DOWN);
443 	if (new->decr != 0)
444 		new->curr = new->lim;
445 	else
446 		new->curr = new->base;
447 	/*
448 	 * Any overlap with existing ranges is removed by deleting it from
449 	 * the new element, which may be split, as we search for the tail.
450 	 */
451 	lpp = &kcage_glist;
452 	while (*lpp != NULL) {
453 		int ret;
454 		ret = kcage_glist_delete((*lpp)->base, (*lpp)->lim, &new);
455 		if (ret != 0)
456 			return (ret);
457 		lpp = &(*lpp)->next;
458 	}
459 
460 	*lpp = new;
461 
462 	if (kcage_current_glist == NULL) {
463 		kcage_current_glist = kcage_glist;
464 	}
465 
466 	return (0);
467 }
468 
469 int
470 kcage_range_add(pfn_t base, pgcnt_t npgs, kcage_dir_t d)
471 {
472 	int ret;
473 
474 	rw_enter(&kcage_range_rwlock, RW_WRITER);
475 	ret = kcage_range_add_internal(base, npgs, d);
476 	rw_exit(&kcage_range_rwlock);
477 	return (ret);
478 }
479 
480 /*
481  * Calls to add and delete must be protected by kcage_range_rwlock
482  */
483 static int
484 kcage_range_delete_internal(pfn_t base, pgcnt_t npgs)
485 {
486 	struct kcage_glist *lp;
487 	pfn_t lim;
488 
489 	ASSERT(rw_write_held(&kcage_range_rwlock));
490 
491 	ASSERT(npgs != 0);
492 	if (npgs == 0)
493 		return (EINVAL);
494 
495 	lim = base + npgs;
496 
497 	ASSERT(lim > base);
498 	if (lim <= base)
499 		return (EINVAL);
500 
501 	/*
502 	 * Check if the delete is OK first as a number of elements
503 	 * might be involved and it will be difficult to go
504 	 * back and undo (can't just add the range back in).
505 	 */
506 	for (lp = kcage_glist; lp != NULL; lp = lp->next) {
507 		/*
508 		 * If there have been no pages allocated from this
509 		 * element, we don't need to check it.
510 		 */
511 		if ((lp->decr == 0 && lp->curr == lp->base) ||
512 		    (lp->decr != 0 && lp->curr == lp->lim))
513 			continue;
514 		/*
515 		 * If the element does not overlap, it's OK.
516 		 */
517 		if (base >= lp->lim || lim <= lp->base)
518 			continue;
519 		/*
520 		 * Overlapping element: Does the range to be deleted
521 		 * overlap the area already used? If so fail.
522 		 */
523 		if (lp->decr == 0 && base < lp->curr && lim >= lp->base) {
524 			return (EBUSY);
525 		}
526 		if (lp->decr != 0 && base < lp->lim && lim >= lp->curr) {
527 			return (EBUSY);
528 		}
529 	}
530 	return (kcage_glist_delete(base, lim, &kcage_glist));
531 }
532 
533 int
534 kcage_range_delete(pfn_t base, pgcnt_t npgs)
535 {
536 	int ret;
537 
538 	rw_enter(&kcage_range_rwlock, RW_WRITER);
539 	ret = kcage_range_delete_internal(base, npgs);
540 	rw_exit(&kcage_range_rwlock);
541 	return (ret);
542 }
543 
544 /*
545  * Calls to add and delete must be protected by kcage_range_rwlock.
546  * This routine gets called after successful Solaris memory
547  * delete operation from DR post memory delete routines.
548  */
549 static int
550 kcage_range_delete_post_mem_del_internal(pfn_t base, pgcnt_t npgs)
551 {
552 	pfn_t lim;
553 
554 	ASSERT(rw_write_held(&kcage_range_rwlock));
555 
556 	ASSERT(npgs != 0);
557 	if (npgs == 0)
558 		return (EINVAL);
559 
560 	lim = base + npgs;
561 
562 	ASSERT(lim > base);
563 	if (lim <= base)
564 		return (EINVAL);
565 
566 	return (kcage_glist_delete(base, lim, &kcage_glist));
567 }
568 
569 int
570 kcage_range_delete_post_mem_del(pfn_t base, pgcnt_t npgs)
571 {
572 	int ret;
573 
574 	rw_enter(&kcage_range_rwlock, RW_WRITER);
575 	ret = kcage_range_delete_post_mem_del_internal(base, npgs);
576 	rw_exit(&kcage_range_rwlock);
577 	return (ret);
578 }
579 
580 /*
581  * No locking is required here as the whole operation is covered
582  * by kcage_range_rwlock writer lock.
583  */
584 static struct kcage_glist *
585 kcage_glist_alloc(void)
586 {
587 	struct kcage_glist *new;
588 
589 	if ((new = kcage_glist_freelist) != NULL) {
590 		kcage_glist_freelist = new->next;
591 	} else {
592 		new = vmem_alloc(kcage_arena, sizeof (*new), VM_NOSLEEP);
593 	}
594 
595 	if (new != NULL)
596 		bzero(new, sizeof (*new));
597 
598 	return (new);
599 }
600 
601 static void
602 kcage_glist_free(struct kcage_glist *lp)
603 {
604 	lp->next = kcage_glist_freelist;
605 	kcage_glist_freelist = lp;
606 }
607 
608 static int
609 kcage_glist_delete(pfn_t base, pfn_t lim, struct kcage_glist **lpp)
610 {
611 	struct kcage_glist *lp, *prev = *lpp;
612 
613 	while ((lp = *lpp) != NULL) {
614 		if (lim > lp->base && base < lp->lim) {
615 			/* The delete range overlaps this element. */
616 			if (base <= lp->base && lim >= lp->lim) {
617 				/* Delete whole element. */
618 				*lpp = lp->next;
619 				if (lp == kcage_current_glist) {
620 					/* This can never happen. */
621 					ASSERT(kcage_current_glist != prev);
622 					kcage_current_glist = prev;
623 				}
624 				kcage_glist_free(lp);
625 				continue;
626 			}
627 
628 			/* Partial delete. */
629 			if (base > lp->base && lim < lp->lim) {
630 				struct kcage_glist *new;
631 
632 				/*
633 				 * Remove a section from the middle,
634 				 * need to allocate a new element.
635 				 */
636 				new = kcage_glist_alloc();
637 				if (new == NULL) {
638 					return (ENOMEM);
639 				}
640 
641 				/*
642 				 * Transfer the unused range to new.
643 				 * Edit lp in place to preserve
644 				 * kcage_current_glist.
645 				 */
646 				new->decr = lp->decr;
647 				if (new->decr != 0) {
648 					new->base = lp->base;
649 					new->lim = base;
650 					new->curr = base;
651 
652 					lp->base = lim;
653 				} else {
654 					new->base = lim;
655 					new->lim = lp->lim;
656 					new->curr = new->base;
657 
658 					lp->lim = base;
659 				}
660 
661 				/* Insert new. */
662 				new->next = lp->next;
663 				lp->next = new;
664 				lpp = &lp->next;
665 			} else {
666 				/* Delete part of current block. */
667 				if (base > lp->base) {
668 					ASSERT(lim >= lp->lim);
669 					ASSERT(base < lp->lim);
670 					if (lp->decr != 0 &&
671 					    lp->curr == lp->lim)
672 						lp->curr = base;
673 					lp->lim = base;
674 				} else {
675 					ASSERT(base <= lp->base);
676 					ASSERT(lim > lp->base);
677 					if (lp->decr == 0 &&
678 					    lp->curr == lp->base)
679 						lp->curr = lim;
680 					lp->base = lim;
681 				}
682 			}
683 		}
684 		prev = *lpp;
685 		lpp = &(*lpp)->next;
686 	}
687 
688 	return (0);
689 }
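
/*
 * To illustrate kcage_glist_delete() with hypothetical pfn values,
 * given an upward-growing element [base = 0x100, lim = 0x200):
 *
 *	delete [0x100, 0x200): the whole element is unlinked and freed.
 *	delete [0x140, 0x180): the element is split in place into
 *	    [0x100, 0x140) plus a newly allocated [0x180, 0x200).
 *	delete [0x180, 0x200): the element is trimmed to [0x100, 0x180).
 *
 * In the edge-trim cases, lp->curr is moved only if no pfns had yet
 * been handed out from the element.
 */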
690 
691 /*
692  * If lockit is 1, kcage_get_pfn holds the
693  * reader lock for kcage_range_rwlock.
694  * Changes to lp->curr can cause race conditions, but
695  * they are handled by higher level code (see kcage_next_range).
696  */
697 static pfn_t
698 kcage_get_pfn(int lockit)
699 {
700 	struct kcage_glist *lp;
701 	pfn_t pfn = PFN_INVALID;
702 
703 	if (lockit && !rw_tryenter(&kcage_range_rwlock, RW_READER))
704 		return (pfn);
705 
706 	lp = kcage_current_glist;
707 	while (lp != NULL) {
708 		if (lp->decr != 0) {
709 			if (lp->curr != lp->base) {
710 				pfn = --lp->curr;
711 				break;
712 			}
713 		} else {
714 			if (lp->curr != lp->lim) {
715 				pfn = lp->curr++;
716 				break;
717 			}
718 		}
719 
720 		lp = lp->next;
721 		if (lp)
722 			kcage_current_glist = lp;
723 	}
724 
725 	if (lockit)
726 		rw_exit(&kcage_range_rwlock);
727 	return (pfn);
728 }
729 
730 /*
731  * Walk the physical address space of the cage.
732  * This routine does not guarantee to return PFNs in the order
733  * in which they were allocated to the cage. Instead, it walks
734  * each range as it appears on the growth list, returning the PFNs
735  * in each range in ascending order.
736  *
737  * To begin scanning at the lower edge of the cage, reset should be
738  * nonzero. To step through the cage, reset should be zero.
739  *
740  * PFN_INVALID will be returned when the upper end of the cage is
741  * reached -- indicating a full scan of the cage has been completed since
742  * previous reset. PFN_INVALID will continue to be returned until
743  * kcage_walk_cage is reset.
744  *
745  * It is possible to receive a PFN_INVALID result on reset if a growth
746  * list is not installed or if none of the PFNs in the installed list have
747  * been allocated to the cage. In other words, there is no cage.
748  *
749  * Caller need not hold kcage_range_rwlock while calling this function
750  * as the front part of the list is static - pages never come out of
751  * the cage.
752  *
753  * The caller is expected to only be kcage_cageout().
754  */
755 static pfn_t
756 kcage_walk_cage(int reset)
757 {
758 	static struct kcage_glist *lp = NULL;
759 	static pfn_t pfn;
760 
761 	if (reset)
762 		lp = NULL;
763 	if (lp == NULL) {
764 		lp = kcage_glist;
765 		pfn = PFN_INVALID;
766 	}
767 again:
768 	if (pfn == PFN_INVALID) {
769 		if (lp == NULL)
770 			return (PFN_INVALID);
771 
772 		if (lp->decr != 0) {
773 			/*
774 			 * In this range the cage grows from the highest
775 			 * address towards the lowest.
776 			 * Arrange to return pfns from curr to lim-1,
777 			 * inclusive, in ascending order.
778 			 */
779 
780 			pfn = lp->curr;
781 		} else {
782 			/*
783 			 * In this range the cage grows from the lowest
784 			 * address towards the highest.
785 			 * Arrange to return pfns from base to curr,
786 			 * inclusive, in ascending order.
787 			 */
788 
789 			pfn = lp->base;
790 		}
791 	}
792 
793 	if (lp->decr != 0) {		/* decrementing pfn */
794 		if (pfn == lp->lim) {
795 			/* Don't go beyond the static part of the glist. */
796 			if (lp == kcage_current_glist)
797 				lp = NULL;
798 			else
799 				lp = lp->next;
800 			pfn = PFN_INVALID;
801 			goto again;
802 		}
803 
804 		ASSERT(pfn >= lp->curr && pfn < lp->lim);
805 	} else {			/* incrementing pfn */
806 		if (pfn == lp->curr) {
807 			/* Don't go beyond the static part of the glist. */
808 			if (lp == kcage_current_glist)
809 				lp = NULL;
810 			else
811 				lp = lp->next;
812 			pfn = PFN_INVALID;
813 			goto again;
814 		}
815 
816 		ASSERT(pfn >= lp->base && pfn < lp->curr);
817 	}
818 
819 	return (pfn++);
820 }
821 
822 /*
823  * Callback functions to recalculate cage thresholds after
824  * kphysm memory add/delete operations.
825  */
826 /*ARGSUSED*/
827 static void
828 kcage_kphysm_postadd_cb(void *arg, pgcnt_t delta_pages)
829 {
830 	kcage_recalc_thresholds();
831 }
832 
833 /*ARGSUSED*/
834 static int
835 kcage_kphysm_predel_cb(void *arg, pgcnt_t delta_pages)
836 {
837 	/* TODO: when should cage refuse memory delete requests? */
838 	return (0);
839 }
840 
841 /*ARGSUSED*/
842 static  void
843 kcage_kphysm_postdel_cb(void *arg, pgcnt_t delta_pages, int cancelled)
844 {
845 	kcage_recalc_thresholds();
846 }
847 
848 static kphysm_setup_vector_t kcage_kphysm_vectors = {
849 	KPHYSM_SETUP_VECTOR_VERSION,
850 	kcage_kphysm_postadd_cb,
851 	kcage_kphysm_predel_cb,
852 	kcage_kphysm_postdel_cb
853 };
854 
855 /*
856  * This is called before a CPR suspend and after a CPR resume.  We have to
857  * turn off kcage_cageout_ready before a suspend, and turn it back on after a
858  * restart.
859  */
860 /*ARGSUSED*/
861 static boolean_t
862 kcage_cageout_cpr(void *arg, int code)
863 {
864 	if (code == CB_CODE_CPR_CHKPT) {
865 		ASSERT(kcage_cageout_ready);
866 		kcage_cageout_ready = 0;
867 		return (B_TRUE);
868 	} else if (code == CB_CODE_CPR_RESUME) {
869 		ASSERT(kcage_cageout_ready == 0);
870 		kcage_cageout_ready = 1;
871 		return (B_TRUE);
872 	}
873 	return (B_FALSE);
874 }
875 
876 /*
877  * kcage_recalc_preferred_size() increases the initial cage size to improve
878  * large page availability when lp for kmem is enabled and kpr is disabled.
879  */
880 static pgcnt_t
881 kcage_recalc_preferred_size(pgcnt_t preferred_size)
882 {
883 	if (SEGKMEM_USE_LARGEPAGES && segkmem_reloc == 0) {
884 		pgcnt_t lpmincage = kcage_kmemlp_mincage;
885 		if (lpmincage == 0) {
886 			lpmincage = MIN(P2ROUNDUP(((physmem * PAGESIZE) / 8),
887 			    segkmem_heaplp_quantum), 0x40000000UL) / PAGESIZE;
888 		}
889 		kcage_kmemlp_mincage = MIN(lpmincage,
890 		    (segkmem_kmemlp_max / PAGESIZE));
891 		preferred_size = MAX(kcage_kmemlp_mincage, preferred_size);
892 	}
893 	return (preferred_size);
894 }
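
/*
 * For example (illustrative numbers only): with 8 GB of physical
 * memory, (physmem * PAGESIZE) / 8 is 1 GB, which the MIN() above
 * caps at 0x40000000 (1 GB); so, absent tuning, the preferred cage
 * starts at 1 GB worth of pages, further limited by
 * segkmem_kmemlp_max.
 */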
895 
896 /*
897  * Kcage_init() builds the cage and initializes the cage thresholds.
898  * The size of the cage is determined by the argument preferred_size,
899  * or the actual amount of memory, whichever is smaller.
900  */
901 static void
902 kcage_init(pgcnt_t preferred_size)
903 {
904 	pgcnt_t wanted;
905 	pfn_t pfn;
906 	page_t *pp;
907 	kstat_t *ksp;
908 
909 	extern struct vnode kvp;
910 	extern void page_list_noreloc_startup(page_t *);
911 
912 	ASSERT(!kcage_on);
913 
914 	/* increase preferred cage size for lp for kmem */
915 	preferred_size = kcage_recalc_preferred_size(preferred_size);
916 
917 	/* Debug note: initialize this now so early expansions can stat */
918 	KCAGE_STAT_INIT_SCAN_INDEX;
919 
920 	/*
921 	 * Initialize cage thresholds and install kphysm callback.
922 	 * If we can't arrange to have the thresholds track with
923 	 * available physical memory, then the cage thresholds may
924  * end up over time at levels that adversely affect system
925 	 * performance; so, bail out.
926 	 */
927 	kcage_recalc_thresholds();
928 	if (kphysm_setup_func_register(&kcage_kphysm_vectors, NULL)) {
929 		ASSERT(0);		/* Catch this in DEBUG kernels. */
930 		return;
931 	}
932 
933 	/*
934 	 * Limit startup cage size within the range of kcage_minfree
935 	 * and availrmem, inclusive.
936 	 */
937 	wanted = MIN(MAX(preferred_size, kcage_minfree), availrmem);
938 
939 	/*
940 	 * Construct the cage. PFNs are allocated from the glist. It
941 	 * is assumed that the list has been properly ordered for the
942 	 * platform by the platform code. Typically, this is as simple as
943 	 * calling kcage_range_init(phys_avail, d, preferred_size), where
944 	 * d is KCAGE_DOWN if the kernel has been loaded into the upper
945 	 * end of physical memory, or KCAGE_UP if loaded at the low end.
946 	 *
947 	 * Note: it is assumed that we are in the startup flow, so there
948 	 * is no reason to grab the page lock.
949 	 */
950 	kcage_freemem = 0;
951 	pfn = PFN_INVALID;			/* prime for alignment test */
952 	while (wanted != 0) {
953 		if ((pfn = kcage_get_pfn(0)) == PFN_INVALID)
954 			break;
955 
956 		if ((pp = page_numtopp_nolock(pfn)) != NULL) {
957 			KCAGEPAGETS_INC();
958 			/*
959 			 * Set the noreloc state on the page.
960 			 * If the page is free and not already
961 			 * on the noreloc list then move it.
962 			 */
963 			if (PP_ISFREE(pp)) {
964 				if (PP_ISNORELOC(pp) == 0)
965 					page_list_noreloc_startup(pp);
966 			} else {
967 				ASSERT(pp->p_szc == 0);
968 				PP_SETNORELOC(pp);
969 			}
970 		}
971 		PLCNT_XFER_NORELOC(pp);
972 		wanted -= 1;
973 	}
974 
975 	/*
976 	 * Need to go through and find kernel allocated pages
977 	 * and capture them into the Cage.  These will primarily
978 	 * be pages gotten through boot_alloc().
979 	 */
980 	if (kvp.v_pages) {
981 
982 		pp = kvp.v_pages;
983 		do {
984 			ASSERT(!PP_ISFREE(pp));
985 			ASSERT(pp->p_szc == 0);
986 			PP_SETNORELOC(pp);
987 		} while ((pp = pp->p_vpnext) != kvp.v_pages);
988 
989 	}
990 
991 	kcage_on = 1;
992 
993 	/*
994 	 * CB_CL_CPR_POST_KERNEL is the class that executes from cpr_suspend()
995 	 * after the cageout thread is blocked, and executes from cpr_resume()
996 	 * before the cageout thread is restarted.  By executing in this class,
997 	 * we are assured that the kernel cage thread won't miss wakeup calls
998 	 * and also CPR's larger kmem_alloc requests will not fail after
999 	 * CPR shuts down the cageout kernel thread.
1000 	 */
1001 	(void) callb_add(kcage_cageout_cpr, NULL, CB_CL_CPR_POST_KERNEL,
1002 	    "cageout");
1003 
1004 	/*
1005 	 * Coalesce pages to improve large page availability. A better fix
1006 	 * would be to coalesce pages as they are included in the cage.
1007 	 */
1008 	if (SEGKMEM_USE_LARGEPAGES) {
1009 		extern void page_freelist_coalesce_all(int mnode);
1010 		page_freelist_coalesce_all(-1);	/* do all mnodes */
1011 	}
1012 
1013 	ksp = kstat_create("kcage", 0, "kcage_page_list", "misc",
1014 	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VAR_SIZE | KSTAT_FLAG_VIRTUAL);
1015 	if (ksp != NULL) {
1016 		ksp->ks_update = kcage_kstat_update;
1017 		ksp->ks_snapshot = kcage_kstat_snapshot;
1018 		ksp->ks_lock = &kcage_kstat_lock; /* XXX - not really needed */
1019 		kstat_install(ksp);
1020 	}
1021 }
1022 
1023 static int
1024 kcage_kstat_update(kstat_t *ksp, int rw)
1025 {
1026 	struct kcage_glist *lp;
1027 	uint_t count;
1028 
1029 	if (rw == KSTAT_WRITE)
1030 		return (EACCES);
1031 
1032 	count = 0;
1033 	rw_enter(&kcage_range_rwlock, RW_WRITER);
1034 	for (lp = kcage_glist; lp != NULL; lp = lp->next) {
1035 		if (lp->decr) {
1036 			if (lp->curr != lp->lim) {
1037 				count++;
1038 			}
1039 		} else {
1040 			if (lp->curr != lp->base) {
1041 				count++;
1042 			}
1043 		}
1044 	}
1045 	rw_exit(&kcage_range_rwlock);
1046 
1047 	ksp->ks_ndata = count;
1048 	ksp->ks_data_size = count * 2 * sizeof (uint64_t);
1049 
1050 	return (0);
1051 }
1052 
1053 static int
1054 kcage_kstat_snapshot(kstat_t *ksp, void *buf, int rw)
1055 {
1056 	struct kcage_glist *lp;
1057 	struct memunit {
1058 		uint64_t address;
1059 		uint64_t size;
1060 	} *kspmem;
1061 
1062 	if (rw == KSTAT_WRITE)
1063 		return (EACCES);
1064 
1065 	ksp->ks_snaptime = gethrtime();
1066 
1067 	kspmem = (struct memunit *)buf;
1068 	rw_enter(&kcage_range_rwlock, RW_WRITER);
1069 	for (lp = kcage_glist; lp != NULL; lp = lp->next, kspmem++) {
1070 		if ((caddr_t)kspmem >= (caddr_t)buf + ksp->ks_data_size)
1071 			break;
1072 
1073 		if (lp->decr) {
1074 			if (lp->curr != lp->lim) {
1075 				kspmem->address = ptob(lp->curr);
1076 				kspmem->size = ptob(lp->lim - lp->curr);
1077 			}
1078 		} else {
1079 			if (lp->curr != lp->base) {
1080 				kspmem->address = ptob(lp->base);
1081 				kspmem->size = ptob(lp->curr - lp->base);
1082 			}
1083 		}
1084 	}
1085 	rw_exit(&kcage_range_rwlock);
1086 
1087 	return (0);
1088 }
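
/*
 * The kcage:0:kcage_page_list kstat exported above is a raw,
 * variable-size array of (address, size) pairs, one per glist element
 * that has contributed pages to the cage; as a raw kstat it is
 * typically read through libkstat rather than as named statistics.
 */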
1089 
1090 void
1091 kcage_recalc_thresholds()
1092 {
1093 	static int first = 1;
1094 	static pgcnt_t init_lotsfree;
1095 	static pgcnt_t init_desfree;
1096 	static pgcnt_t init_minfree;
1097 	static pgcnt_t init_throttlefree;
1098 	static pgcnt_t init_reserve;
1099 
1100 	/* TODO: any reason to take more care than this with live editing? */
1101 	mutex_enter(&kcage_cageout_mutex);
1102 	mutex_enter(&freemem_lock);
1103 
1104 	if (first) {
1105 		first = 0;
1106 		init_lotsfree = kcage_lotsfree;
1107 		init_desfree = kcage_desfree;
1108 		init_minfree = kcage_minfree;
1109 		init_throttlefree = kcage_throttlefree;
1110 		init_reserve = kcage_reserve;
1111 	} else {
1112 		kcage_lotsfree = init_lotsfree;
1113 		kcage_desfree = init_desfree;
1114 		kcage_minfree = init_minfree;
1115 		kcage_throttlefree = init_throttlefree;
1116 		kcage_reserve = init_reserve;
1117 	}
1118 
1119 	if (kcage_lotsfree == 0)
1120 		kcage_lotsfree = MAX(32, total_pages / 256);
1121 
1122 	if (kcage_minfree == 0)
1123 		kcage_minfree = MAX(32, kcage_lotsfree / 2);
1124 
1125 	if (kcage_desfree == 0)
1126 		kcage_desfree = MAX(32, kcage_minfree);
1127 
1128 	if (kcage_throttlefree == 0)
1129 		kcage_throttlefree = MAX(32, kcage_minfree / 2);
1130 
1131 	if (kcage_reserve == 0)
1132 		kcage_reserve = MIN(32, kcage_throttlefree / 2);
1133 
1134 	mutex_exit(&freemem_lock);
1135 	mutex_exit(&kcage_cageout_mutex);
1136 
1137 	if (kcage_cageout_ready) {
1138 		if (kcage_freemem < kcage_desfree)
1139 			kcage_cageout_wakeup();
1140 
1141 		if (kcage_needfree) {
1142 			mutex_enter(&kcage_throttle_mutex);
1143 			cv_broadcast(&kcage_throttle_cv);
1144 			mutex_exit(&kcage_throttle_mutex);
1145 		}
1146 	}
1147 }
1148 
1149 /*
1150  * Pageout interface:
1151  * kcage_cageout_init()
1152  */
1153 void
1154 kcage_cageout_init()
1155 {
1156 	if (kcage_on) {
1157 
1158 		(void) thread_create(NULL, 0, kcage_cageout,
1159 		    NULL, 0, proc_pageout, TS_RUN, maxclsyspri - 1);
1160 	}
1161 }
1162 
1163 
1164 /*
1165  * VM Interfaces:
1166  * kcage_create_throttle()
1167  * kcage_freemem_add()
1168  * kcage_freemem_sub()
1169  */
1170 
1171 /*
1172  * Wakeup cageout thread and throttle waiting for the number of pages
1173  * requested to become available.  For non-critical requests, a
1174  * timeout is added, since freemem accounting is separate from cage
1175  * freemem accounting: it's possible for us to get stuck and not make
1176  * forward progress even though there was sufficient freemem before
1177  * arriving here.
1178  */
1179 int
1180 kcage_create_throttle(pgcnt_t npages, int flags)
1181 {
1182 	int niter = 0;
1183 	pgcnt_t lastfree;
1184 	int enough = kcage_freemem > kcage_throttlefree + npages;
1185 
1186 	KCAGE_STAT_INCR(kct_calls);		/* unprotected incr. */
1187 
1188 	kcage_cageout_wakeup();			/* just to be sure */
1189 	KCAGE_STAT_INCR(kct_cagewake);		/* unprotected incr. */
1190 
1191 	/*
1192 	 * Obviously, we can't throttle the cageout thread since
1193 	 * we depend on it.  We also can't throttle the panic thread.
1194 	 */
1195 	if (curthread == kcage_cageout_thread || panicstr) {
1196 		KCAGE_STAT_INCR(kct_cageout);	/* unprotected incr. */
1197 		return (KCT_CRIT);
1198 	}
1199 
1200 	/*
1201 	 * Don't throttle threads which are critical for proper
1202 	 * vm management if we're above kcage_throttlefree or
1203 	 * if freemem is very low.
1204 	 */
1205 	if (NOMEMWAIT()) {
1206 		if (enough) {
1207 			KCAGE_STAT_INCR(kct_exempt);	/* unprotected incr. */
1208 			return (KCT_CRIT);
1209 		} else if (freemem < minfree) {
1210 			KCAGE_STAT_INCR(kct_critical);  /* unprotected incr. */
1211 			return (KCT_CRIT);
1212 		}
1213 	}
1214 
1215 	/*
1216 	 * Don't throttle real-time threads if kcage_freemem > kcage_reserve.
1217 	 */
1218 	if (DISP_PRIO(curthread) > maxclsyspri &&
1219 	    kcage_freemem > kcage_reserve) {
1220 		KCAGE_STAT_INCR(kct_exempt);	/* unprotected incr. */
1221 		return (KCT_CRIT);
1222 	}
1223 
1224 	/*
1225 	 * Cause all other threads (which are assumed to not be
1226 	 * critical to cageout) to wait here until their request
1227 	 * can be satisfied. Be a little paranoid and wake the
1228 	 * kernel cage on each loop through this logic.
1229 	 */
1230 	while (kcage_freemem < kcage_throttlefree + npages) {
1231 		ASSERT(kcage_on);
1232 
1233 		lastfree = kcage_freemem;
1234 
1235 		if (kcage_cageout_ready) {
1236 			mutex_enter(&kcage_throttle_mutex);
1237 
1238 			kcage_needfree += npages;
1239 			KCAGE_STAT_INCR(kct_wait);
1240 
1241 			kcage_cageout_wakeup();
1242 			KCAGE_STAT_INCR(kct_cagewake);
1243 
1244 			cv_wait(&kcage_throttle_cv, &kcage_throttle_mutex);
1245 
1246 			kcage_needfree -= npages;
1247 
1248 			mutex_exit(&kcage_throttle_mutex);
1249 		} else {
1250 			/*
1251 			 * NOTE: atomics are used just in case we enter
1252 			 * mp operation before the cageout thread is ready.
1253 			 */
1254 			atomic_add_long(&kcage_needfree, npages);
1255 
1256 			kcage_cageout_wakeup();
1257 			KCAGE_STAT_INCR(kct_cagewake);	/* unprotected incr. */
1258 
1259 			atomic_add_long(&kcage_needfree, -npages);
1260 		}
1261 
1262 		if ((flags & PG_WAIT) == 0) {
1263 			if (kcage_freemem > lastfree) {
1264 				KCAGE_STAT_INCR(kct_progress);
1265 				niter = 0;
1266 			} else {
1267 				KCAGE_STAT_INCR(kct_noprogress);
1268 				if (++niter >= kcage_maxwait) {
1269 					KCAGE_STAT_INCR(kct_timeout);
1270 					return (KCT_FAILURE);
1271 				}
1272 			}
1273 		}
1274 	}
1275 	return (KCT_NONCRIT);
1276 }
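
/*
 * To summarize the return values above: KCT_CRIT means the caller was
 * exempt from throttling (the cageout or panic thread, NOMEMWAIT()
 * and real-time exemptions); KCT_NONCRIT means the caller waited
 * until kcage_freemem rose above kcage_throttlefree + npages;
 * KCT_FAILURE is returned only for non-PG_WAIT requests that made no
 * progress for kcage_maxwait consecutive iterations.
 */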
1277 
1278 void
1279 kcage_freemem_add(pgcnt_t npages)
1280 {
1281 	extern void wakeup_pcgs(void);
1282 
1283 	atomic_add_long(&kcage_freemem, npages);
1284 
1285 	wakeup_pcgs();  /* wakeup threads in pcgs() */
1286 
1287 	if (kcage_needfree != 0 &&
1288 	    kcage_freemem >= (kcage_throttlefree + kcage_needfree)) {
1289 
1290 		mutex_enter(&kcage_throttle_mutex);
1291 		cv_broadcast(&kcage_throttle_cv);
1292 		KCAGE_STAT_INCR(kfa_throttlewake);
1293 		mutex_exit(&kcage_throttle_mutex);
1294 	}
1295 }
1296 
1297 void
1298 kcage_freemem_sub(pgcnt_t npages)
1299 {
1300 	atomic_add_long(&kcage_freemem, -npages);
1301 
1302 	if (kcage_freemem < kcage_desfree) {
1303 		kcage_cageout_wakeup();
1304 		KCAGE_STAT_INCR(kfs_cagewake); /* unprotected incr. */
1305 	}
1306 }
1307 
1308 /*
1309  * return 0 on failure and 1 on success.
1310  */
1311 static int
1312 kcage_setnoreloc_pages(page_t *rootpp, se_t se)
1313 {
1314 	pgcnt_t npgs, i;
1315 	page_t *pp;
1316 	pfn_t rootpfn = page_pptonum(rootpp);
1317 	uint_t szc;
1318 
1319 	ASSERT(!PP_ISFREE(rootpp));
1320 	ASSERT(PAGE_LOCKED_SE(rootpp, se));
1321 	if (!group_page_trylock(rootpp, se)) {
1322 		return (0);
1323 	}
1324 	szc = rootpp->p_szc;
1325 	if (szc == 0) {
1326 		/*
1327 		 * The szc of a locked page can only change for pages that are
1328 		 * non-swapfs (i.e. anonymous memory) file system pages.
1329 		 */
1330 		ASSERT(rootpp->p_vnode != NULL &&
1331 		    !PP_ISKAS(rootpp) &&
1332 		    !IS_SWAPFSVP(rootpp->p_vnode));
1333 		PP_SETNORELOC(rootpp);
1334 		return (1);
1335 	}
1336 	npgs = page_get_pagecnt(szc);
1337 	ASSERT(IS_P2ALIGNED(rootpfn, npgs));
1338 	pp = rootpp;
1339 	for (i = 0; i < npgs; i++, pp++) {
1340 		ASSERT(PAGE_LOCKED_SE(pp, se));
1341 		ASSERT(!PP_ISFREE(pp));
1342 		ASSERT(pp->p_szc == szc);
1343 		PP_SETNORELOC(pp);
1344 	}
1345 	group_page_unlock(rootpp);
1346 	return (1);
1347 }
1348 
1349 /*
1350  * Attempt to convert page to a caged page (set the P_NORELOC flag).
1351  * If successful and the page is free, move it to the tail of whichever
1352  * list it is on.
1353  * Returns:
1354  *   EBUSY  page already locked, assimilated but not free.
1355  *   ENOMEM page assimilated, but memory too low to relocate. Page not free.
1356  *   EAGAIN page not assimilated. Page not free.
1357  *   ERANGE page assimilated. Page not root.
1358  *   0      page assimilated. Page free.
1359  *   *nfreedp number of pages freed.
1360  * NOTE: With error codes ENOMEM, EBUSY, and 0 (zero), there is no way
1361  * to distinguish a page that was already a NORELOC page from
1362  * one newly converted to NORELOC by this invocation of
1363  * kcage_assimilate_page.
1364  */
1365 static int
1366 kcage_assimilate_page(page_t *pp, pgcnt_t *nfreedp)
1367 {
1368 	if (page_trylock(pp, SE_EXCL)) {
1369 		if (PP_ISNORELOC(pp)) {
1370 check_free_and_return:
1371 			if (PP_ISFREE(pp)) {
1372 				page_unlock(pp);
1373 				*nfreedp = 0;
1374 				return (0);
1375 			} else {
1376 				page_unlock(pp);
1377 				return (EBUSY);
1378 			}
1379 			/*NOTREACHED*/
1380 		}
1381 	} else {
1382 		if (page_trylock(pp, SE_SHARED)) {
1383 			if (PP_ISNORELOC(pp))
1384 				goto check_free_and_return;
1385 		} else
1386 			return (EAGAIN);
1387 
1388 		if (!PP_ISFREE(pp)) {
1389 			page_unlock(pp);
1390 			return (EAGAIN);
1391 		}
1392 
1393 		/*
1394 		 * Need to upgrade the lock on it and set the NORELOC
1395 		 * bit. If it is free then remove it from the free
1396 		 * list so that the platform free list code can keep
1397 		 * NORELOC pages where they should be.
1398 		 */
1399 		/*
1400 		 * Before doing anything, get the exclusive lock.
1401 		 * This may fail (e.g. ISM pages are left shared locked).
1402 		 * If the page is free this will leave a hole in the
1403 		 * cage. There is no solution yet to this.
1404 		 */
1405 		if (!page_tryupgrade(pp)) {
1406 			page_unlock(pp);
1407 			return (EAGAIN);
1408 		}
1409 	}
1410 
1411 	ASSERT(PAGE_EXCL(pp));
1412 
1413 	if (PP_ISFREE(pp)) {
1414 		int which = PP_ISAGED(pp) ? PG_FREE_LIST : PG_CACHE_LIST;
1415 
1416 		page_list_sub(pp, which);
1417 		ASSERT(pp->p_szc == 0);
1418 		PP_SETNORELOC(pp);
1419 		PLCNT_XFER_NORELOC(pp);
1420 		page_list_add(pp, which | PG_LIST_TAIL);
1421 
1422 		page_unlock(pp);
1423 		*nfreedp = 1;
1424 		return (0);
1425 	} else {
1426 		if (pp->p_szc != 0) {
1427 			if (!kcage_setnoreloc_pages(pp, SE_EXCL)) {
1428 				page_unlock(pp);
1429 				return (EAGAIN);
1430 			}
1431 			ASSERT(PP_ISNORELOC(pp));
1432 		} else {
1433 			PP_SETNORELOC(pp);
1434 		}
1435 		PLCNT_XFER_NORELOC(pp);
1436 		return (kcage_invalidate_page(pp, nfreedp));
1437 	}
1438 	/*NOTREACHED*/
1439 }
1440 
1441 static int
1442 kcage_expand()
1443 {
1444 	int did_something = 0;
1445 
1446 	spgcnt_t wanted;
1447 	pfn_t pfn;
1448 	page_t *pp;
1449 	/* TODO: we don't really need n any more? */
1450 	pgcnt_t n;
1451 	pgcnt_t nf, nfreed;
1452 
1453 	/*
1454 	 * Expand the cage if available cage memory is really low. Calculate
1455 	 * the amount required to return kcage_freemem to the level of
1456 	 * kcage_lotsfree, or to satisfy throttled requests, whichever is
1457 	 * more.  It is rare for their sum to create an artificial threshold
1458 	 * above kcage_lotsfree, but it is possible.
1459 	 *
1460 	 * Exit early if expansion amount is equal to or less than zero.
1461 	 * (<0 is possible if kcage_freemem rises suddenly.)
1462 	 *
1463 	 * Exit early when the global page pool (apparently) does not
1464 	 * have enough free pages to page_relocate() even a single page.
1465 	 */
1466 	wanted = MAX(kcage_lotsfree, kcage_throttlefree + kcage_needfree)
1467 	    - kcage_freemem;
1468 	if (wanted <= 0)
1469 		return (0);
1470 	else if (freemem < pageout_reserve + 1) {
1471 		KCAGE_STAT_INCR(ke_lowfreemem);
1472 		return (0);
1473 	}
1474 
1475 	KCAGE_STAT_INCR(ke_calls);
1476 	KCAGE_STAT_SET_SCAN(ke_wanted, (uint_t)wanted);
1477 
1478 	/*
1479 	 * Assimilate more pages from the global page pool into the cage.
1480 	 */
1481 	n = 0;				/* number of pages PP_SETNORELOC'd */
1482 	nf = 0;				/* number of those actually free */
1483 	while (kcage_on && nf < wanted) {
1484 		pfn = kcage_get_pfn(1);
1485 		if (pfn == PFN_INVALID) {	/* eek! no where to grow */
1486 			KCAGE_STAT_INCR(ke_nopfn);
1487 			goto terminate;
1488 		}
1489 
1490 		KCAGE_STAT_INCR_SCAN(ke_examined);
1491 
1492 		if ((pp = page_numtopp_nolock(pfn)) == NULL) {
1493 			KCAGE_STAT_INCR(ke_nopaget);
1494 			continue;
1495 		}
1496 		KCAGEPAGETS_INC();
1497 		/*
1498 		 * Sanity check. Skip this pfn if it is
1499 		 * being deleted.
1500 		 */
1501 		if (pfn_is_being_deleted(pfn)) {
1502 			KCAGE_STAT_INCR(ke_deleting);
1503 			continue;
1504 		}
1505 
1506 		if (PP_ISNORELOC(pp)) {
1507 			KCAGE_STAT_INCR(ke_isnoreloc);
1508 			continue;
1509 		}
1510 
1511 		switch (kcage_assimilate_page(pp, &nfreed)) {
1512 			case 0:		/* assimilated, page is free */
1513 				KCAGE_STAT_NINCR_SCAN(ke_gotonefree, nfreed);
1514 				did_something = 1;
1515 				nf += nfreed;
1516 				n++;
1517 				break;
1518 
1519 			case EBUSY:	/* assimilated, page not free */
1520 			case ERANGE:	/* assimilated, page not root */
1521 				KCAGE_STAT_INCR_SCAN(ke_gotone);
1522 				did_something = 1;
1523 				n++;
1524 				break;
1525 
1526 			case ENOMEM:	/* assimilated, but no mem */
1527 				KCAGE_STAT_INCR(ke_terminate);
1528 				did_something = 1;
1529 				n++;
1530 				goto terminate;
1531 
1532 			case EAGAIN:	/* can't assimilate */
1533 				KCAGE_STAT_INCR_SCAN(ke_lefthole);
1534 				break;
1535 
1536 			default:	/* catch this with debug kernels */
1537 				ASSERT(0);
1538 				break;
1539 		}
1540 	}
1541 
1542 	/*
1543 	 * Realign cage edge with the nearest physical address
1544 	 * boundary for big pages. This is done to give us a
1545 	 * better chance of actually getting usable big pages
1546 	 * in the cage.
1547 	 */
1548 
1549 terminate:
1550 
1551 	return (did_something);
1552 }
1553 
1554 /*
1555  * Relocate page opp (Original Page Pointer) from cage pool to page rpp
1556  * (Replacement Page Pointer) in the global pool. Page opp will be freed
1557  * if relocation is successful, otherwise it is only unlocked.
1558  * On entry, page opp must be exclusively locked and not free.
1559  * *nfreedp: number of pages freed.
1560  */
1561 static int
1562 kcage_relocate_page(page_t *pp, pgcnt_t *nfreedp)
1563 {
1564 	page_t *opp = pp;
1565 	page_t *rpp = NULL;
1566 	spgcnt_t npgs;
1567 	int result;
1568 
1569 	ASSERT(!PP_ISFREE(opp));
1570 	ASSERT(PAGE_EXCL(opp));
1571 
1572 	result = page_relocate(&opp, &rpp, 1, 1, &npgs, NULL);
1573 	*nfreedp = npgs;
1574 	if (result == 0) {
1575 		while (npgs-- > 0) {
1576 			page_t *tpp;
1577 
1578 			ASSERT(rpp != NULL);
1579 			tpp = rpp;
1580 			page_sub(&rpp, tpp);
1581 			page_unlock(tpp);
1582 		}
1583 
1584 		ASSERT(rpp == NULL);
1585 
1586 		return (0);		/* success */
1587 	}
1588 
1589 	page_unlock(opp);
1590 	return (result);
1591 }
1592 
1593 /*
1594  * Based on page_invalidate_pages()
1595  *
1596  * Kcage_invalidate_page() uses page_relocate() twice. Both instances
1597  * of use must be updated to match the new page_relocate() when it
1598  * becomes available.
1599  *
1600  * Return result of kcage_relocate_page or zero if page was directly freed.
1601  * *nfreedp: number of pages freed.
1602  */
1603 static int
1604 kcage_invalidate_page(page_t *pp, pgcnt_t *nfreedp)
1605 {
1606 	int result;
1607 
1608 #if defined(__sparc)
1609 	extern struct vnode prom_ppages;
1610 	ASSERT(pp->p_vnode != &prom_ppages);
1611 #endif /* __sparc */
1612 
1613 	ASSERT(!PP_ISFREE(pp));
1614 	ASSERT(PAGE_EXCL(pp));
1615 
1616 	/*
1617 	 * Is this page involved in some I/O? shared?
1618 	 * The page_struct_lock need not be acquired to
1619 	 * examine these fields since the page has an
1620 	 * "exclusive" lock.
1621 	 */
1622 	if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
1623 		result = kcage_relocate_page(pp, nfreedp);
1624 #ifdef KCAGE_STATS
1625 		if (result == 0)
1626 			KCAGE_STAT_INCR_SCAN(kip_reloclocked);
1627 		else if (result == ENOMEM)
1628 			KCAGE_STAT_INCR_SCAN(kip_nomem);
1629 #endif
1630 		return (result);
1631 	}
1632 
1633 	ASSERT(pp->p_vnode->v_type != VCHR);
1634 
1635 	/*
1636 	 * Unload the mappings and check if mod bit is set.
1637 	 */
1638 	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
1639 
1640 	if (hat_ismod(pp)) {
1641 		result = kcage_relocate_page(pp, nfreedp);
1642 #ifdef KCAGE_STATS
1643 		if (result == 0)
1644 			KCAGE_STAT_INCR_SCAN(kip_relocmod);
1645 		else if (result == ENOMEM)
1646 			KCAGE_STAT_INCR_SCAN(kip_nomem);
1647 #endif
1648 		return (result);
1649 	}
1650 
1651 	if (!page_try_demote_pages(pp)) {
1652 		KCAGE_STAT_INCR_SCAN(kip_demotefailed);
1653 		page_unlock(pp);
1654 		return (EAGAIN);
1655 	}
1656 
1657 	page_destroy(pp, 0);
1658 	KCAGE_STAT_INCR_SCAN(kip_destroy);
1659 	*nfreedp = 1;
1660 	return (0);
1661 }
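
/*
 * In short: pages that are locked or copy-on-write, or that are still
 * modified after their mappings are unloaded, are relocated out of
 * the cage; only clean, unmapped pages that can be demoted to
 * szc == 0 are destroyed and freed directly.
 */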
1662 
1663 static void
1664 kcage_cageout()
1665 {
1666 	pfn_t pfn;
1667 	page_t *pp;
1668 	callb_cpr_t cprinfo;
1669 	int did_something;
1670 	int scan_again;
1671 	pfn_t start_pfn;
1672 	int pass;
1673 	int last_pass;
1674 	int pages_skipped;
1675 	int shared_skipped;
1676 	ulong_t shared_level = 8;
1677 	pgcnt_t nfreed;
1678 #ifdef KCAGE_STATS
1679 	clock_t scan_start;
1680 #endif
1681 
1682 	CALLB_CPR_INIT(&cprinfo, &kcage_cageout_mutex,
1683 	    callb_generic_cpr, "cageout");
1684 
1685 	mutex_enter(&kcage_cageout_mutex);
1686 	kcage_cageout_thread = curthread;
1687 
1688 	pfn = PFN_INVALID;		/* force scan reset */
1689 	start_pfn = PFN_INVALID;	/* force init with 1st cage pfn */
1690 	kcage_cageout_ready = 1;	/* switch kcage_cageout_wakeup mode */
1691 
1692 loop:
1693 	/*
1694 	 * Wait here. Sooner or later, kcage_freemem_sub() will notice
1695 	 * that kcage_freemem is less than kcage_desfree. When it does
1696 	 * notice, kcage_freemem_sub() will wake us up via call to
1697 	 * kcage_cageout_wakeup().
1698 	 */
1699 	CALLB_CPR_SAFE_BEGIN(&cprinfo);
1700 	cv_wait(&kcage_cageout_cv, &kcage_cageout_mutex);
1701 	CALLB_CPR_SAFE_END(&cprinfo, &kcage_cageout_mutex);
1702 
1703 	KCAGE_STAT_INCR(kt_wakeups);
1704 	KCAGE_STAT_SET_SCAN(kt_freemem_start, freemem);
1705 	KCAGE_STAT_SET_SCAN(kt_kcage_freemem_start, kcage_freemem);
1706 	pass = 0;
1707 	last_pass = 0;
1708 
1709 #ifdef KCAGE_STATS
1710 	scan_start = lbolt;
1711 #endif
1712 
1713 again:
1714 	if (!kcage_on)
1715 		goto loop;
1716 
1717 	KCAGE_STAT_INCR(kt_scans);
1718 	KCAGE_STAT_INCR_SCAN(kt_passes);
1719 
1720 	did_something = 0;
1721 	pages_skipped = 0;
1722 	shared_skipped = 0;
1723 	while ((kcage_freemem < kcage_lotsfree || kcage_needfree) &&
1724 	    (pfn = kcage_walk_cage(pfn == PFN_INVALID)) != PFN_INVALID) {
1725 
1726 		if (start_pfn == PFN_INVALID)
1727 			start_pfn = pfn;
1728 		else if (start_pfn == pfn) {
1729 			last_pass = pass;
1730 			pass += 1;
1731 			/*
1732 			 * Completed a full walk of the kernel cage without
1733 			 * freeing any pages.  If only one CPU is active,
1734 			 * stop the kernel cage walk and try expanding.
1735 			 */
1736 			if (cp_default.cp_ncpus == 1 && did_something == 0) {
1737 				KCAGE_STAT_INCR(kt_cageout_break);
1738 				break;
1739 			}
1740 		}
1741 
1742 		pp = page_numtopp_nolock(pfn);
1743 		if (pp == NULL) {
1744 			continue;
1745 		}
1746 
1747 		KCAGE_STAT_INCR_SCAN(kt_examined);
1748 
1749 		/*
1750 		 * Do a quick PP_ISNORELOC() and PP_ISFREE test outside
1751 		 * of the lock. If one is missed it will be seen next
1752 		 * time through.
1753 		 *
1754 		 * Assimilate non-caged pages. Such pages can exist in the
1755 		 * cage's pfn range because, if a long-term locked page is
1756 		 * encountered during cage expansion, the lock prevents the
1757 		 * expansion logic from setting the P_NORELOC flag. Hence,
1758 		 * non-caged pages can end up surrounded by caged pages.
1759 		 */
1760 		if (!PP_ISNORELOC(pp)) {
1761 			switch (kcage_assimilate_page(pp, &nfreed)) {
1762 				case 0:
1763 					did_something = 1;
1764 					KCAGE_STAT_NINCR_SCAN(kt_gotonefree,
1765 					    nfreed);
1766 					break;
1767 
1768 				case EBUSY:
1769 				case ERANGE:
1770 					did_something = 1;
1771 					KCAGE_STAT_INCR_SCAN(kt_gotone);
1772 					break;
1773 
1774 				case EAGAIN:
1775 				case ENOMEM:
1776 					break;
1777 
1778 				default:
1779 					/* catch this with debug kernels */
1780 					ASSERT(0);
1781 					break;
1782 			}
1783 
1784 			continue;
1785 		} else {
1786 			int prm;
1787 
1788 			if (PP_ISFREE(pp)) {
1789 				continue;
1790 			}
1791 
1792 			if ((PP_ISKAS(pp) && pp->p_lckcnt > 0) ||
1793 			    !page_trylock(pp, SE_EXCL)) {
1794 				KCAGE_STAT_INCR_SCAN(kt_cantlock);
1795 				continue;
1796 			}
1797 
1798 			/* P_NORELOC bit should not have gone away. */
1799 			ASSERT(PP_ISNORELOC(pp));
1800 			if (PP_ISFREE(pp) || (PP_ISKAS(pp) &&
1801 			    pp->p_lckcnt > 0)) {
1802 				page_unlock(pp);
1803 				continue;
1804 			}
1805 
1806 			KCAGE_STAT_SET_SCAN(kt_skiplevel, shared_level);
1807 			if (hat_page_checkshare(pp, shared_level)) {
1808 				page_unlock(pp);
1809 				pages_skipped = 1;
1810 				shared_skipped = 1;
1811 				KCAGE_STAT_INCR_SCAN(kt_skipshared);
1812 				continue;
1813 			}
1814 
1815 			/*
1816 			 * In pass {0, 1}, skip page if ref bit is set.
1817 			 * In pass {0, 1, 2}, skip page if mod bit is set.
1818 			 */
1819 			prm = hat_pagesync(pp,
1820 			    HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
1821 
1822 			/* On first pass ignore ref'd pages */
1823 			if (pass <= 1 && (prm & P_REF)) {
1824 				KCAGE_STAT_INCR_SCAN(kt_skiprefd);
1825 				pages_skipped = 1;
1826 				page_unlock(pp);
1827 				continue;
1828 			}
1829 
1830 			/* In passes 0-2, page_destroy if mod bit is not set */
1831 			if (pass <= 2) {
1832 				if (pp->p_szc != 0 || (prm & P_MOD) ||
1833 				    pp->p_lckcnt || pp->p_cowcnt) {
1834 					pages_skipped = 1;
1835 					page_unlock(pp);
1836 				} else {
1837 
1838 					/*
1839 					 * unload the mappings before
1840 					 * checking if mod bit is set
1841 					 */
1842 					(void) hat_pageunload(pp,
1843 					    HAT_FORCE_PGUNLOAD);
1844 
1845 					/*
1846 					 * skip this page if modified
1847 					 */
1848 					if (hat_ismod(pp)) {
1849 						pages_skipped = 1;
1850 						page_unlock(pp);
1851 						continue;
1852 					}
1853 
1854 					KCAGE_STAT_INCR_SCAN(kt_destroy);
1855 					page_destroy(pp, 0);
1856 					did_something = 1;
1857 				}
1858 				continue;
1859 			}
1860 
1861 			if (kcage_invalidate_page(pp, &nfreed) == 0) {
1862 				did_something = 1;
1863 				KCAGE_STAT_NINCR_SCAN(kt_gotonefree, nfreed);
1864 			}
1865 
1866 			/*
1867 			 * No need to drop the page lock here.
1868 			 * Kcage_invalidate_page has done that for us
1869 			 * either explicitly or through a page_free.
1870 			 */
1871 		}
1872 	}
1873 
1874 	/*
1875 	 * Expand the cage only if available cage memory is really low.
1876 	 * This test is done only after a complete scan of the cage.
1877 	 * The reason for not checking and expanding more often is to
1878 	 * avoid rapid expansion of the cage. Naturally, scanning the
1879 	 * cage takes time. So by scanning first, we use that work as a
1880 	 * delay loop in between expand decisions.
1881 	 */
1882 
1883 	scan_again = 0;
1884 	if (kcage_freemem < kcage_minfree || kcage_needfree) {
1885 		/*
1886 		 * Kcage_expand() will return a non-zero value if it was
1887 		 * able to expand the cage -- whether or not the new
1888 		 * pages are free and immediately usable. If non-zero,
1889 		 * we do another scan of the cage. The pages might be
1890 		 * freed during that scan or by time we get back here.
1891 		 * If not, we will attempt another expansion.
1892 		 * However, if kcage_expand() returns zero, then it was
1893 		 * unable to expand the cage. This is the case when the
1894 		 * growth list is exhausted, therefore no work was done
1895 		 * and there is no reason to scan the cage again.
1896 		 * Note: Kernel cage scan is not repeated when only one
1897 		 * cpu is active to avoid kernel cage thread hogging cpu.
1898 		 */
1899 		if (pass <= 3 && pages_skipped && cp_default.cp_ncpus > 1)
1900 			scan_again = 1;
1901 		else
1902 			(void) kcage_expand(); /* don't scan again */
1903 	} else if (kcage_freemem < kcage_lotsfree) {
1904 		/*
1905 		 * If available cage memory is less than abundant
1906 		 * and a full scan of the cage has not yet been completed,
1907 		 * or a scan has completed and some work was performed,
1908 		 * or pages were skipped because of sharing,
1909 		 * or we simply have not yet completed two passes,
1910 		 * then do another scan.
1911 		 */
1912 		if (pass <= 2 && pages_skipped)
1913 			scan_again = 1;
1914 		if (pass == last_pass || did_something)
1915 			scan_again = 1;
1916 		else if (shared_skipped && shared_level < (8<<24)) {
1917 			shared_level <<= 1;
1918 			scan_again = 1;
1919 		}
1920 	}
1921 
1922 	if (scan_again && cp_default.cp_ncpus > 1)
1923 		goto again;
1924 	else {
1925 		if (shared_level > 8)
1926 			shared_level >>= 1;
1927 
1928 		KCAGE_STAT_SET_SCAN(kt_freemem_end, freemem);
1929 		KCAGE_STAT_SET_SCAN(kt_kcage_freemem_end, kcage_freemem);
1930 		KCAGE_STAT_SET_SCAN(kt_ticks, lbolt - scan_start);
1931 		KCAGE_STAT_INC_SCAN_INDEX;
1932 		goto loop;
1933 	}
1934 
1935 	/*NOTREACHED*/
1936 }
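
/*
 * Pass policy, as implemented above: passes 0 and 1 skip referenced
 * pages; passes 0 through 2 destroy only clean, unlocked, small
 * pages; from pass 3 onward kcage_invalidate_page() is used, which
 * may relocate pages out of the cage.  shared_level starts at 8 and
 * doubles (up to 8 << 24) whenever a scan skipped shared pages, and
 * is halved again each time the thread goes back to sleep.
 */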
1937 
1938 void
1939 kcage_cageout_wakeup()
1940 {
1941 	if (mutex_tryenter(&kcage_cageout_mutex)) {
1942 		if (kcage_cageout_ready) {
1943 			cv_signal(&kcage_cageout_cv);
1944 		} else if (kcage_freemem < kcage_minfree || kcage_needfree) {
1945 			/*
1946 			 * Available cage memory is really low. Time to
1947 			 * start expanding the cage. However, the
1948 			 * kernel cage thread is not yet ready to
1949 			 * do the work. Use *this* thread, which is
1950 			 * most likely to be t0, to do the work.
1951 			 */
1952 			KCAGE_STAT_INCR(kcw_expandearly);
1953 			(void) kcage_expand();
1954 			KCAGE_STAT_INC_SCAN_INDEX;
1955 		}
1956 
1957 		mutex_exit(&kcage_cageout_mutex);
1958 	}
1959 	/* else, kernel cage thread is already running */
1960 }
1961 
1962 void
1963 kcage_tick()
1964 {
1965 	/*
1966 	 * Once per second we wake up all the threads throttled
1967 	 * waiting for cage memory, in case we've become stuck
1968 	 * and haven't made forward progress expanding the cage.
1969 	 */
1970 	if (kcage_on && kcage_cageout_ready)
1971 		cv_broadcast(&kcage_throttle_cv);
1972 }
1973