xref: /titanic_53/usr/src/uts/common/os/sched.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate 
27*7c478bd9Sstevel@tonic-gate /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
28*7c478bd9Sstevel@tonic-gate /*	  All Rights Reserved	*/
29*7c478bd9Sstevel@tonic-gate 
30*7c478bd9Sstevel@tonic-gate 
31*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
32*7c478bd9Sstevel@tonic-gate 
33*7c478bd9Sstevel@tonic-gate #include <sys/param.h>
34*7c478bd9Sstevel@tonic-gate #include <sys/types.h>
35*7c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
36*7c478bd9Sstevel@tonic-gate #include <sys/systm.h>
37*7c478bd9Sstevel@tonic-gate #include <sys/proc.h>
38*7c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
39*7c478bd9Sstevel@tonic-gate #include <sys/var.h>
40*7c478bd9Sstevel@tonic-gate #include <sys/tuneable.h>
41*7c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
42*7c478bd9Sstevel@tonic-gate #include <sys/buf.h>
43*7c478bd9Sstevel@tonic-gate #include <sys/disp.h>
44*7c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h>
45*7c478bd9Sstevel@tonic-gate #include <sys/vmparam.h>
46*7c478bd9Sstevel@tonic-gate #include <sys/class.h>
47*7c478bd9Sstevel@tonic-gate #include <sys/vtrace.h>
48*7c478bd9Sstevel@tonic-gate #include <sys/modctl.h>
49*7c478bd9Sstevel@tonic-gate #include <sys/debug.h>
50*7c478bd9Sstevel@tonic-gate #include <sys/tnf_probe.h>
51*7c478bd9Sstevel@tonic-gate #include <sys/procfs.h>
52*7c478bd9Sstevel@tonic-gate 
53*7c478bd9Sstevel@tonic-gate #include <vm/seg.h>
54*7c478bd9Sstevel@tonic-gate #include <vm/seg_kp.h>
55*7c478bd9Sstevel@tonic-gate #include <vm/as.h>
56*7c478bd9Sstevel@tonic-gate #include <vm/rm.h>
57*7c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
58*7c478bd9Sstevel@tonic-gate #include <sys/callb.h>
59*7c478bd9Sstevel@tonic-gate 
60*7c478bd9Sstevel@tonic-gate /*
61*7c478bd9Sstevel@tonic-gate  * The swapper sleeps on runout when there is no one to swap in.
62*7c478bd9Sstevel@tonic-gate  * It sleeps on runin when it could not find space to swap someone
63*7c478bd9Sstevel@tonic-gate  * in or after swapping someone in.
64*7c478bd9Sstevel@tonic-gate  */
65*7c478bd9Sstevel@tonic-gate char	runout;
66*7c478bd9Sstevel@tonic-gate char	runin;
67*7c478bd9Sstevel@tonic-gate char	wake_sched;	/* flag tells clock to wake swapper on next tick */
68*7c478bd9Sstevel@tonic-gate char	wake_sched_sec;	/* flag tells clock to wake swapper after a second */
69*7c478bd9Sstevel@tonic-gate 
70*7c478bd9Sstevel@tonic-gate /*
71*7c478bd9Sstevel@tonic-gate  * The swapper swaps processes to reduce memory demand and runs
72*7c478bd9Sstevel@tonic-gate  * when avefree < desfree.  The swapper resorts to SOFTSWAP when
73*7c478bd9Sstevel@tonic-gate  * avefree < desfree which results in swapping out all processes
74*7c478bd9Sstevel@tonic-gate  * sleeping for more than maxslp seconds.  HARDSWAP occurs when the
75*7c478bd9Sstevel@tonic-gate  * system is on the verge of thrashing and this results in swapping
76*7c478bd9Sstevel@tonic-gate  * out runnable threads or threads sleeping for less than maxslp secs.
77*7c478bd9Sstevel@tonic-gate  *
78*7c478bd9Sstevel@tonic-gate  * The swapper runs through all the active processes in the system
79*7c478bd9Sstevel@tonic-gate  * and invokes the scheduling class specific swapin/swapout routine
80*7c478bd9Sstevel@tonic-gate  * for every thread in the process to obtain an effective priority
81*7c478bd9Sstevel@tonic-gate  * for the process.  A priority of -1 implies that the thread isn't
82*7c478bd9Sstevel@tonic-gate  * swappable.  This effective priority is used to find the most
83*7c478bd9Sstevel@tonic-gate  * eligible process to swapout or swapin.
84*7c478bd9Sstevel@tonic-gate  *
85*7c478bd9Sstevel@tonic-gate  * NOTE:  Threads which have been swapped are not linked on any
86*7c478bd9Sstevel@tonic-gate  *	  queue and their dispatcher lock points at the "swapped_lock".
87*7c478bd9Sstevel@tonic-gate  *
88*7c478bd9Sstevel@tonic-gate  * Processes containing threads with the TS_DONT_SWAP flag set cannot be
89*7c478bd9Sstevel@tonic-gate  * swapped out immediately by the swapper.  This is due to the fact that
90*7c478bd9Sstevel@tonic-gate  * such threads may be holding locks which may be needed by the swapper
91*7c478bd9Sstevel@tonic-gate  * to push its pages out.  The TS_SWAPENQ flag is set on such threads
92*7c478bd9Sstevel@tonic-gate  * to prevent them running in user mode.  When such threads reach a
93*7c478bd9Sstevel@tonic-gate  * safe point (i.e., are not holding any locks - CL_TRAPRET), they
94*7c478bd9Sstevel@tonic-gate  * queue themselves onto the swap queue which is processed by the
95*7c478bd9Sstevel@tonic-gate  * swapper.  This results in reducing memory demand when the system
96*7c478bd9Sstevel@tonic-gate  * is desperate for memory as the thread can't run in user mode.
97*7c478bd9Sstevel@tonic-gate  *
98*7c478bd9Sstevel@tonic-gate  * The swap queue consists of threads, linked via t_link, which
99*7c478bd9Sstevel@tonic-gate  * haven't been swapped, are runnable but not on the run queue.  The
100*7c478bd9Sstevel@tonic-gate  * swap queue is protected by the "swapped_lock".  The dispatcher
101*7c478bd9Sstevel@tonic-gate  * lock (t_lockp) of all threads on the swap queue points at the
102*7c478bd9Sstevel@tonic-gate  * "swapped_lock".  Thus, the entire queue and/or threads on the
103*7c478bd9Sstevel@tonic-gate  * queue can be locked by acquiring "swapped_lock".
104*7c478bd9Sstevel@tonic-gate  */
105*7c478bd9Sstevel@tonic-gate static kthread_t *tswap_queue;
106*7c478bd9Sstevel@tonic-gate extern disp_lock_t swapped_lock; /* protects swap queue and threads on it */
107*7c478bd9Sstevel@tonic-gate 
108*7c478bd9Sstevel@tonic-gate int	maxslp = 0;
109*7c478bd9Sstevel@tonic-gate pgcnt_t	avefree;	/* 5 sec moving average of free memory */
110*7c478bd9Sstevel@tonic-gate pgcnt_t	avefree30;	/* 30 sec moving average of free memory */
111*7c478bd9Sstevel@tonic-gate 
112*7c478bd9Sstevel@tonic-gate /*
113*7c478bd9Sstevel@tonic-gate  * Minimum size used to decide if sufficient memory is available
114*7c478bd9Sstevel@tonic-gate  * before a process is swapped in.  This is necessary since in most
115*7c478bd9Sstevel@tonic-gate  * cases the actual size of a process (p_swrss) being swapped in
116*7c478bd9Sstevel@tonic-gate  * is usually 2 pages (kernel stack pages).  This is due to the fact
117*7c478bd9Sstevel@tonic-gate  * almost all user pages of a process are stolen by pageout before
118*7c478bd9Sstevel@tonic-gate  * the swapper decides to swap it out.
119*7c478bd9Sstevel@tonic-gate  */
120*7c478bd9Sstevel@tonic-gate int	min_procsize = 12;
121*7c478bd9Sstevel@tonic-gate 
122*7c478bd9Sstevel@tonic-gate static int	swapin(proc_t *);
123*7c478bd9Sstevel@tonic-gate static int	swapout(proc_t *, uint_t *, int);
124*7c478bd9Sstevel@tonic-gate static void	process_swap_queue();
125*7c478bd9Sstevel@tonic-gate 
126*7c478bd9Sstevel@tonic-gate #ifdef __sparc
127*7c478bd9Sstevel@tonic-gate extern void lwp_swapin(kthread_t *);
128*7c478bd9Sstevel@tonic-gate #endif /* __sparc */
129*7c478bd9Sstevel@tonic-gate 
130*7c478bd9Sstevel@tonic-gate /*
131*7c478bd9Sstevel@tonic-gate  * Counters to keep track of the number of swapins or swapouts.
132*7c478bd9Sstevel@tonic-gate  */
133*7c478bd9Sstevel@tonic-gate uint_t tot_swapped_in, tot_swapped_out;
134*7c478bd9Sstevel@tonic-gate uint_t softswap, hardswap, swapqswap;
135*7c478bd9Sstevel@tonic-gate 
136*7c478bd9Sstevel@tonic-gate /*
137*7c478bd9Sstevel@tonic-gate  * Macro to determine if a process is eligible to be swapped.
138*7c478bd9Sstevel@tonic-gate  */
139*7c478bd9Sstevel@tonic-gate #define	not_swappable(p)					\
140*7c478bd9Sstevel@tonic-gate 	(((p)->p_flag & SSYS) || (p)->p_stat == SIDL ||		\
141*7c478bd9Sstevel@tonic-gate 	    (p)->p_stat == SZOMB || (p)->p_as == NULL ||	\
142*7c478bd9Sstevel@tonic-gate 	    (p)->p_as == &kas)
143*7c478bd9Sstevel@tonic-gate 
144*7c478bd9Sstevel@tonic-gate /*
145*7c478bd9Sstevel@tonic-gate  * Memory scheduler.
146*7c478bd9Sstevel@tonic-gate  */
147*7c478bd9Sstevel@tonic-gate void
148*7c478bd9Sstevel@tonic-gate sched()
149*7c478bd9Sstevel@tonic-gate {
150*7c478bd9Sstevel@tonic-gate 	kthread_id_t	t;
151*7c478bd9Sstevel@tonic-gate 	pri_t		proc_pri;
152*7c478bd9Sstevel@tonic-gate 	pri_t		thread_pri;
153*7c478bd9Sstevel@tonic-gate 	pri_t		swapin_pri;
154*7c478bd9Sstevel@tonic-gate 	int		desperate;
155*7c478bd9Sstevel@tonic-gate 	pgcnt_t		needs;
156*7c478bd9Sstevel@tonic-gate 	int		divisor;
157*7c478bd9Sstevel@tonic-gate 	proc_t		*prp;
158*7c478bd9Sstevel@tonic-gate 	proc_t		*swapout_prp;
159*7c478bd9Sstevel@tonic-gate 	proc_t		*swapin_prp;
160*7c478bd9Sstevel@tonic-gate 	spgcnt_t	avail;
161*7c478bd9Sstevel@tonic-gate 	int		chosen_pri;
162*7c478bd9Sstevel@tonic-gate 	time_t		swapout_time;
163*7c478bd9Sstevel@tonic-gate 	time_t		swapin_proc_time;
164*7c478bd9Sstevel@tonic-gate 	callb_cpr_t	cprinfo;
165*7c478bd9Sstevel@tonic-gate 	kmutex_t	swap_cpr_lock;
166*7c478bd9Sstevel@tonic-gate 
167*7c478bd9Sstevel@tonic-gate 	mutex_init(&swap_cpr_lock, NULL, MUTEX_DEFAULT, NULL);
168*7c478bd9Sstevel@tonic-gate 	CALLB_CPR_INIT(&cprinfo, &swap_cpr_lock, callb_generic_cpr, "sched");
169*7c478bd9Sstevel@tonic-gate 	if (maxslp == 0)
170*7c478bd9Sstevel@tonic-gate 		maxslp = MAXSLP;
	/*
	 * Main swapper service loop: drain the swap queue, then look for
	 * a process to swap in and/or a victim process to swap out.  This
	 * function never returns; it blocks at "runout"/"runin" below when
	 * there is nothing to do.
	 */
171*7c478bd9Sstevel@tonic-gate loop:
172*7c478bd9Sstevel@tonic-gate 	needs = 0;
173*7c478bd9Sstevel@tonic-gate 	desperate = 0;
174*7c478bd9Sstevel@tonic-gate 
175*7c478bd9Sstevel@tonic-gate 	swapin_pri = v.v_nglobpris;
176*7c478bd9Sstevel@tonic-gate 	swapin_prp = NULL;
177*7c478bd9Sstevel@tonic-gate 	chosen_pri = -1;
178*7c478bd9Sstevel@tonic-gate 
179*7c478bd9Sstevel@tonic-gate 	process_swap_queue();
180*7c478bd9Sstevel@tonic-gate 
181*7c478bd9Sstevel@tonic-gate 	/*
182*7c478bd9Sstevel@tonic-gate 	 * Set desperate if
183*7c478bd9Sstevel@tonic-gate 	 * 	1.  At least 2 runnable processes (on average).
184*7c478bd9Sstevel@tonic-gate 	 *	2.  Short (5 sec) and longer (30 sec) average is less
185*7c478bd9Sstevel@tonic-gate 	 *	    than minfree and desfree respectively.
186*7c478bd9Sstevel@tonic-gate 	 *	3.  Pagein + pageout rate is excessive.
187*7c478bd9Sstevel@tonic-gate 	 */
188*7c478bd9Sstevel@tonic-gate 	if (avenrun[0] >= 2 * FSCALE &&
189*7c478bd9Sstevel@tonic-gate 	    (MAX(avefree, avefree30) < desfree) &&
190*7c478bd9Sstevel@tonic-gate 	    (pginrate + pgoutrate > maxpgio || avefree < minfree)) {
191*7c478bd9Sstevel@tonic-gate 		TRACE_4(TR_FAC_SCHED, TR_DESPERATE,
192*7c478bd9Sstevel@tonic-gate 		    "desp:avefree: %d, avefree30: %d, freemem: %d"
193*7c478bd9Sstevel@tonic-gate 		    " pginrate: %d\n", avefree, avefree30, freemem, pginrate);
194*7c478bd9Sstevel@tonic-gate 		desperate = 1;
195*7c478bd9Sstevel@tonic-gate 		goto unload;
196*7c478bd9Sstevel@tonic-gate 	}
197*7c478bd9Sstevel@tonic-gate 
198*7c478bd9Sstevel@tonic-gate 	/*
199*7c478bd9Sstevel@tonic-gate 	 * Search list of processes to swapin and swapout deadwood.
200*7c478bd9Sstevel@tonic-gate 	 */
201*7c478bd9Sstevel@tonic-gate 	swapin_proc_time = 0;
202*7c478bd9Sstevel@tonic-gate top:
203*7c478bd9Sstevel@tonic-gate 	mutex_enter(&pidlock);
204*7c478bd9Sstevel@tonic-gate 	for (prp = practive; prp != NULL; prp = prp->p_next) {
205*7c478bd9Sstevel@tonic-gate 		if (not_swappable(prp))
206*7c478bd9Sstevel@tonic-gate 			continue;
207*7c478bd9Sstevel@tonic-gate 
208*7c478bd9Sstevel@tonic-gate 		/*
209*7c478bd9Sstevel@tonic-gate 		 * Look at processes with at least one swapped lwp.
210*7c478bd9Sstevel@tonic-gate 		 */
211*7c478bd9Sstevel@tonic-gate 		if (prp->p_swapcnt) {
212*7c478bd9Sstevel@tonic-gate 			time_t proc_time;
213*7c478bd9Sstevel@tonic-gate 
214*7c478bd9Sstevel@tonic-gate 			/*
215*7c478bd9Sstevel@tonic-gate 			 * Higher priority processes are good candidates
216*7c478bd9Sstevel@tonic-gate 			 * to swapin.
217*7c478bd9Sstevel@tonic-gate 			 */
218*7c478bd9Sstevel@tonic-gate 			mutex_enter(&prp->p_lock);
219*7c478bd9Sstevel@tonic-gate 			proc_pri = -1;
220*7c478bd9Sstevel@tonic-gate 			t = prp->p_tlist;
221*7c478bd9Sstevel@tonic-gate 			proc_time = 0;
222*7c478bd9Sstevel@tonic-gate 			do {
				/* Loaded threads need no swapin; skip them. */
223*7c478bd9Sstevel@tonic-gate 				if (t->t_schedflag & TS_LOAD)
224*7c478bd9Sstevel@tonic-gate 					continue;
225*7c478bd9Sstevel@tonic-gate 
226*7c478bd9Sstevel@tonic-gate 				thread_lock(t);
227*7c478bd9Sstevel@tonic-gate 				thread_pri = CL_SWAPIN(t, 0);
228*7c478bd9Sstevel@tonic-gate 				thread_unlock(t);
229*7c478bd9Sstevel@tonic-gate 
230*7c478bd9Sstevel@tonic-gate 				if (t->t_stime - proc_time > 0)
231*7c478bd9Sstevel@tonic-gate 					proc_time = t->t_stime;
232*7c478bd9Sstevel@tonic-gate 				if (thread_pri > proc_pri)
233*7c478bd9Sstevel@tonic-gate 					proc_pri = thread_pri;
234*7c478bd9Sstevel@tonic-gate 			} while ((t = t->t_forw) != prp->p_tlist);
235*7c478bd9Sstevel@tonic-gate 			mutex_exit(&prp->p_lock);
236*7c478bd9Sstevel@tonic-gate 
237*7c478bd9Sstevel@tonic-gate 			if (proc_pri == -1)
238*7c478bd9Sstevel@tonic-gate 				continue;
239*7c478bd9Sstevel@tonic-gate 
240*7c478bd9Sstevel@tonic-gate 			TRACE_3(TR_FAC_SCHED, TR_CHOOSE_SWAPIN,
241*7c478bd9Sstevel@tonic-gate 			    "prp %p epri %d proc_time %d",
242*7c478bd9Sstevel@tonic-gate 			    prp, proc_pri, proc_time);
243*7c478bd9Sstevel@tonic-gate 
244*7c478bd9Sstevel@tonic-gate 			/*
245*7c478bd9Sstevel@tonic-gate 			 * Swapin processes with a high effective priority.
246*7c478bd9Sstevel@tonic-gate 			 */
247*7c478bd9Sstevel@tonic-gate 			if (swapin_prp == NULL || proc_pri > chosen_pri) {
248*7c478bd9Sstevel@tonic-gate 				swapin_prp = prp;
249*7c478bd9Sstevel@tonic-gate 				chosen_pri = proc_pri;
250*7c478bd9Sstevel@tonic-gate 				swapin_pri = proc_pri;
251*7c478bd9Sstevel@tonic-gate 				swapin_proc_time = proc_time;
252*7c478bd9Sstevel@tonic-gate 			}
253*7c478bd9Sstevel@tonic-gate 		} else {
254*7c478bd9Sstevel@tonic-gate 			/*
255*7c478bd9Sstevel@tonic-gate 			 * No need to soft swap if we have sufficient
256*7c478bd9Sstevel@tonic-gate 			 * memory.
257*7c478bd9Sstevel@tonic-gate 			 */
258*7c478bd9Sstevel@tonic-gate 			if (avefree > desfree ||
259*7c478bd9Sstevel@tonic-gate 			    avefree < desfree && freemem > desfree)
260*7c478bd9Sstevel@tonic-gate 				continue;
261*7c478bd9Sstevel@tonic-gate 
262*7c478bd9Sstevel@tonic-gate 			/*
263*7c478bd9Sstevel@tonic-gate 			 * Skip processes which are exiting.  This is
264*7c478bd9Sstevel@tonic-gate 			 * determined by checking p_lwpcnt since SZOMB is
265*7c478bd9Sstevel@tonic-gate 			 * set after the address space is released.
266*7c478bd9Sstevel@tonic-gate 			 */
267*7c478bd9Sstevel@tonic-gate 			mutex_enter(&prp->p_lock);
268*7c478bd9Sstevel@tonic-gate 			if (prp->p_lwpcnt == 0 ||
269*7c478bd9Sstevel@tonic-gate 			    (prp->p_flag & SEXITLWPS) ||
270*7c478bd9Sstevel@tonic-gate 			    (prp->p_as != NULL && AS_ISPGLCK(prp->p_as))) {
271*7c478bd9Sstevel@tonic-gate 				mutex_exit(&prp->p_lock);
272*7c478bd9Sstevel@tonic-gate 				continue;
273*7c478bd9Sstevel@tonic-gate 			}
274*7c478bd9Sstevel@tonic-gate 
275*7c478bd9Sstevel@tonic-gate 			/*
276*7c478bd9Sstevel@tonic-gate 			 * Softswapping to kick out deadwood.
277*7c478bd9Sstevel@tonic-gate 			 */
278*7c478bd9Sstevel@tonic-gate 			proc_pri = -1;
279*7c478bd9Sstevel@tonic-gate 			t = prp->p_tlist;
280*7c478bd9Sstevel@tonic-gate 			do {
				/* Only loaded threads not already headed for the swap queue. */
281*7c478bd9Sstevel@tonic-gate 				if ((t->t_schedflag & (TS_SWAPENQ |
282*7c478bd9Sstevel@tonic-gate 				    TS_ON_SWAPQ | TS_LOAD)) != TS_LOAD)
283*7c478bd9Sstevel@tonic-gate 					continue;
284*7c478bd9Sstevel@tonic-gate 
285*7c478bd9Sstevel@tonic-gate 				thread_lock(t);
286*7c478bd9Sstevel@tonic-gate 				thread_pri = CL_SWAPOUT(t, SOFTSWAP);
287*7c478bd9Sstevel@tonic-gate 				thread_unlock(t);
288*7c478bd9Sstevel@tonic-gate 				if (thread_pri > proc_pri)
289*7c478bd9Sstevel@tonic-gate 					proc_pri = thread_pri;
290*7c478bd9Sstevel@tonic-gate 			} while ((t = t->t_forw) != prp->p_tlist);
291*7c478bd9Sstevel@tonic-gate 
292*7c478bd9Sstevel@tonic-gate 			if (proc_pri != -1) {
293*7c478bd9Sstevel@tonic-gate 				uint_t swrss;
294*7c478bd9Sstevel@tonic-gate 
				/*
				 * Drop pidlock (p_lock is still held, keeping
				 * prp alive) and softswap this process; the
				 * scan restarts from "top" afterwards.
				 */
295*7c478bd9Sstevel@tonic-gate 				mutex_exit(&pidlock);
296*7c478bd9Sstevel@tonic-gate 
297*7c478bd9Sstevel@tonic-gate 				TRACE_1(TR_FAC_SCHED, TR_SOFTSWAP,
298*7c478bd9Sstevel@tonic-gate 				    "softswap:prp %p", prp);
299*7c478bd9Sstevel@tonic-gate 
300*7c478bd9Sstevel@tonic-gate 				(void) swapout(prp, &swrss, SOFTSWAP);
301*7c478bd9Sstevel@tonic-gate 				softswap++;
302*7c478bd9Sstevel@tonic-gate 				prp->p_swrss += swrss;
303*7c478bd9Sstevel@tonic-gate 				mutex_exit(&prp->p_lock);
304*7c478bd9Sstevel@tonic-gate 				goto top;
305*7c478bd9Sstevel@tonic-gate 			}
306*7c478bd9Sstevel@tonic-gate 			mutex_exit(&prp->p_lock);
307*7c478bd9Sstevel@tonic-gate 		}
308*7c478bd9Sstevel@tonic-gate 	}
	/*
	 * Grab the chosen process's p_lock before dropping pidlock so it
	 * cannot be freed out from under us (same pattern as the hardswap
	 * path below).
	 */
309*7c478bd9Sstevel@tonic-gate 	if (swapin_prp != NULL)
310*7c478bd9Sstevel@tonic-gate 		mutex_enter(&swapin_prp->p_lock);
311*7c478bd9Sstevel@tonic-gate 	mutex_exit(&pidlock);
312*7c478bd9Sstevel@tonic-gate 
313*7c478bd9Sstevel@tonic-gate 	if (swapin_prp == NULL) {
314*7c478bd9Sstevel@tonic-gate 		TRACE_3(TR_FAC_SCHED, TR_RUNOUT,
315*7c478bd9Sstevel@tonic-gate 		"schedrunout:runout nswapped: %d, avefree: %ld freemem: %ld",
316*7c478bd9Sstevel@tonic-gate 		    nswapped, avefree, freemem);
317*7c478bd9Sstevel@tonic-gate 
318*7c478bd9Sstevel@tonic-gate 		t = curthread;
319*7c478bd9Sstevel@tonic-gate 		thread_lock(t);
320*7c478bd9Sstevel@tonic-gate 		runout++;
		/*
		 * NOTE(review): the swapper stops itself as if suspended
		 * (PR_SUSPENDED/SUSPEND_NORMAL); presumably the clock's
		 * wake_sched/wake_sched_sec path restarts it -- confirm.
		 */
321*7c478bd9Sstevel@tonic-gate 		t->t_schedflag |= (TS_ALLSTART & ~TS_CSTART);
322*7c478bd9Sstevel@tonic-gate 		t->t_whystop = PR_SUSPENDED;
323*7c478bd9Sstevel@tonic-gate 		t->t_whatstop = SUSPEND_NORMAL;
324*7c478bd9Sstevel@tonic-gate 		(void) new_mstate(t, LMS_SLEEP);
325*7c478bd9Sstevel@tonic-gate 		mutex_enter(&swap_cpr_lock);
326*7c478bd9Sstevel@tonic-gate 		CALLB_CPR_SAFE_BEGIN(&cprinfo);
327*7c478bd9Sstevel@tonic-gate 		mutex_exit(&swap_cpr_lock);
328*7c478bd9Sstevel@tonic-gate 		thread_stop(t);		/* change state and drop lock */
329*7c478bd9Sstevel@tonic-gate 		swtch();
330*7c478bd9Sstevel@tonic-gate 		mutex_enter(&swap_cpr_lock);
331*7c478bd9Sstevel@tonic-gate 		CALLB_CPR_SAFE_END(&cprinfo, &swap_cpr_lock);
332*7c478bd9Sstevel@tonic-gate 		mutex_exit(&swap_cpr_lock);
333*7c478bd9Sstevel@tonic-gate 		goto loop;
334*7c478bd9Sstevel@tonic-gate 	}
335*7c478bd9Sstevel@tonic-gate 
336*7c478bd9Sstevel@tonic-gate 	/*
337*7c478bd9Sstevel@tonic-gate 	 * Decide how deserving this process is to be brought in.
338*7c478bd9Sstevel@tonic-gate 	 * Needs is an estimate of how much core the process will
339*7c478bd9Sstevel@tonic-gate 	 * need.  If the process has been out for a while, then we
340*7c478bd9Sstevel@tonic-gate 	 * will bring it in with 1/2 the core needed, otherwise
341*7c478bd9Sstevel@tonic-gate 	 * we are conservative.
342*7c478bd9Sstevel@tonic-gate 	 */
343*7c478bd9Sstevel@tonic-gate 	divisor = 1;
344*7c478bd9Sstevel@tonic-gate 	swapout_time = (lbolt - swapin_proc_time) / hz;
345*7c478bd9Sstevel@tonic-gate 	if (swapout_time > maxslp / 2)
346*7c478bd9Sstevel@tonic-gate 		divisor = 2;
347*7c478bd9Sstevel@tonic-gate 
348*7c478bd9Sstevel@tonic-gate 	needs = MIN(swapin_prp->p_swrss, lotsfree);
349*7c478bd9Sstevel@tonic-gate 	needs = MAX(needs, min_procsize);
350*7c478bd9Sstevel@tonic-gate 	needs = needs / divisor;
351*7c478bd9Sstevel@tonic-gate 
352*7c478bd9Sstevel@tonic-gate 	/*
353*7c478bd9Sstevel@tonic-gate 	 * Use freemem, since we want processes to be swapped
354*7c478bd9Sstevel@tonic-gate 	 * in quickly.
355*7c478bd9Sstevel@tonic-gate 	 */
356*7c478bd9Sstevel@tonic-gate 	avail = freemem - deficit;
357*7c478bd9Sstevel@tonic-gate 	if (avail > (spgcnt_t)needs) {
358*7c478bd9Sstevel@tonic-gate 		deficit += needs;
359*7c478bd9Sstevel@tonic-gate 
360*7c478bd9Sstevel@tonic-gate 		TRACE_2(TR_FAC_SCHED, TR_SWAPIN_VALUES,
361*7c478bd9Sstevel@tonic-gate 		    "swapin_values: prp %p needs %lu", swapin_prp, needs);
362*7c478bd9Sstevel@tonic-gate 
363*7c478bd9Sstevel@tonic-gate 		if (swapin(swapin_prp)) {
364*7c478bd9Sstevel@tonic-gate 			mutex_exit(&swapin_prp->p_lock);
365*7c478bd9Sstevel@tonic-gate 			goto loop;
366*7c478bd9Sstevel@tonic-gate 		}
		/* Swapin failed: give back the deficit we charged above. */
367*7c478bd9Sstevel@tonic-gate 		deficit -= MIN(needs, deficit);
368*7c478bd9Sstevel@tonic-gate 		mutex_exit(&swapin_prp->p_lock);
369*7c478bd9Sstevel@tonic-gate 	} else {
370*7c478bd9Sstevel@tonic-gate 		mutex_exit(&swapin_prp->p_lock);
371*7c478bd9Sstevel@tonic-gate 		/*
372*7c478bd9Sstevel@tonic-gate 		 * If deficit is high, too many processes have been
373*7c478bd9Sstevel@tonic-gate 		 * swapped in so wait a sec before attempting to
374*7c478bd9Sstevel@tonic-gate 		 * swapin more.
375*7c478bd9Sstevel@tonic-gate 		 */
376*7c478bd9Sstevel@tonic-gate 		if (freemem > needs) {
377*7c478bd9Sstevel@tonic-gate 			TRACE_2(TR_FAC_SCHED, TR_HIGH_DEFICIT,
378*7c478bd9Sstevel@tonic-gate 			    "deficit: prp %p needs %lu", swapin_prp, needs);
379*7c478bd9Sstevel@tonic-gate 			goto block;
380*7c478bd9Sstevel@tonic-gate 		}
381*7c478bd9Sstevel@tonic-gate 	}
382*7c478bd9Sstevel@tonic-gate 
383*7c478bd9Sstevel@tonic-gate 	TRACE_2(TR_FAC_SCHED, TR_UNLOAD,
384*7c478bd9Sstevel@tonic-gate 	    "unload: prp %p needs %lu", swapin_prp, needs);
385*7c478bd9Sstevel@tonic-gate 
386*7c478bd9Sstevel@tonic-gate unload:
387*7c478bd9Sstevel@tonic-gate 	/*
388*7c478bd9Sstevel@tonic-gate 	 * Unload all unloadable modules, free all other memory
389*7c478bd9Sstevel@tonic-gate 	 * resources we can find, then look for a thread to hardswap.
390*7c478bd9Sstevel@tonic-gate 	 */
391*7c478bd9Sstevel@tonic-gate 	modreap();
392*7c478bd9Sstevel@tonic-gate 	segkp_cache_free();
393*7c478bd9Sstevel@tonic-gate 
394*7c478bd9Sstevel@tonic-gate 	swapout_prp = NULL;
395*7c478bd9Sstevel@tonic-gate 	mutex_enter(&pidlock);
396*7c478bd9Sstevel@tonic-gate 	for (prp = practive; prp != NULL; prp = prp->p_next) {
397*7c478bd9Sstevel@tonic-gate 
398*7c478bd9Sstevel@tonic-gate 		/*
399*7c478bd9Sstevel@tonic-gate 		 * Skip unswappable processes; stop the hardswap scan
400*7c478bd9Sstevel@tonic-gate 		 * entirely once memory pressure has eased.
401*7c478bd9Sstevel@tonic-gate 		 */
402*7c478bd9Sstevel@tonic-gate 		if (not_swappable(prp))
403*7c478bd9Sstevel@tonic-gate 			continue;
404*7c478bd9Sstevel@tonic-gate 
405*7c478bd9Sstevel@tonic-gate 		if (avefree > minfree ||
406*7c478bd9Sstevel@tonic-gate 		    avefree < minfree && freemem > desfree) {
407*7c478bd9Sstevel@tonic-gate 			swapout_prp = NULL;
408*7c478bd9Sstevel@tonic-gate 			break;
409*7c478bd9Sstevel@tonic-gate 		}
410*7c478bd9Sstevel@tonic-gate 
411*7c478bd9Sstevel@tonic-gate 		/*
412*7c478bd9Sstevel@tonic-gate 		 * Skip processes which are exiting.  This is determined
413*7c478bd9Sstevel@tonic-gate 		 * by checking p_lwpcnt since SZOMB is set after the
414*7c478bd9Sstevel@tonic-gate 		 * address space is released.
415*7c478bd9Sstevel@tonic-gate 		 */
416*7c478bd9Sstevel@tonic-gate 		mutex_enter(&prp->p_lock);
417*7c478bd9Sstevel@tonic-gate 		if (prp->p_lwpcnt == 0 ||
418*7c478bd9Sstevel@tonic-gate 		    (prp->p_flag & SEXITLWPS) ||
419*7c478bd9Sstevel@tonic-gate 		    (prp->p_as != NULL && AS_ISPGLCK(prp->p_as))) {
420*7c478bd9Sstevel@tonic-gate 			mutex_exit(&prp->p_lock);
421*7c478bd9Sstevel@tonic-gate 			continue;
422*7c478bd9Sstevel@tonic-gate 		}
423*7c478bd9Sstevel@tonic-gate 
424*7c478bd9Sstevel@tonic-gate 		proc_pri = -1;
425*7c478bd9Sstevel@tonic-gate 		t = prp->p_tlist;
426*7c478bd9Sstevel@tonic-gate 		do {
427*7c478bd9Sstevel@tonic-gate 			if ((t->t_schedflag & (TS_SWAPENQ |
428*7c478bd9Sstevel@tonic-gate 			    TS_ON_SWAPQ | TS_LOAD)) != TS_LOAD)
429*7c478bd9Sstevel@tonic-gate 				continue;
430*7c478bd9Sstevel@tonic-gate 
431*7c478bd9Sstevel@tonic-gate 			thread_lock(t);
432*7c478bd9Sstevel@tonic-gate 			thread_pri = CL_SWAPOUT(t, HARDSWAP);
433*7c478bd9Sstevel@tonic-gate 			thread_unlock(t);
434*7c478bd9Sstevel@tonic-gate 			if (thread_pri > proc_pri)
435*7c478bd9Sstevel@tonic-gate 				proc_pri = thread_pri;
436*7c478bd9Sstevel@tonic-gate 		} while ((t = t->t_forw) != prp->p_tlist);
437*7c478bd9Sstevel@tonic-gate 
438*7c478bd9Sstevel@tonic-gate 		mutex_exit(&prp->p_lock);
439*7c478bd9Sstevel@tonic-gate 		if (proc_pri == -1)
440*7c478bd9Sstevel@tonic-gate 			continue;
441*7c478bd9Sstevel@tonic-gate 
442*7c478bd9Sstevel@tonic-gate 		/*
443*7c478bd9Sstevel@tonic-gate 		 * Swapout processes sleeping with a lower priority
444*7c478bd9Sstevel@tonic-gate 		 * than the one currently being swapped in, if any.
445*7c478bd9Sstevel@tonic-gate 		 */
446*7c478bd9Sstevel@tonic-gate 		if (swapin_prp == NULL || swapin_pri > proc_pri) {
447*7c478bd9Sstevel@tonic-gate 			TRACE_2(TR_FAC_SCHED, TR_CHOOSE_SWAPOUT,
448*7c478bd9Sstevel@tonic-gate 			    "hardswap: prp %p needs %lu", prp, needs);
449*7c478bd9Sstevel@tonic-gate 
450*7c478bd9Sstevel@tonic-gate 			if (swapout_prp == NULL || proc_pri < chosen_pri) {
451*7c478bd9Sstevel@tonic-gate 				swapout_prp = prp;
452*7c478bd9Sstevel@tonic-gate 				chosen_pri = proc_pri;
453*7c478bd9Sstevel@tonic-gate 			}
454*7c478bd9Sstevel@tonic-gate 		}
455*7c478bd9Sstevel@tonic-gate 	}
456*7c478bd9Sstevel@tonic-gate 
457*7c478bd9Sstevel@tonic-gate 	/*
458*7c478bd9Sstevel@tonic-gate 	 * Acquire the "p_lock" before dropping "pidlock"
459*7c478bd9Sstevel@tonic-gate 	 * to prevent the proc structure from being freed
460*7c478bd9Sstevel@tonic-gate 	 * if the process exits before swapout completes.
461*7c478bd9Sstevel@tonic-gate 	 */
462*7c478bd9Sstevel@tonic-gate 	if (swapout_prp != NULL)
463*7c478bd9Sstevel@tonic-gate 		mutex_enter(&swapout_prp->p_lock);
464*7c478bd9Sstevel@tonic-gate 	mutex_exit(&pidlock);
465*7c478bd9Sstevel@tonic-gate 
466*7c478bd9Sstevel@tonic-gate 	if ((prp = swapout_prp) != NULL) {
467*7c478bd9Sstevel@tonic-gate 		uint_t swrss = 0;
468*7c478bd9Sstevel@tonic-gate 		int swapped;
469*7c478bd9Sstevel@tonic-gate 
470*7c478bd9Sstevel@tonic-gate 		swapped = swapout(prp, &swrss, HARDSWAP);
471*7c478bd9Sstevel@tonic-gate 		if (swapped) {
472*7c478bd9Sstevel@tonic-gate 			/*
473*7c478bd9Sstevel@tonic-gate 			 * If desperate, we want to give the space obtained
474*7c478bd9Sstevel@tonic-gate 			 * by swapping this process out to processes in core,
475*7c478bd9Sstevel@tonic-gate 			 * so we give them a chance by increasing deficit.
476*7c478bd9Sstevel@tonic-gate 			 */
477*7c478bd9Sstevel@tonic-gate 			prp->p_swrss += swrss;
478*7c478bd9Sstevel@tonic-gate 			if (desperate)
479*7c478bd9Sstevel@tonic-gate 				deficit += MIN(prp->p_swrss, lotsfree);
480*7c478bd9Sstevel@tonic-gate 			hardswap++;
481*7c478bd9Sstevel@tonic-gate 		}
482*7c478bd9Sstevel@tonic-gate 		mutex_exit(&swapout_prp->p_lock);
483*7c478bd9Sstevel@tonic-gate 
484*7c478bd9Sstevel@tonic-gate 		if (swapped)
485*7c478bd9Sstevel@tonic-gate 			goto loop;
486*7c478bd9Sstevel@tonic-gate 	}
487*7c478bd9Sstevel@tonic-gate 
488*7c478bd9Sstevel@tonic-gate 	/*
489*7c478bd9Sstevel@tonic-gate 	 * Delay for 1 second and look again later.
490*7c478bd9Sstevel@tonic-gate 	 */
491*7c478bd9Sstevel@tonic-gate 	TRACE_3(TR_FAC_SCHED, TR_RUNIN,
492*7c478bd9Sstevel@tonic-gate 	    "schedrunin:runin nswapped: %d, avefree: %ld freemem: %ld",
493*7c478bd9Sstevel@tonic-gate 	    nswapped, avefree, freemem);
494*7c478bd9Sstevel@tonic-gate 
495*7c478bd9Sstevel@tonic-gate block:
496*7c478bd9Sstevel@tonic-gate 	t = curthread;
497*7c478bd9Sstevel@tonic-gate 	thread_lock(t);
498*7c478bd9Sstevel@tonic-gate 	runin++;
499*7c478bd9Sstevel@tonic-gate 	t->t_schedflag |= (TS_ALLSTART & ~TS_CSTART);
500*7c478bd9Sstevel@tonic-gate 	t->t_whystop = PR_SUSPENDED;
501*7c478bd9Sstevel@tonic-gate 	t->t_whatstop = SUSPEND_NORMAL;
502*7c478bd9Sstevel@tonic-gate 	(void) new_mstate(t, LMS_SLEEP);
503*7c478bd9Sstevel@tonic-gate 	mutex_enter(&swap_cpr_lock);
504*7c478bd9Sstevel@tonic-gate 	CALLB_CPR_SAFE_BEGIN(&cprinfo);
505*7c478bd9Sstevel@tonic-gate 	mutex_exit(&swap_cpr_lock);
506*7c478bd9Sstevel@tonic-gate 	thread_stop(t);		/* change to stop state and drop lock */
507*7c478bd9Sstevel@tonic-gate 	swtch();
508*7c478bd9Sstevel@tonic-gate 	mutex_enter(&swap_cpr_lock);
509*7c478bd9Sstevel@tonic-gate 	CALLB_CPR_SAFE_END(&cprinfo, &swap_cpr_lock);
510*7c478bd9Sstevel@tonic-gate 	mutex_exit(&swap_cpr_lock);
511*7c478bd9Sstevel@tonic-gate 	goto loop;
512*7c478bd9Sstevel@tonic-gate }
513*7c478bd9Sstevel@tonic-gate 
514*7c478bd9Sstevel@tonic-gate /*
515*7c478bd9Sstevel@tonic-gate  * Remove the specified thread from the swap queue.
516*7c478bd9Sstevel@tonic-gate  */
517*7c478bd9Sstevel@tonic-gate static void
518*7c478bd9Sstevel@tonic-gate swapdeq(kthread_id_t tp)
519*7c478bd9Sstevel@tonic-gate {
520*7c478bd9Sstevel@tonic-gate 	kthread_id_t *tpp;
521*7c478bd9Sstevel@tonic-gate 
522*7c478bd9Sstevel@tonic-gate 	ASSERT(THREAD_LOCK_HELD(tp));
523*7c478bd9Sstevel@tonic-gate 	ASSERT(tp->t_schedflag & TS_ON_SWAPQ);
524*7c478bd9Sstevel@tonic-gate 
525*7c478bd9Sstevel@tonic-gate 	tpp = &tswap_queue;
526*7c478bd9Sstevel@tonic-gate 	for (;;) {
527*7c478bd9Sstevel@tonic-gate 		ASSERT(*tpp != NULL);
528*7c478bd9Sstevel@tonic-gate 		if (*tpp == tp)
529*7c478bd9Sstevel@tonic-gate 			break;
530*7c478bd9Sstevel@tonic-gate 		tpp = &(*tpp)->t_link;
531*7c478bd9Sstevel@tonic-gate 	}
532*7c478bd9Sstevel@tonic-gate 	*tpp = tp->t_link;
533*7c478bd9Sstevel@tonic-gate 	tp->t_schedflag &= ~TS_ON_SWAPQ;
534*7c478bd9Sstevel@tonic-gate }
535*7c478bd9Sstevel@tonic-gate 
536*7c478bd9Sstevel@tonic-gate /*
537*7c478bd9Sstevel@tonic-gate  * Swap in lwps.  Returns nonzero on success (i.e., if at least one lwp is
538*7c478bd9Sstevel@tonic-gate  * swapped in) and 0 on failure.
539*7c478bd9Sstevel@tonic-gate  */
540*7c478bd9Sstevel@tonic-gate static int
541*7c478bd9Sstevel@tonic-gate swapin(proc_t *pp)
542*7c478bd9Sstevel@tonic-gate {
543*7c478bd9Sstevel@tonic-gate 	kthread_id_t tp;
544*7c478bd9Sstevel@tonic-gate 	int err;
545*7c478bd9Sstevel@tonic-gate 	int num_swapped_in = 0;
546*7c478bd9Sstevel@tonic-gate 	struct cpu *cpup = CPU;
547*7c478bd9Sstevel@tonic-gate 	pri_t thread_pri;
548*7c478bd9Sstevel@tonic-gate 
549*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&pp->p_lock));
550*7c478bd9Sstevel@tonic-gate 	ASSERT(pp->p_swapcnt);
551*7c478bd9Sstevel@tonic-gate 
552*7c478bd9Sstevel@tonic-gate top:
553*7c478bd9Sstevel@tonic-gate 	tp = pp->p_tlist;
554*7c478bd9Sstevel@tonic-gate 	do {
555*7c478bd9Sstevel@tonic-gate 		/*
556*7c478bd9Sstevel@tonic-gate 		 * Only swapin eligible lwps (specified by the scheduling
557*7c478bd9Sstevel@tonic-gate 		 * class) which are unloaded and ready to run.
558*7c478bd9Sstevel@tonic-gate 		 */
559*7c478bd9Sstevel@tonic-gate 		thread_lock(tp);
560*7c478bd9Sstevel@tonic-gate 		thread_pri = CL_SWAPIN(tp, 0);
561*7c478bd9Sstevel@tonic-gate 		if (thread_pri != -1 && tp->t_state == TS_RUN &&
562*7c478bd9Sstevel@tonic-gate 		    (tp->t_schedflag & TS_LOAD) == 0) {
563*7c478bd9Sstevel@tonic-gate 			size_t stack_size;
564*7c478bd9Sstevel@tonic-gate 			pgcnt_t stack_pages;
565*7c478bd9Sstevel@tonic-gate 
566*7c478bd9Sstevel@tonic-gate 			ASSERT((tp->t_schedflag & TS_ON_SWAPQ) == 0);
567*7c478bd9Sstevel@tonic-gate 
568*7c478bd9Sstevel@tonic-gate 			thread_unlock(tp);
569*7c478bd9Sstevel@tonic-gate 			/*
570*7c478bd9Sstevel@tonic-gate 			 * Now drop the p_lock since the stack needs
571*7c478bd9Sstevel@tonic-gate 			 * to brought in.
572*7c478bd9Sstevel@tonic-gate 			 */
573*7c478bd9Sstevel@tonic-gate 			mutex_exit(&pp->p_lock);
574*7c478bd9Sstevel@tonic-gate 
575*7c478bd9Sstevel@tonic-gate 			stack_size = swapsize(tp->t_swap);
576*7c478bd9Sstevel@tonic-gate 			stack_pages = btopr(stack_size);
577*7c478bd9Sstevel@tonic-gate 			/* Kernel probe */
578*7c478bd9Sstevel@tonic-gate 			TNF_PROBE_4(swapin_lwp, "vm swap swapin", /* CSTYLED */,
579*7c478bd9Sstevel@tonic-gate 				tnf_pid,	pid,		pp->p_pid,
580*7c478bd9Sstevel@tonic-gate 				tnf_lwpid,	lwpid,		tp->t_tid,
581*7c478bd9Sstevel@tonic-gate 				tnf_kthread_id,	tid,		tp,
582*7c478bd9Sstevel@tonic-gate 				tnf_ulong,	page_count,	stack_pages);
583*7c478bd9Sstevel@tonic-gate 
584*7c478bd9Sstevel@tonic-gate 			rw_enter(&kas.a_lock, RW_READER);
585*7c478bd9Sstevel@tonic-gate 			err = segkp_fault(segkp->s_as->a_hat, segkp,
586*7c478bd9Sstevel@tonic-gate 			    tp->t_swap, stack_size, F_SOFTLOCK, S_OTHER);
587*7c478bd9Sstevel@tonic-gate 			rw_exit(&kas.a_lock);
588*7c478bd9Sstevel@tonic-gate 
589*7c478bd9Sstevel@tonic-gate #ifdef __sparc
590*7c478bd9Sstevel@tonic-gate 			lwp_swapin(tp);
591*7c478bd9Sstevel@tonic-gate #endif /* __sparc */
592*7c478bd9Sstevel@tonic-gate 
593*7c478bd9Sstevel@tonic-gate 			/*
594*7c478bd9Sstevel@tonic-gate 			 * Re-acquire the p_lock.
595*7c478bd9Sstevel@tonic-gate 			 */
596*7c478bd9Sstevel@tonic-gate 			mutex_enter(&pp->p_lock);
597*7c478bd9Sstevel@tonic-gate 			if (err) {
598*7c478bd9Sstevel@tonic-gate 				num_swapped_in = 0;
599*7c478bd9Sstevel@tonic-gate 				break;
600*7c478bd9Sstevel@tonic-gate 			} else {
601*7c478bd9Sstevel@tonic-gate 				CPU_STATS_ADDQ(cpup, vm, swapin, 1);
602*7c478bd9Sstevel@tonic-gate 				CPU_STATS_ADDQ(cpup, vm, pgswapin,
603*7c478bd9Sstevel@tonic-gate 				    stack_pages);
604*7c478bd9Sstevel@tonic-gate 
605*7c478bd9Sstevel@tonic-gate 				pp->p_swapcnt--;
606*7c478bd9Sstevel@tonic-gate 				pp->p_swrss -= stack_pages;
607*7c478bd9Sstevel@tonic-gate 
608*7c478bd9Sstevel@tonic-gate 				thread_lock(tp);
609*7c478bd9Sstevel@tonic-gate 				tp->t_schedflag |= TS_LOAD;
610*7c478bd9Sstevel@tonic-gate 				dq_sruninc(tp);
611*7c478bd9Sstevel@tonic-gate 
612*7c478bd9Sstevel@tonic-gate 				tp->t_stime = lbolt;	/* set swapin time */
613*7c478bd9Sstevel@tonic-gate 				thread_unlock(tp);
614*7c478bd9Sstevel@tonic-gate 
615*7c478bd9Sstevel@tonic-gate 				nswapped--;
616*7c478bd9Sstevel@tonic-gate 				tot_swapped_in++;
617*7c478bd9Sstevel@tonic-gate 				num_swapped_in++;
618*7c478bd9Sstevel@tonic-gate 
619*7c478bd9Sstevel@tonic-gate 				TRACE_2(TR_FAC_SCHED, TR_SWAPIN,
620*7c478bd9Sstevel@tonic-gate 				    "swapin: pp %p stack_pages %lu",
621*7c478bd9Sstevel@tonic-gate 				    pp, stack_pages);
622*7c478bd9Sstevel@tonic-gate 				goto top;
623*7c478bd9Sstevel@tonic-gate 			}
624*7c478bd9Sstevel@tonic-gate 		}
625*7c478bd9Sstevel@tonic-gate 		thread_unlock(tp);
626*7c478bd9Sstevel@tonic-gate 	} while ((tp = tp->t_forw) != pp->p_tlist);
627*7c478bd9Sstevel@tonic-gate 	return (num_swapped_in);
628*7c478bd9Sstevel@tonic-gate }
629*7c478bd9Sstevel@tonic-gate 
630*7c478bd9Sstevel@tonic-gate /*
631*7c478bd9Sstevel@tonic-gate  * Swap out lwps.  Returns nonzero on success (i.e., if at least one lwp is
632*7c478bd9Sstevel@tonic-gate  * swapped out) and 0 on failure.
633*7c478bd9Sstevel@tonic-gate  */
634*7c478bd9Sstevel@tonic-gate static int
635*7c478bd9Sstevel@tonic-gate swapout(proc_t *pp, uint_t *swrss, int swapflags)
636*7c478bd9Sstevel@tonic-gate {
637*7c478bd9Sstevel@tonic-gate 	kthread_id_t tp;
638*7c478bd9Sstevel@tonic-gate 	pgcnt_t ws_pages = 0;
639*7c478bd9Sstevel@tonic-gate 	int err;
640*7c478bd9Sstevel@tonic-gate 	int swapped_lwps = 0;
641*7c478bd9Sstevel@tonic-gate 	struct as *as = pp->p_as;
642*7c478bd9Sstevel@tonic-gate 	struct cpu *cpup = CPU;
643*7c478bd9Sstevel@tonic-gate 	pri_t thread_pri;
644*7c478bd9Sstevel@tonic-gate 
645*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&pp->p_lock));
646*7c478bd9Sstevel@tonic-gate 
647*7c478bd9Sstevel@tonic-gate 	if (pp->p_lwpcnt == 0 || (pp->p_flag & SEXITLWPS))
648*7c478bd9Sstevel@tonic-gate 		return (0);
649*7c478bd9Sstevel@tonic-gate 
650*7c478bd9Sstevel@tonic-gate top:
651*7c478bd9Sstevel@tonic-gate 	tp = pp->p_tlist;
652*7c478bd9Sstevel@tonic-gate 	do {
653*7c478bd9Sstevel@tonic-gate 		klwp_t *lwp = ttolwp(tp);
654*7c478bd9Sstevel@tonic-gate 
655*7c478bd9Sstevel@tonic-gate 		/*
656*7c478bd9Sstevel@tonic-gate 		 * Swapout eligible lwps (specified by the scheduling
657*7c478bd9Sstevel@tonic-gate 		 * class) which don't have TS_DONT_SWAP set.  Set the
658*7c478bd9Sstevel@tonic-gate 		 * "intent to swap" flag (TS_SWAPENQ) on threads
659*7c478bd9Sstevel@tonic-gate 		 * which have TS_DONT_SWAP set so that they can be
660*7c478bd9Sstevel@tonic-gate 		 * swapped if and when they reach a safe point.
661*7c478bd9Sstevel@tonic-gate 		 */
662*7c478bd9Sstevel@tonic-gate 		thread_lock(tp);
663*7c478bd9Sstevel@tonic-gate 		thread_pri = CL_SWAPOUT(tp, swapflags);
664*7c478bd9Sstevel@tonic-gate 		if (thread_pri != -1) {
665*7c478bd9Sstevel@tonic-gate 			if (tp->t_schedflag & TS_DONT_SWAP) {
666*7c478bd9Sstevel@tonic-gate 				tp->t_schedflag |= TS_SWAPENQ;
667*7c478bd9Sstevel@tonic-gate 				tp->t_trapret = 1;
668*7c478bd9Sstevel@tonic-gate 				aston(tp);
669*7c478bd9Sstevel@tonic-gate 			} else {
670*7c478bd9Sstevel@tonic-gate 				pgcnt_t stack_pages;
671*7c478bd9Sstevel@tonic-gate 				size_t stack_size;
672*7c478bd9Sstevel@tonic-gate 
673*7c478bd9Sstevel@tonic-gate 				ASSERT((tp->t_schedflag &
674*7c478bd9Sstevel@tonic-gate 				    (TS_DONT_SWAP | TS_LOAD)) == TS_LOAD);
675*7c478bd9Sstevel@tonic-gate 
676*7c478bd9Sstevel@tonic-gate 				if (lock_try(&tp->t_lock)) {
677*7c478bd9Sstevel@tonic-gate 					/*
678*7c478bd9Sstevel@tonic-gate 					 * Remove thread from the swap_queue.
679*7c478bd9Sstevel@tonic-gate 					 */
680*7c478bd9Sstevel@tonic-gate 					if (tp->t_schedflag & TS_ON_SWAPQ) {
681*7c478bd9Sstevel@tonic-gate 						ASSERT(!(tp->t_schedflag &
682*7c478bd9Sstevel@tonic-gate 						    TS_SWAPENQ));
683*7c478bd9Sstevel@tonic-gate 						swapdeq(tp);
684*7c478bd9Sstevel@tonic-gate 					} else if (tp->t_state == TS_RUN)
685*7c478bd9Sstevel@tonic-gate 						dq_srundec(tp);
686*7c478bd9Sstevel@tonic-gate 
687*7c478bd9Sstevel@tonic-gate 					tp->t_schedflag &=
688*7c478bd9Sstevel@tonic-gate 					    ~(TS_LOAD | TS_SWAPENQ);
689*7c478bd9Sstevel@tonic-gate 					lock_clear(&tp->t_lock);
690*7c478bd9Sstevel@tonic-gate 
691*7c478bd9Sstevel@tonic-gate 					/*
692*7c478bd9Sstevel@tonic-gate 					 * Set swapout time if the thread isn't
693*7c478bd9Sstevel@tonic-gate 					 * sleeping.
694*7c478bd9Sstevel@tonic-gate 					 */
695*7c478bd9Sstevel@tonic-gate 					if (tp->t_state != TS_SLEEP)
696*7c478bd9Sstevel@tonic-gate 						tp->t_stime = lbolt;
697*7c478bd9Sstevel@tonic-gate 					thread_unlock(tp);
698*7c478bd9Sstevel@tonic-gate 
699*7c478bd9Sstevel@tonic-gate 					nswapped++;
700*7c478bd9Sstevel@tonic-gate 					tot_swapped_out++;
701*7c478bd9Sstevel@tonic-gate 
702*7c478bd9Sstevel@tonic-gate 					lwp->lwp_ru.nswap++;
703*7c478bd9Sstevel@tonic-gate 
704*7c478bd9Sstevel@tonic-gate 					/*
705*7c478bd9Sstevel@tonic-gate 					 * Now drop the p_lock since the
706*7c478bd9Sstevel@tonic-gate 					 * stack needs to pushed out.
707*7c478bd9Sstevel@tonic-gate 					 */
708*7c478bd9Sstevel@tonic-gate 					mutex_exit(&pp->p_lock);
709*7c478bd9Sstevel@tonic-gate 
710*7c478bd9Sstevel@tonic-gate 					stack_size = swapsize(tp->t_swap);
711*7c478bd9Sstevel@tonic-gate 					stack_pages = btopr(stack_size);
712*7c478bd9Sstevel@tonic-gate 					ws_pages += stack_pages;
713*7c478bd9Sstevel@tonic-gate 					/* Kernel probe */
714*7c478bd9Sstevel@tonic-gate 					TNF_PROBE_4(swapout_lwp,
715*7c478bd9Sstevel@tonic-gate 						"vm swap swapout",
716*7c478bd9Sstevel@tonic-gate 						/* CSTYLED */,
717*7c478bd9Sstevel@tonic-gate 						tnf_pid, pid, pp->p_pid,
718*7c478bd9Sstevel@tonic-gate 						tnf_lwpid, lwpid, tp->t_tid,
719*7c478bd9Sstevel@tonic-gate 						tnf_kthread_id, tid, tp,
720*7c478bd9Sstevel@tonic-gate 						tnf_ulong, page_count,
721*7c478bd9Sstevel@tonic-gate 							stack_pages);
722*7c478bd9Sstevel@tonic-gate 
723*7c478bd9Sstevel@tonic-gate 					rw_enter(&kas.a_lock, RW_READER);
724*7c478bd9Sstevel@tonic-gate 					err = segkp_fault(segkp->s_as->a_hat,
725*7c478bd9Sstevel@tonic-gate 					    segkp, tp->t_swap, stack_size,
726*7c478bd9Sstevel@tonic-gate 					    F_SOFTUNLOCK, S_WRITE);
727*7c478bd9Sstevel@tonic-gate 					rw_exit(&kas.a_lock);
728*7c478bd9Sstevel@tonic-gate 
729*7c478bd9Sstevel@tonic-gate 					if (err) {
730*7c478bd9Sstevel@tonic-gate 						cmn_err(CE_PANIC,
731*7c478bd9Sstevel@tonic-gate 						    "swapout: segkp_fault "
732*7c478bd9Sstevel@tonic-gate 						    "failed err: %d", err);
733*7c478bd9Sstevel@tonic-gate 					}
734*7c478bd9Sstevel@tonic-gate 					CPU_STATS_ADDQ(cpup,
735*7c478bd9Sstevel@tonic-gate 					    vm, pgswapout, stack_pages);
736*7c478bd9Sstevel@tonic-gate 
737*7c478bd9Sstevel@tonic-gate 					mutex_enter(&pp->p_lock);
738*7c478bd9Sstevel@tonic-gate 					pp->p_swapcnt++;
739*7c478bd9Sstevel@tonic-gate 					swapped_lwps++;
740*7c478bd9Sstevel@tonic-gate 					goto top;
741*7c478bd9Sstevel@tonic-gate 				}
742*7c478bd9Sstevel@tonic-gate 			}
743*7c478bd9Sstevel@tonic-gate 		}
744*7c478bd9Sstevel@tonic-gate 		thread_unlock(tp);
745*7c478bd9Sstevel@tonic-gate 	} while ((tp = tp->t_forw) != pp->p_tlist);
746*7c478bd9Sstevel@tonic-gate 
747*7c478bd9Sstevel@tonic-gate 	/*
748*7c478bd9Sstevel@tonic-gate 	 * Unload address space when all lwps are swapped out.
749*7c478bd9Sstevel@tonic-gate 	 */
750*7c478bd9Sstevel@tonic-gate 	if (pp->p_swapcnt == pp->p_lwpcnt) {
751*7c478bd9Sstevel@tonic-gate 		size_t as_size = 0;
752*7c478bd9Sstevel@tonic-gate 
753*7c478bd9Sstevel@tonic-gate 		/*
754*7c478bd9Sstevel@tonic-gate 		 * Avoid invoking as_swapout() if the process has
755*7c478bd9Sstevel@tonic-gate 		 * no MMU resources since pageout will eventually
756*7c478bd9Sstevel@tonic-gate 		 * steal pages belonging to this address space.  This
757*7c478bd9Sstevel@tonic-gate 		 * saves CPU cycles as the number of pages that are
758*7c478bd9Sstevel@tonic-gate 		 * potentially freed or pushed out by the segment
759*7c478bd9Sstevel@tonic-gate 		 * swapout operation is very small.
760*7c478bd9Sstevel@tonic-gate 		 */
761*7c478bd9Sstevel@tonic-gate 		if (rm_asrss(pp->p_as) != 0)
762*7c478bd9Sstevel@tonic-gate 			as_size = as_swapout(as);
763*7c478bd9Sstevel@tonic-gate 
764*7c478bd9Sstevel@tonic-gate 		CPU_STATS_ADDQ(cpup, vm, pgswapout, btop(as_size));
765*7c478bd9Sstevel@tonic-gate 		CPU_STATS_ADDQ(cpup, vm, swapout, 1);
766*7c478bd9Sstevel@tonic-gate 		ws_pages += btop(as_size);
767*7c478bd9Sstevel@tonic-gate 
768*7c478bd9Sstevel@tonic-gate 		TRACE_2(TR_FAC_SCHED, TR_SWAPOUT,
769*7c478bd9Sstevel@tonic-gate 		    "swapout: pp %p pages_pushed %lu", pp, ws_pages);
770*7c478bd9Sstevel@tonic-gate 		/* Kernel probe */
771*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_2(swapout_process, "vm swap swapout", /* CSTYLED */,
772*7c478bd9Sstevel@tonic-gate 			tnf_pid,	pid,		pp->p_pid,
773*7c478bd9Sstevel@tonic-gate 			tnf_ulong,	page_count,	ws_pages);
774*7c478bd9Sstevel@tonic-gate 	}
775*7c478bd9Sstevel@tonic-gate 	*swrss = ws_pages;
776*7c478bd9Sstevel@tonic-gate 	return (swapped_lwps);
777*7c478bd9Sstevel@tonic-gate }
778*7c478bd9Sstevel@tonic-gate 
779*7c478bd9Sstevel@tonic-gate void
780*7c478bd9Sstevel@tonic-gate swapout_lwp(klwp_t *lwp)
781*7c478bd9Sstevel@tonic-gate {
782*7c478bd9Sstevel@tonic-gate 	kthread_id_t tp = curthread;
783*7c478bd9Sstevel@tonic-gate 
784*7c478bd9Sstevel@tonic-gate 	ASSERT(curthread == lwptot(lwp));
785*7c478bd9Sstevel@tonic-gate 
786*7c478bd9Sstevel@tonic-gate 	/*
787*7c478bd9Sstevel@tonic-gate 	 * Don't insert the thread onto the swap queue if
788*7c478bd9Sstevel@tonic-gate 	 * sufficient memory is available.
789*7c478bd9Sstevel@tonic-gate 	 */
790*7c478bd9Sstevel@tonic-gate 	if (avefree > desfree || avefree < desfree && freemem > desfree) {
791*7c478bd9Sstevel@tonic-gate 		thread_lock(tp);
792*7c478bd9Sstevel@tonic-gate 		tp->t_schedflag &= ~TS_SWAPENQ;
793*7c478bd9Sstevel@tonic-gate 		thread_unlock(tp);
794*7c478bd9Sstevel@tonic-gate 		return;
795*7c478bd9Sstevel@tonic-gate 	}
796*7c478bd9Sstevel@tonic-gate 
797*7c478bd9Sstevel@tonic-gate 	/*
798*7c478bd9Sstevel@tonic-gate 	 * Lock the thread, then move it to the swapped queue from the
799*7c478bd9Sstevel@tonic-gate 	 * onproc queue and set its state to be TS_RUN.
800*7c478bd9Sstevel@tonic-gate 	 */
801*7c478bd9Sstevel@tonic-gate 	thread_lock(tp);
802*7c478bd9Sstevel@tonic-gate 	ASSERT(tp->t_state == TS_ONPROC);
803*7c478bd9Sstevel@tonic-gate 	if (tp->t_schedflag & TS_SWAPENQ) {
804*7c478bd9Sstevel@tonic-gate 		tp->t_schedflag &= ~TS_SWAPENQ;
805*7c478bd9Sstevel@tonic-gate 
806*7c478bd9Sstevel@tonic-gate 		/*
807*7c478bd9Sstevel@tonic-gate 		 * Set the state of this thread to be runnable
808*7c478bd9Sstevel@tonic-gate 		 * and move it from the onproc queue to the swap queue.
809*7c478bd9Sstevel@tonic-gate 		 */
810*7c478bd9Sstevel@tonic-gate 		disp_swapped_enq(tp);
811*7c478bd9Sstevel@tonic-gate 
812*7c478bd9Sstevel@tonic-gate 		/*
813*7c478bd9Sstevel@tonic-gate 		 * Insert the thread onto the swap queue.
814*7c478bd9Sstevel@tonic-gate 		 */
815*7c478bd9Sstevel@tonic-gate 		tp->t_link = tswap_queue;
816*7c478bd9Sstevel@tonic-gate 		tswap_queue = tp;
817*7c478bd9Sstevel@tonic-gate 		tp->t_schedflag |= TS_ON_SWAPQ;
818*7c478bd9Sstevel@tonic-gate 
819*7c478bd9Sstevel@tonic-gate 		thread_unlock_nopreempt(tp);
820*7c478bd9Sstevel@tonic-gate 
821*7c478bd9Sstevel@tonic-gate 		TRACE_1(TR_FAC_SCHED, TR_SWAPOUT_LWP, "swapout_lwp:%x", lwp);
822*7c478bd9Sstevel@tonic-gate 
823*7c478bd9Sstevel@tonic-gate 		swtch();
824*7c478bd9Sstevel@tonic-gate 	} else {
825*7c478bd9Sstevel@tonic-gate 		thread_unlock(tp);
826*7c478bd9Sstevel@tonic-gate 	}
827*7c478bd9Sstevel@tonic-gate }
828*7c478bd9Sstevel@tonic-gate 
829*7c478bd9Sstevel@tonic-gate /*
830*7c478bd9Sstevel@tonic-gate  * Swap all threads on the swap queue.
831*7c478bd9Sstevel@tonic-gate  */
832*7c478bd9Sstevel@tonic-gate static void
833*7c478bd9Sstevel@tonic-gate process_swap_queue(void)
834*7c478bd9Sstevel@tonic-gate {
835*7c478bd9Sstevel@tonic-gate 	kthread_id_t tp;
836*7c478bd9Sstevel@tonic-gate 	uint_t ws_pages;
837*7c478bd9Sstevel@tonic-gate 	proc_t *pp;
838*7c478bd9Sstevel@tonic-gate 	struct cpu *cpup = CPU;
839*7c478bd9Sstevel@tonic-gate 	klwp_t *lwp;
840*7c478bd9Sstevel@tonic-gate 	int err;
841*7c478bd9Sstevel@tonic-gate 
842*7c478bd9Sstevel@tonic-gate 	if (tswap_queue == NULL)
843*7c478bd9Sstevel@tonic-gate 		return;
844*7c478bd9Sstevel@tonic-gate 
845*7c478bd9Sstevel@tonic-gate 	/*
846*7c478bd9Sstevel@tonic-gate 	 * Acquire the "swapped_lock" which locks the swap queue,
847*7c478bd9Sstevel@tonic-gate 	 * and unload the stacks of all threads on it.
848*7c478bd9Sstevel@tonic-gate 	 */
849*7c478bd9Sstevel@tonic-gate 	disp_lock_enter(&swapped_lock);
850*7c478bd9Sstevel@tonic-gate 	while ((tp = tswap_queue) != NULL) {
851*7c478bd9Sstevel@tonic-gate 		pgcnt_t stack_pages;
852*7c478bd9Sstevel@tonic-gate 		size_t stack_size;
853*7c478bd9Sstevel@tonic-gate 
854*7c478bd9Sstevel@tonic-gate 		tswap_queue = tp->t_link;
855*7c478bd9Sstevel@tonic-gate 		tp->t_link = NULL;
856*7c478bd9Sstevel@tonic-gate 
857*7c478bd9Sstevel@tonic-gate 		/*
858*7c478bd9Sstevel@tonic-gate 		 * Drop the "dispatcher lock" before acquiring "t_lock"
859*7c478bd9Sstevel@tonic-gate 		 * to avoid spinning on it since the thread at the front
860*7c478bd9Sstevel@tonic-gate 		 * of the swap queue could be pinned before giving up
861*7c478bd9Sstevel@tonic-gate 		 * its "t_lock" in resume.
862*7c478bd9Sstevel@tonic-gate 		 */
863*7c478bd9Sstevel@tonic-gate 		disp_lock_exit(&swapped_lock);
864*7c478bd9Sstevel@tonic-gate 		lock_set(&tp->t_lock);
865*7c478bd9Sstevel@tonic-gate 
866*7c478bd9Sstevel@tonic-gate 		/*
867*7c478bd9Sstevel@tonic-gate 		 * Now, re-acquire the "swapped_lock".  Acquiring this lock
868*7c478bd9Sstevel@tonic-gate 		 * results in locking the thread since its dispatcher lock
869*7c478bd9Sstevel@tonic-gate 		 * (t_lockp) is the "swapped_lock".
870*7c478bd9Sstevel@tonic-gate 		 */
871*7c478bd9Sstevel@tonic-gate 		disp_lock_enter(&swapped_lock);
872*7c478bd9Sstevel@tonic-gate 		ASSERT(tp->t_state == TS_RUN);
873*7c478bd9Sstevel@tonic-gate 		ASSERT(tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ));
874*7c478bd9Sstevel@tonic-gate 
875*7c478bd9Sstevel@tonic-gate 		tp->t_schedflag &= ~(TS_LOAD | TS_ON_SWAPQ);
876*7c478bd9Sstevel@tonic-gate 		tp->t_stime = lbolt;		/* swapout time */
877*7c478bd9Sstevel@tonic-gate 		disp_lock_exit(&swapped_lock);
878*7c478bd9Sstevel@tonic-gate 		lock_clear(&tp->t_lock);
879*7c478bd9Sstevel@tonic-gate 
880*7c478bd9Sstevel@tonic-gate 		lwp = ttolwp(tp);
881*7c478bd9Sstevel@tonic-gate 		lwp->lwp_ru.nswap++;
882*7c478bd9Sstevel@tonic-gate 
883*7c478bd9Sstevel@tonic-gate 		pp = ttoproc(tp);
884*7c478bd9Sstevel@tonic-gate 		stack_size = swapsize(tp->t_swap);
885*7c478bd9Sstevel@tonic-gate 		stack_pages = btopr(stack_size);
886*7c478bd9Sstevel@tonic-gate 
887*7c478bd9Sstevel@tonic-gate 		/* Kernel probe */
888*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_4(swapout_lwp, "vm swap swapout", /* CSTYLED */,
889*7c478bd9Sstevel@tonic-gate 			tnf_pid,	pid,		pp->p_pid,
890*7c478bd9Sstevel@tonic-gate 			tnf_lwpid,	lwpid,		tp->t_tid,
891*7c478bd9Sstevel@tonic-gate 			tnf_kthread_id,	tid,		tp,
892*7c478bd9Sstevel@tonic-gate 			tnf_ulong,	page_count,	stack_pages);
893*7c478bd9Sstevel@tonic-gate 
894*7c478bd9Sstevel@tonic-gate 		rw_enter(&kas.a_lock, RW_READER);
895*7c478bd9Sstevel@tonic-gate 		err = segkp_fault(segkp->s_as->a_hat, segkp, tp->t_swap,
896*7c478bd9Sstevel@tonic-gate 		    stack_size, F_SOFTUNLOCK, S_WRITE);
897*7c478bd9Sstevel@tonic-gate 		rw_exit(&kas.a_lock);
898*7c478bd9Sstevel@tonic-gate 
899*7c478bd9Sstevel@tonic-gate 		if (err) {
900*7c478bd9Sstevel@tonic-gate 			cmn_err(CE_PANIC,
901*7c478bd9Sstevel@tonic-gate 			"process_swap_list: segkp_fault failed err: %d", err);
902*7c478bd9Sstevel@tonic-gate 		}
903*7c478bd9Sstevel@tonic-gate 		CPU_STATS_ADDQ(cpup, vm, pgswapout, stack_pages);
904*7c478bd9Sstevel@tonic-gate 
905*7c478bd9Sstevel@tonic-gate 		nswapped++;
906*7c478bd9Sstevel@tonic-gate 		tot_swapped_out++;
907*7c478bd9Sstevel@tonic-gate 		swapqswap++;
908*7c478bd9Sstevel@tonic-gate 
909*7c478bd9Sstevel@tonic-gate 		/*
910*7c478bd9Sstevel@tonic-gate 		 * Don't need p_lock since the swapper is the only
911*7c478bd9Sstevel@tonic-gate 		 * thread which increments/decrements p_swapcnt and p_swrss.
912*7c478bd9Sstevel@tonic-gate 		 */
913*7c478bd9Sstevel@tonic-gate 		ws_pages = stack_pages;
914*7c478bd9Sstevel@tonic-gate 		pp->p_swapcnt++;
915*7c478bd9Sstevel@tonic-gate 
916*7c478bd9Sstevel@tonic-gate 		TRACE_1(TR_FAC_SCHED, TR_SWAPQ_LWP, "swaplist: pp %p", pp);
917*7c478bd9Sstevel@tonic-gate 
918*7c478bd9Sstevel@tonic-gate 		/*
919*7c478bd9Sstevel@tonic-gate 		 * Unload address space when all lwps are swapped out.
920*7c478bd9Sstevel@tonic-gate 		 */
921*7c478bd9Sstevel@tonic-gate 		if (pp->p_swapcnt == pp->p_lwpcnt) {
922*7c478bd9Sstevel@tonic-gate 			size_t as_size = 0;
923*7c478bd9Sstevel@tonic-gate 
924*7c478bd9Sstevel@tonic-gate 			if (rm_asrss(pp->p_as) != 0)
925*7c478bd9Sstevel@tonic-gate 				as_size = as_swapout(pp->p_as);
926*7c478bd9Sstevel@tonic-gate 
927*7c478bd9Sstevel@tonic-gate 			CPU_STATS_ADDQ(cpup, vm, pgswapout,
928*7c478bd9Sstevel@tonic-gate 			    btop(as_size));
929*7c478bd9Sstevel@tonic-gate 			CPU_STATS_ADDQ(cpup, vm, swapout, 1);
930*7c478bd9Sstevel@tonic-gate 
931*7c478bd9Sstevel@tonic-gate 			ws_pages += btop(as_size);
932*7c478bd9Sstevel@tonic-gate 
933*7c478bd9Sstevel@tonic-gate 			TRACE_2(TR_FAC_SCHED, TR_SWAPQ_PROC,
934*7c478bd9Sstevel@tonic-gate 			    "swaplist_proc: pp %p pages_pushed: %lu",
935*7c478bd9Sstevel@tonic-gate 			    pp, ws_pages);
936*7c478bd9Sstevel@tonic-gate 			/* Kernel probe */
937*7c478bd9Sstevel@tonic-gate 			TNF_PROBE_2(swapout_process, "vm swap swapout",
938*7c478bd9Sstevel@tonic-gate 				/* CSTYLED */,
939*7c478bd9Sstevel@tonic-gate 				tnf_pid,	pid,		pp->p_pid,
940*7c478bd9Sstevel@tonic-gate 				tnf_ulong,	page_count,	ws_pages);
941*7c478bd9Sstevel@tonic-gate 		}
942*7c478bd9Sstevel@tonic-gate 		pp->p_swrss += ws_pages;
943*7c478bd9Sstevel@tonic-gate 		disp_lock_enter(&swapped_lock);
944*7c478bd9Sstevel@tonic-gate 	}
945*7c478bd9Sstevel@tonic-gate 	disp_lock_exit(&swapped_lock);
946*7c478bd9Sstevel@tonic-gate }
947