1 /*-
2 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3 *
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1994 John S. Dyson
7 * All rights reserved.
8 * Copyright (c) 1994 David Greenman
9 * All rights reserved.
10 * Copyright (c) 2005 Yahoo! Technologies Norway AS
11 * All rights reserved.
12 *
13 * This code is derived from software contributed to Berkeley by
14 * The Mach Operating System project at Carnegie-Mellon University.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. All advertising materials mentioning features or use of this software
25 * must display the following acknowledgement:
26 * This product includes software developed by the University of
27 * California, Berkeley and its contributors.
28 * 4. Neither the name of the University nor the names of its contributors
29 * may be used to endorse or promote products derived from this software
30 * without specific prior written permission.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42 * SUCH DAMAGE.
43 *
44 *
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 * All rights reserved.
47 *
48 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49 *
50 * Permission to use, copy, modify and distribute this software and
51 * its documentation is hereby granted, provided that both the copyright
52 * notice and this permission notice appear in all copies of the
53 * software, derivative works or modified versions, and any portions
54 * thereof, and that both notices appear in supporting documentation.
55 *
56 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
57 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
58 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59 *
60 * Carnegie Mellon requests users of this software to return to
61 *
62 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
63 * School of Computer Science
64 * Carnegie Mellon University
65 * Pittsburgh PA 15213-3890
66 *
67 * any improvements or extensions that they make and grant Carnegie the
68 * rights to redistribute these changes.
69 */
70
71 #include "opt_kstack_pages.h"
72 #include "opt_kstack_max_pages.h"
73 #include "opt_vm.h"
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/limits.h>
78 #include <sys/kernel.h>
79 #include <sys/eventhandler.h>
80 #include <sys/lock.h>
81 #include <sys/mutex.h>
82 #include <sys/proc.h>
83 #include <sys/kthread.h>
84 #include <sys/ktr.h>
85 #include <sys/mount.h>
86 #include <sys/racct.h>
87 #include <sys/resourcevar.h>
88 #include <sys/refcount.h>
89 #include <sys/sched.h>
90 #include <sys/sdt.h>
91 #include <sys/signalvar.h>
92 #include <sys/smp.h>
93 #include <sys/time.h>
94 #include <sys/vnode.h>
95 #include <sys/vmmeter.h>
96 #include <sys/rwlock.h>
97 #include <sys/sx.h>
98 #include <sys/sysctl.h>
99
100 #include <vm/vm.h>
101 #include <vm/vm_param.h>
102 #include <vm/vm_kern.h>
103 #include <vm/vm_object.h>
104 #include <vm/vm_page.h>
105 #include <vm/vm_map.h>
106 #include <vm/vm_pageout.h>
107 #include <vm/vm_pager.h>
108 #include <vm/vm_phys.h>
109 #include <vm/vm_radix.h>
110 #include <vm/swap_pager.h>
111 #include <vm/vm_extern.h>
112 #include <vm/uma.h>
113
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;	/* proc of the vm_daemon kernel process */

/* Descriptor used by the SYSINIT below to fork the vm_daemon kproc. */
static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);

/*
 * Sleep interval, in ticks, between vm_daemon scans.  0 means sleep until
 * explicitly woken on vm_daemon_needed; vm_daemon() overrides 0 with hz
 * when racct is enabled so RSS accounting is refreshed periodically.
 */
static int vm_daemon_timeout = 0;
SYSCTL_INT(_vm, OID_AUTO, vmdaemon_timeout, CTLFLAG_RW,
    &vm_daemon_timeout, 0,
    "Time between vmdaemon runs");

static int vm_daemon_needed;	/* wakeup channel for vm_daemon's msleep() */
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);

static void vm_swapout_map_deactivate_pages(vm_map_t, long);
static void vm_swapout_object_deactivate(pmap_t, vm_object_t, long);
137
/*
 * Try to shrink the pmap's resident set by one page: remove the page's
 * mappings and/or deactivate it.  The page is skipped if it is wired,
 * cannot be exclusively busied without blocking, or is not mapped into
 * the target pmap.
 *
 * "unmap" permits removing all mappings of a page on the active queue
 * (deactivating it on success); when false, only pages not on the
 * active queue have their mappings removed.  Recently referenced pages
 * (per pmap_is_referenced()) are always left alone.
 */
static void
vm_swapout_object_deactivate_page(pmap_t pmap, vm_page_t m, bool unmap)
{

	/*
	 * Ignore unreclaimable wired pages.  Repeat the check after busying
	 * since a busy holder may wire the page.
	 */
	if (vm_page_wired(m) || !vm_page_tryxbusy(m))
		return;

	/* Recheck wiring now that we hold the busy lock; also require
	 * that the page is actually mapped in this pmap. */
	if (vm_page_wired(m) || !pmap_page_exists_quick(pmap, m)) {
		vm_page_xunbusy(m);
		return;
	}
	if (!pmap_is_referenced(m)) {
		if (!vm_page_active(m))
			(void)vm_page_try_remove_all(m);
		else if (unmap && vm_page_try_remove_all(m))
			vm_page_deactivate(m);
	}
	vm_page_xunbusy(m);
}
161
/*
 * vm_swapout_object_deactivate
 *
 * Deactivate enough pages to satisfy the inactive target
 * requirements.
 *
 * The object and map must be locked.
 *
 * Walks the shadow chain starting at first_object, calling
 * vm_swapout_object_deactivate_page() on each resident page until the
 * pmap's resident count drops to "desired" (or the scan is abandoned).
 * first_object remains locked by the caller; each backing object is
 * read-locked here and unlocked as the walk advances.
 */
static void
vm_swapout_object_deactivate(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	struct pctrie_iter pages;
	vm_object_t backing_object, object;
	vm_page_t m;
	bool unmap;

	VM_OBJECT_ASSERT_LOCKED(first_object);
	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
		return;
	for (object = first_object;; object = backing_object) {
		/* Stop as soon as the resident-set target is met. */
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_ASSERT_LOCKED(object);
		/* Skip unmanaged objects and those with paging in flight. */
		if ((object->flags & OBJ_UNMANAGED) != 0 ||
		    blockcount_read(&object->paging_in_progress) > 0)
			goto unlock_return;

		/*
		 * Only unmap active pages when this object is not shadowed
		 * by more than one object; otherwise the pages may still be
		 * in use through another shadow chain.
		 */
		unmap = true;
		if (object->shadow_count > 1)
			unmap = false;

		/*
		 * Scan the object's entire memory queue.
		 */
		vm_page_iter_init(&pages, object);
		VM_RADIX_FOREACH(m, &pages) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			/* Yield the CPU politely on long scans. */
			if (should_yield())
				goto unlock_return;
			vm_swapout_object_deactivate_page(pmap, m, unmap);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		/* Lock the next object before releasing the current one. */
		VM_OBJECT_RLOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_RUNLOCK(object);
	}
unlock_return:
	/* first_object's lock is owned by the caller; drop any other. */
	if (object != first_object)
		VM_OBJECT_RUNLOCK(object);
}
215
/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 *
 * Strategy: first deactivate pages from the largest unshadowed object
 * in the map, then sweep the remaining entries in map order until the
 * pmap's resident count reaches "desired".  Entirely best-effort: both
 * the map lock and the object locks are only trylocked.
 */
static void
vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	/* Give up rather than block on a contended map lock. */
	if (!vm_map_trylock_read(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	VM_MAP_ENTRY_FOREACH(tmpe, map) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
				/*
				 * Track the unshadowed object with the most
				 * resident pages; the current candidate is
				 * kept read-locked until displaced.
				 */
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count <
				     obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_RUNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_RUNLOCK(obj);
			}
		}
		/* Remember if any entry is wired; see pmap_remove() below. */
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
	}

	if (bigobj != NULL) {
		vm_swapout_object_deactivate(map->pmap, bigobj, desired);
		VM_OBJECT_RUNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	VM_MAP_ENTRY_FOREACH(tmpe, map) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_RLOCK(obj);
				vm_swapout_object_deactivate(map->pmap, obj,
				    desired);
				VM_OBJECT_RUNLOCK(obj);
			}
		}
	}

	/*
	 * Remove all mappings if a process is swapped out, this will free page
	 * table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}

	vm_map_unlock_read(map);
}
289
/*
 * Main loop of the "vmdaemon" kernel process.  Periodically (or when
 * woken on vm_daemon_needed) scans all processes and deactivates pages
 * from those whose resident set exceeds RLIMIT_RSS or, with racct
 * enabled, their racct RSS limit.  Never returns.
 */
static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, tryagain, attempts;
	uint64_t rsize, ravailable;

	/* With racct, poll once a second by default rather than sleeping
	 * indefinitely, so RSS accounting stays current. */
	if (racct_enable && vm_daemon_timeout == 0)
		vm_daemon_timeout = hz;

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
		    vm_daemon_timeout);
		mtx_unlock(&vm_daemon_mtx);

		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.  A single thread in an unexpected
			 * state disqualifies the whole process.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Hold the process and take a reference on its
			 * vmspace so both survive while allproc_lock is
			 * dropped for the deactivation pass below.
			 */
			vm = vmspace_acquire_ref(p);
			_PHOLD(p);
			PROC_UNLOCK(p);
			if (vm == NULL) {
				PRELE(p);
				continue;
			}
			sx_sunlock(&allproc_lock);

			size = vmspace_resident_count(vm);
			if (size >= limit) {
				vm_swapout_map_deactivate_pages(
				    &vm->vm_map, limit);
				size = vmspace_resident_count(vm);
			}
			if (racct_enable) {
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				/* Recheck state: p may be exiting now. */
				if (p->p_state == PRS_NORMAL)
					racct_set(p, RACCT_RSS, rsize);
				ravailable = racct_get_available(p, RACCT_RSS);
				PROC_UNLOCK(p);
				if (rsize > ravailable) {
					/*
					 * Don't be overly aggressive; this
					 * might be an innocent process,
					 * and the limit could've been exceeded
					 * by some memory hog.  Don't try
					 * to deactivate more than 1/4th
					 * of process' resident set size.
					 */
					if (attempts <= 8) {
						if (ravailable < rsize -
						    (rsize / 4)) {
							ravailable = rsize -
							    (rsize / 4);
						}
					}
					vm_swapout_map_deactivate_pages(
					    &vm->vm_map,
					    OFF_TO_IDX(ravailable));
					/* Update RSS usage after paging out. */
					size = vmspace_resident_count(vm);
					rsize = IDX_TO_OFF(size);
					PROC_LOCK(p);
					if (p->p_state == PRS_NORMAL)
						racct_set(p, RACCT_RSS, rsize);
					PROC_UNLOCK(p);
					/* Still over the limit: rescan. */
					if (rsize > ravailable)
						tryagain = 1;
				}
			}
			vmspace_free(vm);
			/* Reacquire allproc_lock before releasing the hold
			 * and continuing the FOREACH_PROC iteration. */
			sx_slock(&allproc_lock);
			PRELE(p);
		}
		sx_sunlock(&allproc_lock);
		/* Retry, at most 10 passes, while racct limits remain
		 * exceeded; each pass beyond 8 may deactivate harder. */
		if (tryagain != 0 && attempts <= 10) {
			maybe_yield();
			goto again;
		}
	}
}
422