1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3 *
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * The Mach Operating System project at Carnegie-Mellon University.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61 /*
62 * Virtual memory object module.
63 */
64
65 #include "opt_vm.h"
66
67 #include <sys/systm.h>
68 #include <sys/blockcount.h>
69 #include <sys/conf.h>
70 #include <sys/cpuset.h>
71 #include <sys/ipc.h>
72 #include <sys/jail.h>
73 #include <sys/limits.h>
74 #include <sys/lock.h>
75 #include <sys/mman.h>
76 #include <sys/mount.h>
77 #include <sys/kernel.h>
78 #include <sys/mutex.h>
79 #include <sys/pctrie.h>
80 #include <sys/proc.h>
81 #include <sys/refcount.h>
82 #include <sys/shm.h>
83 #include <sys/sx.h>
84 #include <sys/sysctl.h>
85 #include <sys/resourcevar.h>
87 #include <sys/rwlock.h>
88 #include <sys/user.h>
89 #include <sys/vnode.h>
90 #include <sys/vmmeter.h>
91
92 #include <vm/vm.h>
93 #include <vm/vm_param.h>
94 #include <vm/pmap.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_page.h>
98 #include <vm/vm_pageout.h>
99 #include <vm/vm_pager.h>
100 #include <vm/vm_phys.h>
101 #include <vm/vm_pagequeue.h>
102 #include <vm/swap_pager.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_extern.h>
105 #include <vm/vm_radix.h>
106 #include <vm/vm_reserv.h>
107 #include <vm/uma.h>
108
109 static int old_msync;
110 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
111 "Use old (insecure) msync behavior");
112
113 static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
114 int pagerflags, int flags, boolean_t *allclean,
115 boolean_t *eio);
116 static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
117 boolean_t *allclean);
118 static void vm_object_backing_remove(vm_object_t object);
119
120 /*
121 * Virtual memory objects maintain the actual data
122 * associated with allocated virtual memory. A given
123 * page of memory exists within exactly one object.
124 *
125 * An object is only deallocated when all "references"
126 * are given up. Only one "reference" to a given
127 * region of an object should be writeable.
128 *
129 * Associated with each object is a list of all resident
130 * memory pages belonging to that object; this list is
131 * maintained by the "vm_page" module, and locked by the object's
132 * lock.
133 *
134 * Each object also records a "pager" routine which is
135 * used to retrieve (and store) pages to the proper backing
136 * storage. In addition, objects may be backed by other
137 * objects from which they were virtual-copied.
138 *
139 * The only items within the object structure which are
140 * modified after time of creation are:
141 * reference count locked by object's lock
142 * pager routine locked by object's lock
143 *
144 */
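/*
 * Illustrative sketch (not part of the build): after a copy-on-write
 * fork, a map entry's anonymous object typically shadows the original
 * object:
 *
 *	entry->object.vm_object --> shadow object (OBJ_ANON)
 *	                                  |  backing_object
 *	                                  v
 *	                            original object (OBJ_ANON)
 *
 * Newly written pages live in the front object, unmodified pages are
 * still found by following backing_object, and vm_object_collapse()
 * merges the chain once only one reference remains.
 */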
145
146 struct object_q vm_object_list;
147 struct mtx vm_object_list_mtx; /* lock for object list and count */
148
149 struct vm_object kernel_object_store;
150
151 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
152 "VM object stats");
153
154 static COUNTER_U64_DEFINE_EARLY(object_collapses);
155 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
156 &object_collapses,
157 "VM object collapses");
158
159 static COUNTER_U64_DEFINE_EARLY(object_bypasses);
160 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
161 &object_bypasses,
162 "VM object bypasses");
163
164 static COUNTER_U64_DEFINE_EARLY(object_collapse_waits);
165 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapse_waits, CTLFLAG_RD,
166 &object_collapse_waits,
167 "Number of sleeps for collapse");
168
169 static uma_zone_t obj_zone;
170
171 static int vm_object_zinit(void *mem, int size, int flags);
172
173 #ifdef INVARIANTS
174 static void vm_object_zdtor(void *mem, int size, void *arg);
175
176 static void
177 vm_object_zdtor(void *mem, int size, void *arg)
178 {
179 vm_object_t object;
180
181 object = (vm_object_t)mem;
182 KASSERT(object->ref_count == 0,
183 ("object %p ref_count = %d", object, object->ref_count));
184 KASSERT(TAILQ_EMPTY(&object->memq),
185 ("object %p has resident pages in its memq", object));
186 KASSERT(vm_radix_is_empty(&object->rtree),
187 ("object %p has resident pages in its trie", object));
188 #if VM_NRESERVLEVEL > 0
189 KASSERT(LIST_EMPTY(&object->rvq),
190 ("object %p has reservations",
191 object));
192 #endif
193 KASSERT(!vm_object_busied(object),
194 ("object %p busy = %d", object, blockcount_read(&object->busy)));
195 KASSERT(object->resident_page_count == 0,
196 ("object %p resident_page_count = %d",
197 object, object->resident_page_count));
198 KASSERT(atomic_load_int(&object->shadow_count) == 0,
199 ("object %p shadow_count = %d",
200 object, atomic_load_int(&object->shadow_count)));
201 KASSERT(object->type == OBJT_DEAD,
202 ("object %p has non-dead type %d",
203 object, object->type));
204 KASSERT(object->charge == 0 && object->cred == NULL,
205 ("object %p has non-zero charge %ju (%p)",
206 object, (uintmax_t)object->charge, object->cred));
207 }
208 #endif
209
210 static int
211 vm_object_zinit(void *mem, int size, int flags)
212 {
213 vm_object_t object;
214
215 object = (vm_object_t)mem;
216 rw_init_flags(&object->lock, "vmobject", RW_DUPOK | RW_NEW);
217
218 /* These are true for any object that has been freed */
219 object->type = OBJT_DEAD;
220 vm_radix_init(&object->rtree);
221 refcount_init(&object->ref_count, 0);
222 blockcount_init(&object->paging_in_progress);
223 blockcount_init(&object->busy);
224 object->resident_page_count = 0;
225 atomic_store_int(&object->shadow_count, 0);
226 object->flags = OBJ_DEAD;
227
228 mtx_lock(&vm_object_list_mtx);
229 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
230 mtx_unlock(&vm_object_list_mtx);
231 return (0);
232 }
233
234 static void
235 _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
236 vm_object_t object, void *handle)
237 {
238
239 TAILQ_INIT(&object->memq);
240 LIST_INIT(&object->shadow_head);
241
242 object->type = type;
243 object->flags = flags;
244 if ((flags & OBJ_SWAP) != 0) {
245 pctrie_init(&object->un_pager.swp.swp_blks);
246 object->un_pager.swp.writemappings = 0;
247 }
248
249 /*
250 * Ensure that swap_pager_swapoff() iteration over object_list
251 * sees an up-to-date type and pctrie head if it observed a
252 * non-dead object.
253 */
254 atomic_thread_fence_rel();
255
256 object->pg_color = 0;
257 object->size = size;
258 object->domain.dr_policy = NULL;
259 object->generation = 1;
260 object->cleangeneration = 1;
261 refcount_init(&object->ref_count, 1);
262 object->memattr = VM_MEMATTR_DEFAULT;
263 object->cred = NULL;
264 object->charge = 0;
265 object->handle = handle;
266 object->backing_object = NULL;
267 object->backing_object_offset = (vm_ooffset_t) 0;
268 #if VM_NRESERVLEVEL > 0
269 LIST_INIT(&object->rvq);
270 #endif
271 umtx_shm_object_init(object);
272 }
273
274 /*
275 * vm_object_init:
276 *
277 * Initialize the VM objects module.
278 */
279 void
280 vm_object_init(void)
281 {
282 TAILQ_INIT(&vm_object_list);
283 mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
284
285 rw_init(&kernel_object->lock, "kernel vm object");
286 vm_radix_init(&kernel_object->rtree);
287 _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
288 VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL);
289 #if VM_NRESERVLEVEL > 0
290 kernel_object->flags |= OBJ_COLORED;
291 kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
292 #endif
293 kernel_object->un_pager.phys.ops = &default_phys_pg_ops;
294
295 /*
296 * The lock portion of struct vm_object must be type stable due
297 * to vm_pageout_fallback_object_lock locking a vm object
298 * without holding any references to it.
299 *
300 * paging_in_progress is valid always. Lockless references to
301 * the objects may acquire pip and then check OBJ_DEAD.
302 */
303 obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
304 #ifdef INVARIANTS
305 vm_object_zdtor,
306 #else
307 NULL,
308 #endif
309 vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
310
311 vm_radix_zinit();
312 }
313
314 void
315 vm_object_clear_flag(vm_object_t object, u_short bits)
316 {
317
318 VM_OBJECT_ASSERT_WLOCKED(object);
319 object->flags &= ~bits;
320 }
321
322 /*
323 * Sets the default memory attribute for the specified object. Pages
324 * that are allocated to this object are by default assigned this memory
325 * attribute.
326 *
327 * Presently, this function must be called before any pages are allocated
328 * to the object. In the future, this requirement may be relaxed for
329 * "default" and "swap" objects.
330 */
331 int
332 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
333 {
334
335 VM_OBJECT_ASSERT_WLOCKED(object);
336
337 if (object->type == OBJT_DEAD)
338 return (KERN_INVALID_ARGUMENT);
339 if (!TAILQ_EMPTY(&object->memq))
340 return (KERN_FAILURE);
341
342 object->memattr = memattr;
343 return (KERN_SUCCESS);
344 }
345
346 void
347 vm_object_pip_add(vm_object_t object, short i)
348 {
349
350 if (i > 0)
351 blockcount_acquire(&object->paging_in_progress, i);
352 }
353
354 void
355 vm_object_pip_wakeup(vm_object_t object)
356 {
357
358 vm_object_pip_wakeupn(object, 1);
359 }
360
361 void
362 vm_object_pip_wakeupn(vm_object_t object, short i)
363 {
364
365 if (i > 0)
366 blockcount_release(&object->paging_in_progress, i);
367 }
368
369 /*
370 * Atomically drop the object lock and wait for pip to drain. This protects
371 * from sleep/wakeup races due to identity changes. The lock is not re-acquired
372 * on return.
373 */
374 static void
375 vm_object_pip_sleep(vm_object_t object, const char *waitid)
376 {
377
378 (void)blockcount_sleep(&object->paging_in_progress, &object->lock,
379 waitid, PVM | PDROP);
380 }
381
382 void
383 vm_object_pip_wait(vm_object_t object, const char *waitid)
384 {
385
386 VM_OBJECT_ASSERT_WLOCKED(object);
387
388 blockcount_wait(&object->paging_in_progress, &object->lock, waitid,
389 PVM);
390 }
391
392 void
393 vm_object_pip_wait_unlocked(vm_object_t object, const char *waitid)
394 {
395
396 VM_OBJECT_ASSERT_UNLOCKED(object);
397
398 blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM);
399 }
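/*
 * Usage sketch (illustrative only): a caller that must drop the object
 * lock while pager I/O is in flight typically brackets the window with
 * paging_in_progress so that termination and collapse wait for it:
 *
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_pip_add(object, 1);
 *	VM_OBJECT_WUNLOCK(object);
 *	... issue and wait for the pager I/O ...
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_pip_wakeup(object);
 *	VM_OBJECT_WUNLOCK(object);
 */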
400
401 /*
402 * vm_object_allocate:
403 *
404 * Returns a new object with the given size.
405 */
406 vm_object_t
407 vm_object_allocate(objtype_t type, vm_pindex_t size)
408 {
409 vm_object_t object;
410 u_short flags;
411
412 switch (type) {
413 case OBJT_DEAD:
414 panic("vm_object_allocate: can't create OBJT_DEAD");
415 case OBJT_SWAP:
416 flags = OBJ_COLORED | OBJ_SWAP;
417 break;
418 case OBJT_DEVICE:
419 case OBJT_SG:
420 flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
421 break;
422 case OBJT_MGTDEVICE:
423 flags = OBJ_FICTITIOUS;
424 break;
425 case OBJT_PHYS:
426 flags = OBJ_UNMANAGED;
427 break;
428 case OBJT_VNODE:
429 flags = 0;
430 break;
431 default:
432 panic("vm_object_allocate: type %d is undefined or dynamic",
433 type);
434 }
435 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
436 _vm_object_allocate(type, size, flags, object, NULL);
437
438 return (object);
439 }
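/*
 * Call sketch (illustrative only; "len" is a hypothetical byte count): an
 * anonymous region would typically pair vm_object_allocate() with a final
 * vm_object_deallocate() once the last mapping goes away.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_SWAP, atop(len));
 *	...
 *	vm_object_deallocate(obj);
 */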
440
441 vm_object_t
442 vm_object_allocate_dyn(objtype_t dyntype, vm_pindex_t size, u_short flags)
443 {
444 vm_object_t object;
445
446 MPASS(dyntype >= OBJT_FIRST_DYN /* && dyntype < nitems(pagertab) */);
447 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
448 _vm_object_allocate(dyntype, size, flags, object, NULL);
449
450 return (object);
451 }
452
453 /*
454 * vm_object_allocate_anon:
455 *
456 * Returns a new default object of the given size and marked as
457 * anonymous memory for special split/collapse handling. Color
458 * to be initialized by the caller.
459 */
460 vm_object_t
461 vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object,
462 struct ucred *cred, vm_size_t charge)
463 {
464 vm_object_t handle, object;
465
466 if (backing_object == NULL)
467 handle = NULL;
468 else if ((backing_object->flags & OBJ_ANON) != 0)
469 handle = backing_object->handle;
470 else
471 handle = backing_object;
472 object = uma_zalloc(obj_zone, M_WAITOK);
473 _vm_object_allocate(OBJT_SWAP, size,
474 OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle);
475 object->cred = cred;
476 object->charge = cred != NULL ? charge : 0;
477 return (object);
478 }
479
480 static void
481 vm_object_reference_vnode(vm_object_t object)
482 {
483 u_int old;
484
485 /*
486 * vnode objects need the lock for the first reference
487 * to serialize with vm_object_deallocate_vnode().
488 */
489 if (!refcount_acquire_if_gt(&object->ref_count, 0)) {
490 VM_OBJECT_RLOCK(object);
491 old = refcount_acquire(&object->ref_count);
492 if (object->type == OBJT_VNODE && old == 0)
493 vref(object->handle);
494 VM_OBJECT_RUNLOCK(object);
495 }
496 }
497
498 /*
499 * vm_object_reference:
500 *
501 * Acquires a reference to the given object.
502 */
503 void
504 vm_object_reference(vm_object_t object)
505 {
506
507 if (object == NULL)
508 return;
509
510 if (object->type == OBJT_VNODE)
511 vm_object_reference_vnode(object);
512 else
513 refcount_acquire(&object->ref_count);
514 KASSERT((object->flags & OBJ_DEAD) == 0,
515 ("vm_object_reference: Referenced dead object."));
516 }
517
518 /*
519 * vm_object_reference_locked:
520 *
521 * Gets another reference to the given object.
522 *
523 * The object must be locked.
524 */
525 void
526 vm_object_reference_locked(vm_object_t object)
527 {
528 u_int old;
529
530 VM_OBJECT_ASSERT_LOCKED(object);
531 old = refcount_acquire(&object->ref_count);
532 if (object->type == OBJT_VNODE && old == 0)
533 vref(object->handle);
534 KASSERT((object->flags & OBJ_DEAD) == 0,
535 ("vm_object_reference: Referenced dead object."));
536 }
537
538 /*
539 * Handle deallocating an object of type OBJT_VNODE.
540 */
541 static void
542 vm_object_deallocate_vnode(vm_object_t object)
543 {
544 struct vnode *vp = (struct vnode *) object->handle;
545 bool last;
546
547 KASSERT(object->type == OBJT_VNODE,
548 ("vm_object_deallocate_vnode: not a vnode object"));
549 KASSERT(vp != NULL, ("vm_object_deallocate_vnode: missing vp"));
550
551 /* Object lock to protect handle lookup. */
552 last = refcount_release(&object->ref_count);
553 VM_OBJECT_RUNLOCK(object);
554
555 if (!last)
556 return;
557
558 if (!umtx_shm_vnobj_persistent)
559 umtx_shm_object_terminated(object);
560
561 /* vrele may need the vnode lock. */
562 vrele(vp);
563 }
564
565 /*
566 * We dropped a reference on an object and discovered that it had a
567 * single remaining shadow. This is a sibling of the reference we
568 * dropped. Attempt to collapse the sibling and backing object.
569 */
570 static vm_object_t
571 vm_object_deallocate_anon(vm_object_t backing_object)
572 {
573 vm_object_t object;
574
575 /* Fetch the final shadow. */
576 object = LIST_FIRST(&backing_object->shadow_head);
577 KASSERT(object != NULL &&
578 atomic_load_int(&backing_object->shadow_count) == 1,
579 ("vm_object_deallocate_anon: ref_count: %d, shadow_count: %d",
580 backing_object->ref_count,
581 atomic_load_int(&backing_object->shadow_count)));
582 KASSERT((object->flags & OBJ_ANON) != 0,
583 ("invalid shadow object %p", object));
584
585 if (!VM_OBJECT_TRYWLOCK(object)) {
586 /*
587 * Prevent object from disappearing since we do not have a
588 * reference.
589 */
590 vm_object_pip_add(object, 1);
591 VM_OBJECT_WUNLOCK(backing_object);
592 VM_OBJECT_WLOCK(object);
593 vm_object_pip_wakeup(object);
594 } else
595 VM_OBJECT_WUNLOCK(backing_object);
596
597 /*
598 * Check for a collapse/terminate race with the last reference holder.
599 */
600 if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 ||
601 !refcount_acquire_if_not_zero(&object->ref_count)) {
602 VM_OBJECT_WUNLOCK(object);
603 return (NULL);
604 }
605 backing_object = object->backing_object;
606 if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0)
607 vm_object_collapse(object);
608 VM_OBJECT_WUNLOCK(object);
609
610 return (object);
611 }
612
613 /*
614 * vm_object_deallocate:
615 *
616 * Release a reference to the specified object,
617 * gained either through a vm_object_allocate
618 * or a vm_object_reference call. When all references
619 * are gone, storage associated with this object
620 * may be relinquished.
621 *
622 * No object may be locked.
623 */
624 void
625 vm_object_deallocate(vm_object_t object)
626 {
627 vm_object_t temp;
628 bool released;
629
630 while (object != NULL) {
631 /*
632 * If the reference count goes to 0 we start calling
633 * vm_object_terminate() on the object chain. A ref count
634 * of 1 may be a special case depending on the shadow count
635 * being 0 or 1. These cases require a write lock on the
636 * object.
637 */
638 if ((object->flags & OBJ_ANON) == 0)
639 released = refcount_release_if_gt(&object->ref_count, 1);
640 else
641 released = refcount_release_if_gt(&object->ref_count, 2);
642 if (released)
643 return;
644
645 if (object->type == OBJT_VNODE) {
646 VM_OBJECT_RLOCK(object);
647 if (object->type == OBJT_VNODE) {
648 vm_object_deallocate_vnode(object);
649 return;
650 }
651 VM_OBJECT_RUNLOCK(object);
652 }
653
654 VM_OBJECT_WLOCK(object);
655 KASSERT(object->ref_count > 0,
656 ("vm_object_deallocate: object deallocated too many times: %d",
657 object->type));
658
659 /*
660 * If this is not the final reference to an anonymous
661 * object we may need to collapse the shadow chain.
662 */
663 if (!refcount_release(&object->ref_count)) {
664 if (object->ref_count > 1 ||
665 atomic_load_int(&object->shadow_count) == 0) {
666 if ((object->flags & OBJ_ANON) != 0 &&
667 object->ref_count == 1)
668 vm_object_set_flag(object,
669 OBJ_ONEMAPPING);
670 VM_OBJECT_WUNLOCK(object);
671 return;
672 }
673
674 /* Handle collapsing last ref on anonymous objects. */
675 object = vm_object_deallocate_anon(object);
676 continue;
677 }
678
679 /*
680 * Handle the final reference to an object. We restart
681 * the loop with the backing object to avoid recursion.
682 */
683 umtx_shm_object_terminated(object);
684 temp = object->backing_object;
685 if (temp != NULL) {
686 KASSERT(object->type == OBJT_SWAP,
687 ("shadowed tmpfs v_object 2 %p", object));
688 vm_object_backing_remove(object);
689 }
690
691 KASSERT((object->flags & OBJ_DEAD) == 0,
692 ("vm_object_deallocate: Terminating dead object."));
693 vm_object_set_flag(object, OBJ_DEAD);
694 vm_object_terminate(object);
695 object = temp;
696 }
697 }
698
699 void
700 vm_object_destroy(vm_object_t object)
701 {
702 uma_zfree(obj_zone, object);
703 }
704
705 static void
706 vm_object_sub_shadow(vm_object_t object)
707 {
708 KASSERT(object->shadow_count >= 1,
709 ("object %p sub_shadow count zero", object));
710 atomic_subtract_int(&object->shadow_count, 1);
711 }
712
713 static void
714 vm_object_backing_remove_locked(vm_object_t object)
715 {
716 vm_object_t backing_object;
717
718 backing_object = object->backing_object;
719 VM_OBJECT_ASSERT_WLOCKED(object);
720 VM_OBJECT_ASSERT_WLOCKED(backing_object);
721
722 KASSERT((object->flags & OBJ_COLLAPSING) == 0,
723 ("vm_object_backing_remove: Removing collapsing object."));
724
725 vm_object_sub_shadow(backing_object);
726 if ((object->flags & OBJ_SHADOWLIST) != 0) {
727 LIST_REMOVE(object, shadow_list);
728 vm_object_clear_flag(object, OBJ_SHADOWLIST);
729 }
730 object->backing_object = NULL;
731 }
732
733 static void
734 vm_object_backing_remove(vm_object_t object)
735 {
736 vm_object_t backing_object;
737
738 VM_OBJECT_ASSERT_WLOCKED(object);
739
740 backing_object = object->backing_object;
741 if ((object->flags & OBJ_SHADOWLIST) != 0) {
742 VM_OBJECT_WLOCK(backing_object);
743 vm_object_backing_remove_locked(object);
744 VM_OBJECT_WUNLOCK(backing_object);
745 } else {
746 object->backing_object = NULL;
747 vm_object_sub_shadow(backing_object);
748 }
749 }
750
751 static void
752 vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object)
753 {
754
755 VM_OBJECT_ASSERT_WLOCKED(object);
756
757 atomic_add_int(&backing_object->shadow_count, 1);
758 if ((backing_object->flags & OBJ_ANON) != 0) {
759 VM_OBJECT_ASSERT_WLOCKED(backing_object);
760 LIST_INSERT_HEAD(&backing_object->shadow_head, object,
761 shadow_list);
762 vm_object_set_flag(object, OBJ_SHADOWLIST);
763 }
764 object->backing_object = backing_object;
765 }
766
767 static void
768 vm_object_backing_insert(vm_object_t object, vm_object_t backing_object)
769 {
770
771 VM_OBJECT_ASSERT_WLOCKED(object);
772
773 if ((backing_object->flags & OBJ_ANON) != 0) {
774 VM_OBJECT_WLOCK(backing_object);
775 vm_object_backing_insert_locked(object, backing_object);
776 VM_OBJECT_WUNLOCK(backing_object);
777 } else {
778 object->backing_object = backing_object;
779 atomic_add_int(&backing_object->shadow_count, 1);
780 }
781 }
782
783 /*
784 * Insert an object into a backing_object's shadow list with an additional
785 * reference to the backing_object added.
786 */
787 static void
788 vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object)
789 {
790
791 VM_OBJECT_ASSERT_WLOCKED(object);
792
793 if ((backing_object->flags & OBJ_ANON) != 0) {
794 VM_OBJECT_WLOCK(backing_object);
795 KASSERT((backing_object->flags & OBJ_DEAD) == 0,
796 ("shadowing dead anonymous object"));
797 vm_object_reference_locked(backing_object);
798 vm_object_backing_insert_locked(object, backing_object);
799 vm_object_clear_flag(backing_object, OBJ_ONEMAPPING);
800 VM_OBJECT_WUNLOCK(backing_object);
801 } else {
802 vm_object_reference(backing_object);
803 atomic_add_int(&backing_object->shadow_count, 1);
804 object->backing_object = backing_object;
805 }
806 }
807
808 /*
809 * Transfer a backing reference from backing_object to object.
810 */
811 static void
812 vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object)
813 {
814 vm_object_t new_backing_object;
815
816 /*
817 * Note that the reference to backing_object->backing_object
818 * moves from within backing_object to within object.
819 */
820 vm_object_backing_remove_locked(object);
821 new_backing_object = backing_object->backing_object;
822 if (new_backing_object == NULL)
823 return;
824 if ((new_backing_object->flags & OBJ_ANON) != 0) {
825 VM_OBJECT_WLOCK(new_backing_object);
826 vm_object_backing_remove_locked(backing_object);
827 vm_object_backing_insert_locked(object, new_backing_object);
828 VM_OBJECT_WUNLOCK(new_backing_object);
829 } else {
830 /*
831 * shadow_count for new_backing_object is left
832 * unchanged, its reference provided by backing_object
833 * is replaced by object.
834 */
835 object->backing_object = new_backing_object;
836 backing_object->backing_object = NULL;
837 }
838 }
839
840 /*
841 * Wait for a concurrent collapse to settle.
842 */
843 static void
844 vm_object_collapse_wait(vm_object_t object)
845 {
846
847 VM_OBJECT_ASSERT_WLOCKED(object);
848
849 while ((object->flags & OBJ_COLLAPSING) != 0) {
850 vm_object_pip_wait(object, "vmcolwait");
851 counter_u64_add(object_collapse_waits, 1);
852 }
853 }
854
855 /*
856 * Waits for a backing object to clear a pending collapse and returns
857 * it locked if it is an ANON object.
858 */
859 static vm_object_t
860 vm_object_backing_collapse_wait(vm_object_t object)
861 {
862 vm_object_t backing_object;
863
864 VM_OBJECT_ASSERT_WLOCKED(object);
865
866 for (;;) {
867 backing_object = object->backing_object;
868 if (backing_object == NULL ||
869 (backing_object->flags & OBJ_ANON) == 0)
870 return (NULL);
871 VM_OBJECT_WLOCK(backing_object);
872 if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0)
873 break;
874 VM_OBJECT_WUNLOCK(object);
875 vm_object_pip_sleep(backing_object, "vmbckwait");
876 counter_u64_add(object_collapse_waits, 1);
877 VM_OBJECT_WLOCK(object);
878 }
879 return (backing_object);
880 }
881
882 /*
883 * vm_object_terminate_single_page removes a pageable page from the object,
884 * takes it off the paging queues, and frees it if it is not wired.
885 * It is invoked via callback from vm_object_terminate_pages.
886 */
887 static void
888 vm_object_terminate_single_page(vm_page_t p, void *objectv)
889 {
890 vm_object_t object __diagused = objectv;
891
892 vm_page_assert_unbusied(p);
893 KASSERT(p->object == object &&
894 (p->ref_count & VPRC_OBJREF) != 0,
895 ("%s: page %p is inconsistent", __func__, p));
896 p->object = NULL;
897 if (vm_page_drop(p, VPRC_OBJREF) == VPRC_OBJREF) {
898 KASSERT((object->flags & OBJ_UNMANAGED) != 0 ||
899 vm_page_astate_load(p).queue != PQ_NONE,
900 ("%s: page %p does not belong to a queue", __func__, p));
901 VM_CNT_INC(v_pfree);
902 vm_page_free(p);
903 }
904 }
905
906 /*
907 * vm_object_terminate_pages removes any remaining pageable pages
908 * from the object and resets the object to an empty state.
909 */
910 static void
911 vm_object_terminate_pages(vm_object_t object)
912 {
913 VM_OBJECT_ASSERT_WLOCKED(object);
914
915 /*
916 * If the object contained any pages, then reset it to an empty state.
917 * Rather than incrementally removing each page from the object, the
918 * page and object are reset to an empty state.
919 */
920 if (object->resident_page_count == 0)
921 return;
922
923 vm_radix_reclaim_callback(&object->rtree,
924 vm_object_terminate_single_page, object);
925 TAILQ_INIT(&object->memq);
926 object->resident_page_count = 0;
927 if (object->type == OBJT_VNODE)
928 vdrop(object->handle);
929 }
930
931 /*
932 * vm_object_terminate actually destroys the specified object, freeing
933 * up all previously used resources.
934 *
935 * The object must be locked.
936 * This routine may block.
937 */
938 void
939 vm_object_terminate(vm_object_t object)
940 {
941
942 VM_OBJECT_ASSERT_WLOCKED(object);
943 KASSERT((object->flags & OBJ_DEAD) != 0,
944 ("terminating non-dead obj %p", object));
945 KASSERT((object->flags & OBJ_COLLAPSING) == 0,
946 ("terminating collapsing obj %p", object));
947 KASSERT(object->backing_object == NULL,
948 ("terminating shadow obj %p", object));
949
950 /*
951 * Wait for the pageout daemon and other current users to be
952 * done with the object. Note that new paging_in_progress
953 * users can come after this wait, but they must check
954 * OBJ_DEAD flag set (without unlocking the object), and avoid
955 * the object being terminated.
956 */
957 vm_object_pip_wait(object, "objtrm");
958
959 KASSERT(object->ref_count == 0,
960 ("vm_object_terminate: object with references, ref_count=%d",
961 object->ref_count));
962
963 if ((object->flags & OBJ_PG_DTOR) == 0)
964 vm_object_terminate_pages(object);
965
966 #if VM_NRESERVLEVEL > 0
967 if (__predict_false(!LIST_EMPTY(&object->rvq)))
968 vm_reserv_break_all(object);
969 #endif
970
971 KASSERT(object->cred == NULL || (object->flags & OBJ_SWAP) != 0,
972 ("%s: non-swap obj %p has cred", __func__, object));
973
974 /*
975 * Let the pager know object is dead.
976 */
977 vm_pager_deallocate(object);
978 VM_OBJECT_WUNLOCK(object);
979
980 vm_object_destroy(object);
981 }
982
983 /*
984 * Make the page read-only so that we can clear the object flags. However, if
985 * this is a nosync mmap then the object is likely to stay dirty so do not
986 * mess with the page and do not clear the object flags. Returns TRUE if the
987 * page should be flushed, and FALSE otherwise.
988 */
989 static boolean_t
990 vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean)
991 {
992
993 vm_page_assert_busied(p);
994
995 /*
996 * If we have been asked to skip nosync pages and this is a
997 * nosync page, skip it. Note that the object flags were not
998 * cleared in this case so we do not have to set them.
999 */
1000 if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) {
1001 *allclean = FALSE;
1002 return (FALSE);
1003 } else {
1004 pmap_remove_write(p);
1005 return (p->dirty != 0);
1006 }
1007 }
1008
1009 /*
1010 * vm_object_page_clean
1011 *
1012 * Clean all dirty pages in the specified range of object. Leaves page
1013 * on whatever queue it is currently on. If NOSYNC is set then do not
1014 * write out pages with PGA_NOSYNC set (originally comes from MAP_NOSYNC),
1015 * leaving the object dirty.
1016 *
1017 * For swap objects backing tmpfs regular files, do not flush anything,
1018 * but remove write protection on the mapped pages to update mtime through
1019 * mmapped writes.
1020 *
1021 * When stuffing pages asynchronously, allow clustering. XXX we need a
1022 * synchronous clustering mode implementation.
1023 *
1024 * Odd semantics: if start == end, we clean everything.
1025 *
1026 * The object must be locked.
1027 *
1028 * Returns FALSE if some page from the range was not written, as
1029 * reported by the pager, and TRUE otherwise.
1030 */
1031 boolean_t
1032 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
1033 int flags)
1034 {
1035 vm_page_t np, p;
1036 vm_pindex_t pi, tend, tstart;
1037 int curgeneration, n, pagerflags;
1038 boolean_t eio, res, allclean;
1039
1040 VM_OBJECT_ASSERT_WLOCKED(object);
1041
1042 if (!vm_object_mightbedirty(object) || object->resident_page_count == 0)
1043 return (TRUE);
1044
1045 pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
1046 VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
1047 pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;
1048
1049 tstart = OFF_TO_IDX(start);
1050 tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
1051 allclean = tstart == 0 && tend >= object->size;
1052 res = TRUE;
1053
1054 rescan:
1055 curgeneration = object->generation;
1056
1057 for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
1058 pi = p->pindex;
1059 if (pi >= tend)
1060 break;
1061 np = TAILQ_NEXT(p, listq);
1062 if (vm_page_none_valid(p))
1063 continue;
1064 if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) {
1065 if (object->generation != curgeneration &&
1066 (flags & OBJPC_SYNC) != 0)
1067 goto rescan;
1068 np = vm_page_find_least(object, pi);
1069 continue;
1070 }
1071 if (!vm_object_page_remove_write(p, flags, &allclean)) {
1072 vm_page_xunbusy(p);
1073 continue;
1074 }
1075 if (object->type == OBJT_VNODE) {
1076 n = vm_object_page_collect_flush(object, p, pagerflags,
1077 flags, &allclean, &eio);
1078 if (eio) {
1079 res = FALSE;
1080 allclean = FALSE;
1081 }
1082 if (object->generation != curgeneration &&
1083 (flags & OBJPC_SYNC) != 0)
1084 goto rescan;
1085
1086 /*
1087 * If the VOP_PUTPAGES() did a truncated write, so
1088 * that even the first page of the run is not fully
1089 * written, vm_pageout_flush() returns 0 as the run
1090 * length. Since the condition that caused truncated
1091 * write may be permanent, e.g. exhausted free space,
1092 * accepting n == 0 would cause an infinite loop.
1093 *
1094 * Forwarding the iterator leaves the unwritten page
1095 * behind, but there is not much we can do there if
1096 * filesystem refuses to write it.
1097 */
1098 if (n == 0) {
1099 n = 1;
1100 allclean = FALSE;
1101 }
1102 } else {
1103 n = 1;
1104 vm_page_xunbusy(p);
1105 }
1106 np = vm_page_find_least(object, pi + n);
1107 }
1108 #if 0
1109 VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
1110 #endif
1111
1112 /*
1113 * Leave updating cleangeneration for tmpfs objects to tmpfs
1114 * scan. It needs to update mtime, which happens for other
1115 * filesystems during page writeouts.
1116 */
1117 if (allclean && object->type == OBJT_VNODE)
1118 object->cleangeneration = curgeneration;
1119 return (res);
1120 }
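/*
 * Index arithmetic sketch (assuming 4 KiB pages): the byte range is
 * converted to page indices by truncating the start and rounding the end
 * up, e.g. start = 0x1800 and end = 0x2801 give
 * tstart = OFF_TO_IDX(0x1800) = 1 and
 * tend = OFF_TO_IDX(0x2801 + PAGE_MASK) = 3, so the pages at indices 1
 * and 2 are cleaned.
 */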
1121
1122 static int
1123 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
1124 int flags, boolean_t *allclean, boolean_t *eio)
1125 {
1126 vm_page_t ma[2 * vm_pageout_page_count - 1], tp;
1127 int base, count, runlen;
1128
1129 vm_page_lock_assert(p, MA_NOTOWNED);
1130 vm_page_assert_xbusied(p);
1131 VM_OBJECT_ASSERT_WLOCKED(object);
1132 base = nitems(ma) / 2;
1133 ma[base] = p;
1134 for (count = 1, tp = p; count < vm_pageout_page_count; count++) {
1135 tp = vm_page_next(tp);
1136 if (tp == NULL || vm_page_tryxbusy(tp) == 0)
1137 break;
1138 if (!vm_object_page_remove_write(tp, flags, allclean)) {
1139 vm_page_xunbusy(tp);
1140 break;
1141 }
1142 ma[base + count] = tp;
1143 }
1144
1145 for (tp = p; count < vm_pageout_page_count; count++) {
1146 tp = vm_page_prev(tp);
1147 if (tp == NULL || vm_page_tryxbusy(tp) == 0)
1148 break;
1149 if (!vm_object_page_remove_write(tp, flags, allclean)) {
1150 vm_page_xunbusy(tp);
1151 break;
1152 }
1153 ma[--base] = tp;
1154 }
1155
1156 vm_pageout_flush(&ma[base], count, pagerflags, nitems(ma) / 2 - base,
1157 &runlen, eio);
1158 return (runlen);
1159 }
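/*
 * Clustering note (descriptive): ma[] has 2 * vm_pageout_page_count - 1
 * slots so that a run of at most vm_pageout_page_count pages can sit on
 * either side of p, which starts in the middle slot.  The first loop
 * grows the run toward higher pindexes and the second toward lower ones,
 * each stopping at a page that is busy or no longer needs to be written.
 */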
1160
1161 /*
1162 * Note that there is absolutely no sense in writing out
1163 * anonymous objects, so we track down the vnode object
1164 * to write out.
1165 * We invalidate (remove) all pages from the address space
1166 * for semantic correctness.
1167 *
1168 * If the backing object is a device object with unmanaged pages, then any
1169 * mappings to the specified range of pages must be removed before this
1170 * function is called.
1171 *
1172 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
1173 * may start out with a NULL object.
1174 */
1175 boolean_t
1176 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
1177 boolean_t syncio, boolean_t invalidate)
1178 {
1179 vm_object_t backing_object;
1180 struct vnode *vp;
1181 struct mount *mp;
1182 int error, flags, fsync_after;
1183 boolean_t res;
1184
1185 if (object == NULL)
1186 return (TRUE);
1187 res = TRUE;
1188 error = 0;
1189 VM_OBJECT_WLOCK(object);
1190 while ((backing_object = object->backing_object) != NULL) {
1191 VM_OBJECT_WLOCK(backing_object);
1192 offset += object->backing_object_offset;
1193 VM_OBJECT_WUNLOCK(object);
1194 object = backing_object;
1195 if (object->size < OFF_TO_IDX(offset + size))
1196 size = IDX_TO_OFF(object->size) - offset;
1197 }
1198 /*
1199 * Flush pages if writing is allowed, invalidate them
1200 * if invalidation requested. Pages undergoing I/O
1201 * will be ignored by vm_object_page_remove().
1202 *
1203 * We cannot lock the vnode and then wait for paging
1204 * to complete without deadlocking against vm_fault.
1205 * Instead we simply call vm_object_page_remove() and
1206 * allow it to block internally on a page-by-page
1207 * basis when it encounters pages undergoing async
1208 * I/O.
1209 */
1210 if (object->type == OBJT_VNODE &&
1211 vm_object_mightbedirty(object) != 0 &&
1212 ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
1213 VM_OBJECT_WUNLOCK(object);
1214 (void)vn_start_write(vp, &mp, V_WAIT);
1215 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1216 if (syncio && !invalidate && offset == 0 &&
1217 atop(size) == object->size) {
1218 /*
1219 * If syncing the whole mapping of the file,
1220 * it is faster to schedule all the writes in
1221 * async mode, also allowing the clustering,
1222 * and then wait for i/o to complete.
1223 */
1224 flags = 0;
1225 fsync_after = TRUE;
1226 } else {
1227 flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1228 flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
1229 fsync_after = FALSE;
1230 }
1231 VM_OBJECT_WLOCK(object);
1232 res = vm_object_page_clean(object, offset, offset + size,
1233 flags);
1234 VM_OBJECT_WUNLOCK(object);
1235 if (fsync_after) {
1236 for (;;) {
1237 error = VOP_FSYNC(vp, MNT_WAIT, curthread);
1238 if (error != ERELOOKUP)
1239 break;
1240
1241 /*
1242 * Allow SU/bufdaemon to handle more
1243 * dependencies in the meantime.
1244 */
1245 VOP_UNLOCK(vp);
1246 vn_finished_write(mp);
1247
1248 (void)vn_start_write(vp, &mp, V_WAIT);
1249 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1250 }
1251 }
1252 VOP_UNLOCK(vp);
1253 vn_finished_write(mp);
1254 if (error != 0)
1255 res = FALSE;
1256 VM_OBJECT_WLOCK(object);
1257 }
1258 if ((object->type == OBJT_VNODE ||
1259 object->type == OBJT_DEVICE) && invalidate) {
1260 if (object->type == OBJT_DEVICE)
1261 /*
1262 * The option OBJPR_NOTMAPPED must be passed here
1263 * because vm_object_page_remove() cannot remove
1264 * unmanaged mappings.
1265 */
1266 flags = OBJPR_NOTMAPPED;
1267 else if (old_msync)
1268 flags = 0;
1269 else
1270 flags = OBJPR_CLEANONLY;
1271 vm_object_page_remove(object, OFF_TO_IDX(offset),
1272 OFF_TO_IDX(offset + size + PAGE_MASK), flags);
1273 }
1274 VM_OBJECT_WUNLOCK(object);
1275 return (res);
1276 }
1277
1278 /*
1279 * Determine whether the given advice can be applied to the object. Advice is
1280 * not applied to unmanaged pages since they never belong to page queues, and
1281 * since MADV_FREE is destructive, it can apply only to anonymous pages that
1282 * have been mapped at most once.
1283 */
1284 static bool
1285 vm_object_advice_applies(vm_object_t object, int advice)
1286 {
1287
1288 if ((object->flags & OBJ_UNMANAGED) != 0)
1289 return (false);
1290 if (advice != MADV_FREE)
1291 return (true);
1292 return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) ==
1293 (OBJ_ONEMAPPING | OBJ_ANON));
1294 }
1295
1296 static void
1297 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
1298 vm_size_t size)
1299 {
1300
1301 if (advice == MADV_FREE)
1302 vm_pager_freespace(object, pindex, size);
1303 }
1304
1305 /*
1306 * vm_object_madvise:
1307 *
1308 * Implements the madvise function at the object/page level.
1309 *
1310 * MADV_WILLNEED (any object)
1311 *
1312 * Activate the specified pages if they are resident.
1313 *
1314 * MADV_DONTNEED (any object)
1315 *
1316 * Deactivate the specified pages if they are resident.
1317 *
1318 * MADV_FREE (OBJT_SWAP objects, OBJ_ONEMAPPING only)
1319 *
1320 * Deactivate and clean the specified pages if they are
1321 * resident. This permits the process to reuse the pages
1322 * without faulting or the kernel to reclaim the pages
1323 * without I/O.
1324 */
1325 void
1326 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
1327 int advice)
1328 {
1329 vm_pindex_t tpindex;
1330 vm_object_t backing_object, tobject;
1331 vm_page_t m, tm;
1332
1333 if (object == NULL)
1334 return;
1335
1336 relookup:
1337 VM_OBJECT_WLOCK(object);
1338 if (!vm_object_advice_applies(object, advice)) {
1339 VM_OBJECT_WUNLOCK(object);
1340 return;
1341 }
1342 for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
1343 tobject = object;
1344
1345 /*
1346 * If the next page isn't resident in the top-level object, we
1347 * need to search the shadow chain. When applying MADV_FREE, we
1348 * take care to release any swap space used to store
1349 * non-resident pages.
1350 */
1351 if (m == NULL || pindex < m->pindex) {
1352 /*
1353 * Optimize a common case: if the top-level object has
1354 * no backing object, we can skip over the non-resident
1355 * range in constant time.
1356 */
1357 if (object->backing_object == NULL) {
1358 tpindex = (m != NULL && m->pindex < end) ?
1359 m->pindex : end;
1360 vm_object_madvise_freespace(object, advice,
1361 pindex, tpindex - pindex);
1362 if ((pindex = tpindex) == end)
1363 break;
1364 goto next_page;
1365 }
1366
1367 tpindex = pindex;
1368 do {
1369 vm_object_madvise_freespace(tobject, advice,
1370 tpindex, 1);
1371 /*
1372 * Prepare to search the next object in the
1373 * chain.
1374 */
1375 backing_object = tobject->backing_object;
1376 if (backing_object == NULL)
1377 goto next_pindex;
1378 VM_OBJECT_WLOCK(backing_object);
1379 tpindex +=
1380 OFF_TO_IDX(tobject->backing_object_offset);
1381 if (tobject != object)
1382 VM_OBJECT_WUNLOCK(tobject);
1383 tobject = backing_object;
1384 if (!vm_object_advice_applies(tobject, advice))
1385 goto next_pindex;
1386 } while ((tm = vm_page_lookup(tobject, tpindex)) ==
1387 NULL);
1388 } else {
1389 next_page:
1390 tm = m;
1391 m = TAILQ_NEXT(m, listq);
1392 }
1393
1394 /*
1395 * If the page is not in a normal state, skip it. The page
1396 * can not be invalidated while the object lock is held.
1397 */
1398 if (!vm_page_all_valid(tm) || vm_page_wired(tm))
1399 goto next_pindex;
1400 KASSERT((tm->flags & PG_FICTITIOUS) == 0,
1401 ("vm_object_madvise: page %p is fictitious", tm));
1402 KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
1403 ("vm_object_madvise: page %p is not managed", tm));
1404 if (vm_page_tryxbusy(tm) == 0) {
1405 if (object != tobject)
1406 VM_OBJECT_WUNLOCK(object);
1407 if (advice == MADV_WILLNEED) {
1408 /*
1409 * Reference the page before unlocking and
1410 * sleeping so that the page daemon is less
1411 * likely to reclaim it.
1412 */
1413 vm_page_aflag_set(tm, PGA_REFERENCED);
1414 }
1415 if (!vm_page_busy_sleep(tm, "madvpo", 0))
1416 VM_OBJECT_WUNLOCK(tobject);
1417 goto relookup;
1418 }
1419 vm_page_advise(tm, advice);
1420 vm_page_xunbusy(tm);
1421 vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
1422 next_pindex:
1423 if (tobject != object)
1424 VM_OBJECT_WUNLOCK(tobject);
1425 }
1426 VM_OBJECT_WUNLOCK(object);
1427 }
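/*
 * Behavior sketch (illustrative only): madvise(addr, len, MADV_FREE) on a
 * private anonymous mapping reaches this function with advice == MADV_FREE.
 * Resident pages in [pindex, end) are deactivated and marked clean via
 * vm_page_advise(), and swap space backing non-resident pages in the range
 * is released through vm_pager_freespace().
 */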
1428
1429 /*
1430 * vm_object_shadow:
1431 *
1432 * Create a new object which is backed by the
1433 * specified existing object range. The source
1434 * object reference is deallocated.
1435 *
1436 * The new object and offset into that object
1437 * are returned in the source parameters.
1438 */
1439 void
1440 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length,
1441 struct ucred *cred, bool shared)
1442 {
1443 vm_object_t source;
1444 vm_object_t result;
1445
1446 source = *object;
1447
1448 /*
1449 * Don't create the new object if the old object isn't shared.
1450 *
1451 * If we hold the only reference we can guarantee that it won't
1452 * increase while we have the map locked. Otherwise the race is
1453 * harmless and we will end up with an extra shadow object that
1454 * will be collapsed later.
1455 */
1456 if (source != NULL && source->ref_count == 1 &&
1457 (source->flags & OBJ_ANON) != 0)
1458 return;
1459
1460 /*
1461 * Allocate a new object with the given length.
1462 */
1463 result = vm_object_allocate_anon(atop(length), source, cred, length);
1464
1465 /*
1466 * Store the offset into the source object, and fix up the offset into
1467 * the new object.
1468 */
1469 result->backing_object_offset = *offset;
1470
1471 if (shared || source != NULL) {
1472 VM_OBJECT_WLOCK(result);
1473
1474 /*
1475 * The new object shadows the source object, adding a
1476 * reference to it. Our caller changes his reference
1477 * to point to the new object, removing a reference to
1478 * the source object. Net result: no change of
1479 * reference count, unless the caller needs to add one
1480 * more reference due to forking a shared map entry.
1481 */
1482 if (shared) {
1483 vm_object_reference_locked(result);
1484 vm_object_clear_flag(result, OBJ_ONEMAPPING);
1485 }
1486
1487 /*
1488 * Try to optimize the result object's page color when
1489 * shadowing in order to maintain page coloring
1490 * consistency in the combined shadowed object.
1491 */
1492 if (source != NULL) {
1493 vm_object_backing_insert(result, source);
1494 result->domain = source->domain;
1495 #if VM_NRESERVLEVEL > 0
1496 vm_object_set_flag(result,
1497 (source->flags & OBJ_COLORED));
1498 result->pg_color = (source->pg_color +
1499 OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER -
1500 1)) - 1);
1501 #endif
1502 }
1503 VM_OBJECT_WUNLOCK(result);
1504 }
1505
1506 /*
1507 * Return the new object and offset to the caller.
1508 */
1509 *offset = 0;
1510 *object = result;
1511 }
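/*
 * Call sketch (illustrative only, mirroring how map code typically shadows
 * an entry): the entry's object pointer and offset are updated in place
 * through the pointer arguments.
 *
 *	vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *	    entry->end - entry->start, entry->cred, false);
 */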
1512
1513 /*
1514 * vm_object_split:
1515 *
1516 * Split the pages in a map entry into a new object. This affords
1517 * easier removal of unused pages, and keeps object inheritance from
1518 * being a negative impact on memory usage.
1519 */
1520 void
1521 vm_object_split(vm_map_entry_t entry)
1522 {
1523 struct pctrie_iter pages;
1524 vm_page_t m;
1525 vm_object_t orig_object, new_object, backing_object;
1526 vm_pindex_t offidxstart;
1527 vm_size_t size;
1528
1529 orig_object = entry->object.vm_object;
1530 KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0,
1531 ("vm_object_split: Splitting object with multiple mappings."));
1532 if ((orig_object->flags & OBJ_ANON) == 0)
1533 return;
1534 if (orig_object->ref_count <= 1)
1535 return;
1536 VM_OBJECT_WUNLOCK(orig_object);
1537
1538 offidxstart = OFF_TO_IDX(entry->offset);
1539 size = atop(entry->end - entry->start);
1540
1541 new_object = vm_object_allocate_anon(size, orig_object,
1542 orig_object->cred, ptoa(size));
1543
1544 /*
1545 * We must wait for the orig_object to complete any in-progress
1546 * collapse so that the swap blocks are stable below. The
1547 * additional reference on backing_object by new object will
1548 * prevent further collapse operations until split completes.
1549 */
1550 VM_OBJECT_WLOCK(orig_object);
1551 vm_object_collapse_wait(orig_object);
1552
1553 /*
1554 * At this point, the new object is still private, so the order in
1555 * which the original and new objects are locked does not matter.
1556 */
1557 VM_OBJECT_WLOCK(new_object);
1558 new_object->domain = orig_object->domain;
1559 backing_object = orig_object->backing_object;
1560 if (backing_object != NULL) {
1561 vm_object_backing_insert_ref(new_object, backing_object);
1562 new_object->backing_object_offset =
1563 orig_object->backing_object_offset + entry->offset;
1564 }
1565 if (orig_object->cred != NULL) {
1566 crhold(orig_object->cred);
1567 KASSERT(orig_object->charge >= ptoa(size),
1568 ("orig_object->charge < 0"));
1569 orig_object->charge -= ptoa(size);
1570 }
1571
1572 /*
1573 * Mark the split operation so that swap_pager_getpages() knows
1574 * that the object is in transition.
1575 */
1576 vm_object_set_flag(orig_object, OBJ_SPLIT);
1577 vm_page_iter_limit_init(&pages, orig_object, offidxstart + size);
1578 retry:
1579 pctrie_iter_reset(&pages);
1580 for (m = vm_page_iter_lookup_ge(&pages, offidxstart); m != NULL;
1581 m = vm_radix_iter_step(&pages)) {
1582 /*
1583 * We must wait for pending I/O to complete before we can
1584 * rename the page.
1585 *
1586 * We do not have to VM_PROT_NONE the page as mappings should
1587 * not be changed by this operation.
1588 */
1589 if (vm_page_tryxbusy(m) == 0) {
1590 VM_OBJECT_WUNLOCK(new_object);
1591 if (vm_page_busy_sleep(m, "spltwt", 0))
1592 VM_OBJECT_WLOCK(orig_object);
1593 VM_OBJECT_WLOCK(new_object);
1594 goto retry;
1595 }
1596
1597 /*
1598 * The page was left invalid. Likely placed there by
1599 * an incomplete fault. Just remove and ignore.
1600 */
1601 if (vm_page_none_valid(m)) {
1602 if (vm_page_iter_remove(&pages))
1603 vm_page_free(m);
1604 continue;
1605 }
1606
1607 /* vm_page_rename() will dirty the page. */
1608 if (vm_page_rename(&pages, new_object, m->pindex - offidxstart)) {
1609 vm_page_xunbusy(m);
1610 VM_OBJECT_WUNLOCK(new_object);
1611 VM_OBJECT_WUNLOCK(orig_object);
1612 vm_radix_wait();
1613 VM_OBJECT_WLOCK(orig_object);
1614 VM_OBJECT_WLOCK(new_object);
1615 goto retry;
1616 }
1617
1618 #if VM_NRESERVLEVEL > 0
1619 /*
1620 * If some of the reservation's allocated pages remain with
1621 * the original object, then transferring the reservation to
1622 * the new object is neither particularly beneficial nor
1623 * particularly harmful as compared to leaving the reservation
1624 * with the original object. If, however, all of the
1625 * reservation's allocated pages are transferred to the new
1626 * object, then transferring the reservation is typically
1627 * beneficial. Determining which of these two cases applies
1628 * would be more costly than unconditionally renaming the
1629 * reservation.
1630 */
1631 vm_reserv_rename(m, new_object, orig_object, offidxstart);
1632 #endif
1633 }
1634
1635 /*
1636 * swap_pager_copy() can sleep, in which case the orig_object's
1637 * and new_object's locks are released and reacquired.
1638 */
1639 swap_pager_copy(orig_object, new_object, offidxstart, 0);
1640
1641 TAILQ_FOREACH(m, &new_object->memq, listq)
1642 vm_page_xunbusy(m);
1643
1644 vm_object_clear_flag(orig_object, OBJ_SPLIT);
1645 VM_OBJECT_WUNLOCK(orig_object);
1646 VM_OBJECT_WUNLOCK(new_object);
1647 entry->object.vm_object = new_object;
1648 entry->offset = 0LL;
1649 vm_object_deallocate(orig_object);
1650 VM_OBJECT_WLOCK(new_object);
1651 }
1652
1653 static vm_page_t
1654 vm_object_collapse_scan_wait(struct pctrie_iter *pages, vm_object_t object,
1655 vm_page_t p)
1656 {
1657 vm_object_t backing_object;
1658
1659 VM_OBJECT_ASSERT_WLOCKED(object);
1660 backing_object = object->backing_object;
1661 VM_OBJECT_ASSERT_WLOCKED(backing_object);
1662
1663 KASSERT(p == NULL || p->object == object || p->object == backing_object,
1664 ("invalid ownership %p %p %p", p, object, backing_object));
1665 /* The page is only NULL when rename fails. */
1666 if (p == NULL) {
1667 VM_OBJECT_WUNLOCK(object);
1668 VM_OBJECT_WUNLOCK(backing_object);
1669 vm_radix_wait();
1670 VM_OBJECT_WLOCK(object);
1671 } else if (p->object == object) {
1672 VM_OBJECT_WUNLOCK(backing_object);
1673 if (vm_page_busy_sleep(p, "vmocol", 0))
1674 VM_OBJECT_WLOCK(object);
1675 } else {
1676 VM_OBJECT_WUNLOCK(object);
1677 if (!vm_page_busy_sleep(p, "vmocol", 0))
1678 VM_OBJECT_WUNLOCK(backing_object);
1679 VM_OBJECT_WLOCK(object);
1680 }
1681 VM_OBJECT_WLOCK(backing_object);
1682 vm_page_iter_init(pages, backing_object);
1683 return (vm_page_iter_lookup_ge(pages, 0));
1684 }
1685
1686 static void
1687 vm_object_collapse_scan(vm_object_t object)
1688 {
1689 struct pctrie_iter pages;
1690 vm_object_t backing_object;
1691 vm_page_t next, p, pp;
1692 vm_pindex_t backing_offset_index, new_pindex;
1693
1694 VM_OBJECT_ASSERT_WLOCKED(object);
1695 VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
1696
1697 backing_object = object->backing_object;
1698 backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1699
1700 /*
1701 * Scan the backing object's pages, moving or freeing each one.
1702 */
1703 vm_page_iter_init(&pages, backing_object);
1704 for (p = vm_page_iter_lookup_ge(&pages, 0); p != NULL; p = next) {
1705 next = TAILQ_NEXT(p, listq);
1706 new_pindex = p->pindex - backing_offset_index;
1707
1708 /*
1709 * Check for busy page
1710 */
1711 if (vm_page_tryxbusy(p) == 0) {
1712 next = vm_object_collapse_scan_wait(&pages, object, p);
1713 continue;
1714 }
1715
1716 KASSERT(object->backing_object == backing_object,
1717 ("vm_object_collapse_scan: backing object mismatch %p != %p",
1718 object->backing_object, backing_object));
1719 KASSERT(p->object == backing_object,
1720 ("vm_object_collapse_scan: object mismatch %p != %p",
1721 p->object, backing_object));
1722
1723 if (p->pindex < backing_offset_index ||
1724 new_pindex >= object->size) {
1725 vm_pager_freespace(backing_object, p->pindex, 1);
1726
1727 KASSERT(!pmap_page_is_mapped(p),
1728 ("freeing mapped page %p", p));
1729 if (vm_page_iter_remove(&pages))
1730 vm_page_free(p);
1731 next = vm_radix_iter_step(&pages);
1732 continue;
1733 }
1734
1735 if (!vm_page_all_valid(p)) {
1736 KASSERT(!pmap_page_is_mapped(p),
1737 ("freeing mapped page %p", p));
1738 if (vm_page_iter_remove(&pages))
1739 vm_page_free(p);
1740 next = vm_radix_iter_step(&pages);
1741 continue;
1742 }
1743
1744 pp = vm_page_lookup(object, new_pindex);
1745 if (pp != NULL && vm_page_tryxbusy(pp) == 0) {
1746 vm_page_xunbusy(p);
1747 /*
1748 * The page in the parent is busy and possibly not
1749 * (yet) valid. Until its state is finalized by the
1750 * busy bit owner, we can't tell whether it shadows the
1751 * original page.
1752 */
1753 next = vm_object_collapse_scan_wait(&pages, object, pp);
1754 continue;
1755 }
1756
1757 if (pp != NULL && vm_page_none_valid(pp)) {
1758 /*
1759 * The page was invalid in the parent. Likely placed
1760 * there by an incomplete fault. Just remove and
1761 * ignore. p can replace it.
1762 */
1763 if (vm_page_remove(pp))
1764 vm_page_free(pp);
1765 pp = NULL;
1766 }
1767
1768 if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
1769 NULL)) {
1770 /*
1771 * The page already exists in the parent OR swap exists
1772 * for this location in the parent. Leave the parent's
1773 * page alone. Destroy the original page from the
1774 * backing object.
1775 */
1776 vm_pager_freespace(backing_object, p->pindex, 1);
1777 KASSERT(!pmap_page_is_mapped(p),
1778 ("freeing mapped page %p", p));
1779 if (pp != NULL)
1780 vm_page_xunbusy(pp);
1781 if (vm_page_iter_remove(&pages))
1782 vm_page_free(p);
1783 next = vm_radix_iter_step(&pages);
1784 continue;
1785 }
1786
1787 /*
1788 * Page does not exist in parent, rename the page from the
1789 * backing object to the main object.
1790 *
1791 * If the page was mapped to a process, it can remain mapped
1792 * through the rename. vm_page_rename() will dirty the page.
1793 */
1794 if (vm_page_rename(&pages, object, new_pindex)) {
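			/*
			 * The rename failed, most likely because a radix-trie
			 * node could not be allocated.  Unbusy the page, wait
			 * for memory, and rescan the backing object.
			 */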
1795 vm_page_xunbusy(p);
1796 next = vm_object_collapse_scan_wait(&pages, object,
1797 NULL);
1798 continue;
1799 }
1800
1801 /* Use the old pindex to free the right page. */
1802 vm_pager_freespace(backing_object, new_pindex +
1803 backing_offset_index, 1);
1804
1805 #if VM_NRESERVLEVEL > 0
1806 /*
1807 * Rename the reservation.
1808 */
1809 vm_reserv_rename(p, object, backing_object,
1810 backing_offset_index);
1811 #endif
1812 vm_page_xunbusy(p);
1813 next = vm_radix_iter_step(&pages);
1814 }
1815 return;
1816 }
1817
1818 /*
1819 * vm_object_collapse:
1820 *
1821 * Collapse an object with the object backing it.
1822 * Pages in the backing object are moved into the
1823 * parent, and the backing object is deallocated.
1824 */
1825 void
1826 vm_object_collapse(vm_object_t object)
1827 {
1828 vm_object_t backing_object, new_backing_object;
1829
1830 VM_OBJECT_ASSERT_WLOCKED(object);
1831
1832 while (TRUE) {
1833 KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON,
1834 ("collapsing invalid object"));
1835
1836 /*
1837 * Wait for the backing_object to finish any pending
1838 * collapse so that the caller sees the shortest possible
1839 * shadow chain.
1840 */
1841 backing_object = vm_object_backing_collapse_wait(object);
1842 if (backing_object == NULL)
1843 return;
1844
1845 KASSERT(object->ref_count > 0 &&
1846 object->ref_count > atomic_load_int(&object->shadow_count),
1847 ("collapse with invalid ref %d or shadow %d count.",
1848 object->ref_count, atomic_load_int(&object->shadow_count)));
1849 KASSERT((backing_object->flags &
1850 (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
1851 ("vm_object_collapse: Backing object already collapsing."));
1852 KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
1853 ("vm_object_collapse: object is already collapsing."));
1854
1855 /*
1856 * We know that we can either collapse the backing object if
1857 * the parent is the only reference to it, or (perhaps) have
1858 * the parent bypass the object if the parent happens to shadow
1859 * all the resident pages in the entire backing object.
1860 */
1861 if (backing_object->ref_count == 1) {
1862 KASSERT(atomic_load_int(&backing_object->shadow_count)
1863 == 1,
1864 ("vm_object_collapse: shadow_count: %d",
1865 atomic_load_int(&backing_object->shadow_count)));
1866 vm_object_pip_add(object, 1);
1867 vm_object_set_flag(object, OBJ_COLLAPSING);
1868 vm_object_pip_add(backing_object, 1);
1869 vm_object_set_flag(backing_object, OBJ_DEAD);
1870
1871 /*
1872 * If there is exactly one reference to the backing
1873 * object, we can collapse it into the parent.
1874 */
1875 vm_object_collapse_scan(object);
1876
1877 /*
1878 * Move the pager from backing_object to object.
1879 *
1880 * swap_pager_copy() can sleep, in which case the
1881 * backing_object's and object's locks are released and
1882 * reacquired.
1883 */
1884 swap_pager_copy(backing_object, object,
1885 OFF_TO_IDX(object->backing_object_offset), TRUE);
1886
1887 /*
1888 * Object now shadows whatever backing_object did.
1889 */
1890 vm_object_clear_flag(object, OBJ_COLLAPSING);
1891 vm_object_backing_transfer(object, backing_object);
1892 object->backing_object_offset +=
1893 backing_object->backing_object_offset;
1894 VM_OBJECT_WUNLOCK(object);
1895 vm_object_pip_wakeup(object);
1896
1897 /*
1898 * Discard backing_object.
1899 *
1900 * Since the backing object has no pages, no pager left,
1901 * and no object references within it, all that is
1902 * necessary is to dispose of it.
1903 */
1904 KASSERT(backing_object->ref_count == 1, (
1905 "backing_object %p was somehow re-referenced during collapse!",
1906 backing_object));
1907 vm_object_pip_wakeup(backing_object);
1908 (void)refcount_release(&backing_object->ref_count);
1909 umtx_shm_object_terminated(backing_object);
1910 vm_object_terminate(backing_object);
1911 counter_u64_add(object_collapses, 1);
1912 VM_OBJECT_WLOCK(object);
1913 } else {
1914 /*
1915 * If we do not entirely shadow the backing object,
1916 * there is nothing we can do so we give up.
1917 *
1918 * The object lock and backing_object lock must not
1919 * be dropped during this sequence.
1920 */
1921 if (!swap_pager_scan_all_shadowed(object)) {
1922 VM_OBJECT_WUNLOCK(backing_object);
1923 break;
1924 }
1925
1926 /*
1927 * Make the parent shadow the next object in the
1928 * chain. Deallocating backing_object will not remove
1929 * it, since its reference count is at least 2.
1930 */
1931 vm_object_backing_remove_locked(object);
1932 new_backing_object = backing_object->backing_object;
1933 if (new_backing_object != NULL) {
1934 vm_object_backing_insert_ref(object,
1935 new_backing_object);
1936 object->backing_object_offset +=
1937 backing_object->backing_object_offset;
1938 }
1939
1940 /*
1941 * Drop the reference count on backing_object. Since
1942 * its ref_count was at least 2, it will not vanish.
1943 */
1944 (void)refcount_release(&backing_object->ref_count);
1945 KASSERT(backing_object->ref_count >= 1, (
1946 "backing_object %p was somehow dereferenced during collapse!",
1947 backing_object));
1948 VM_OBJECT_WUNLOCK(backing_object);
1949 counter_u64_add(object_bypasses, 1);
1950 }
1951
1952 /*
1953 * Try again with this object's new backing object.
1954 */
1955 }
1956 }
1957
1958 /*
1959 * vm_object_page_remove:
1960 *
1961 * For the given object, either frees or invalidates each of the
1962 * specified pages. In general, a page is freed. However, if a page is
1963 * wired for any reason other than the existence of a managed, wired
1964 * mapping, then it may be invalidated but not removed from the object.
1965 * Pages are specified by the given range ["start", "end") and the option
1966 * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range
1967 * extends from "start" to the end of the object. If the option
1968 * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
1969 * specified range are affected. If the option OBJPR_NOTMAPPED is
1970 * specified, then the pages within the specified range must have no
1971 * mappings. Otherwise, if this option is not specified, any mappings to
1972 * the specified pages are removed before the pages are freed or
1973 * invalidated.
1974 *
1975 * In general, this operation should only be performed on objects that
1976 * contain managed pages. There are, however, two exceptions. First, it
1977 * is performed on the kernel and kmem objects by vm_map_entry_delete().
1978 * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
1979 * backed pages. In both of these cases, the option OBJPR_CLEANONLY must
1980 * not be specified and the option OBJPR_NOTMAPPED must be specified.
1981 *
1982 * The object must be locked.
1983 */
1984 void
1985 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1986 int options)
1987 {
1988 struct pctrie_iter pages;
1989 vm_page_t p;
1990
1991 VM_OBJECT_ASSERT_WLOCKED(object);
1992 KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
1993 (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
1994 ("vm_object_page_remove: illegal options for object %p", object));
1995 if (object->resident_page_count == 0)
1996 return;
1997 vm_object_pip_add(object, 1);
1998 vm_page_iter_limit_init(&pages, object, end);
1999 again:
2000 pctrie_iter_reset(&pages);
2001 for (p = vm_page_iter_lookup_ge(&pages, start); p != NULL;
2002 p = vm_radix_iter_step(&pages)) {
2003 /*
2004 * Skip invalid pages if asked to do so. Try to avoid acquiring
2005 * the busy lock, as some consumers rely on this to avoid
2006 * deadlocks.
2007 *
2008 * A thread may concurrently transition the page from invalid to
2009 * valid using only the busy lock, so the result of this check
2010 * is immediately stale. It is up to consumers to handle this,
2011 * for instance by ensuring that all invalid->valid transitions
2012 * happen with a mutex held, as may be possible for a
2013 * filesystem.
2014 */
2015 if ((options & OBJPR_VALIDONLY) != 0 && vm_page_none_valid(p))
2016 continue;
2017
2018 /*
2019 * If the page is wired for any reason besides the existence
2020 * of managed, wired mappings, then it cannot be freed. For
2021 * example, fictitious pages, which represent device memory,
2022 * are inherently wired and cannot be freed. They can,
2023 * however, be invalidated if the option OBJPR_CLEANONLY is
2024 * not specified.
2025 */
2026 if (vm_page_tryxbusy(p) == 0) {
2027 if (vm_page_busy_sleep(p, "vmopar", 0))
2028 VM_OBJECT_WLOCK(object);
2029 goto again;
2030 }
2031 if ((options & OBJPR_VALIDONLY) != 0 && vm_page_none_valid(p)) {
2032 vm_page_xunbusy(p);
2033 continue;
2034 }
2035 if (vm_page_wired(p)) {
2036 wired:
2037 if ((options & OBJPR_NOTMAPPED) == 0 &&
2038 object->ref_count != 0)
2039 pmap_remove_all(p);
2040 if ((options & OBJPR_CLEANONLY) == 0) {
2041 vm_page_invalid(p);
2042 vm_page_undirty(p);
2043 }
2044 vm_page_xunbusy(p);
2045 continue;
2046 }
2047 KASSERT((p->flags & PG_FICTITIOUS) == 0,
2048 ("vm_object_page_remove: page %p is fictitious", p));
2049 if ((options & OBJPR_CLEANONLY) != 0 &&
2050 !vm_page_none_valid(p)) {
2051 if ((options & OBJPR_NOTMAPPED) == 0 &&
2052 object->ref_count != 0 &&
2053 !vm_page_try_remove_write(p))
2054 goto wired;
2055 if (p->dirty != 0) {
2056 vm_page_xunbusy(p);
2057 continue;
2058 }
2059 }
2060 if ((options & OBJPR_NOTMAPPED) == 0 &&
2061 object->ref_count != 0 && !vm_page_try_remove_all(p))
2062 goto wired;
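		/*
		 * No wiring, mapping, or option prevents it: remove the page
		 * from the object via the iterator and free it.
		 */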
2063 vm_page_iter_free(&pages, p);
2064 }
2065 vm_object_pip_wakeup(object);
2066
2067 vm_pager_freespace(object, start, (end == 0 ? object->size : end) -
2068 start);
2069 }
2070
2071 /*
2072 * vm_object_page_noreuse:
2073 *
2074 * For the given object, attempt to move the specified pages to
2075 * the head of the inactive queue. This bypasses regular LRU
2076 * operation and allows the pages to be reused quickly under memory
2077 * pressure. If a page is wired for any reason, then it will not
2078 * be queued. Pages are specified by the range ["start", "end").
2079 * As a special case, if "end" is zero, then the range extends from
2080 * "start" to the end of the object.
2081 *
2082 * This operation should only be performed on objects that
2083 * contain non-fictitious, managed pages.
2084 *
2085 * The object must be locked.
2086 */
2087 void
2088 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2089 {
2090 vm_page_t p, next;
2091
2092 VM_OBJECT_ASSERT_LOCKED(object);
2093 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
2094 ("vm_object_page_noreuse: illegal object %p", object));
2095 if (object->resident_page_count == 0)
2096 return;
2097 p = vm_page_find_least(object, start);
2098
2099 /*
2100 * Here, the variable "p" is either (1) the page with the least pindex
2101 * greater than or equal to the parameter "start" or (2) NULL.
2102 */
2103 for (; p != NULL && (p->pindex < end || end == 0); p = next) {
2104 next = TAILQ_NEXT(p, listq);
2105 vm_page_deactivate_noreuse(p);
2106 }
2107 }
2108
2109 /*
2110 * Populate the specified range of the object with valid pages. Returns
2111 * TRUE if the range is successfully populated and FALSE otherwise.
2112 *
2113 * Note: This function should be optimized to pass a larger array of
2114 * pages to vm_pager_get_pages() before it is applied to a non-
2115 * OBJT_DEVICE object.
2116 *
2117 * The object must be locked.
2118 */
2119 boolean_t
2120 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2121 {
2122 vm_page_t m;
2123 vm_pindex_t pindex;
2124 int rv;
2125
2126 VM_OBJECT_ASSERT_WLOCKED(object);
2127 for (pindex = start; pindex < end; pindex++) {
2128 rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL);
2129 if (rv != VM_PAGER_OK)
2130 break;
2131
2132 /*
2133 * Keep "m" busy because a subsequent iteration may unlock
2134 * the object.
2135 */
2136 }
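	/*
	 * Unbusy any pages that were grabbed above, whether or not the
	 * entire range was populated.
	 */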
2137 if (pindex > start) {
2138 m = vm_page_lookup(object, start);
2139 while (m != NULL && m->pindex < pindex) {
2140 vm_page_xunbusy(m);
2141 m = TAILQ_NEXT(m, listq);
2142 }
2143 }
2144 return (pindex == end);
2145 }
2146
2147 /*
2148 * Routine: vm_object_coalesce
2149 * Function: Coalesces two objects backing up adjoining
2150 * regions of memory into a single object.
2151 *
2152 * returns TRUE if objects were combined.
2153 *
2154 * NOTE: Only works at the moment if the second object is NULL -
2155 * if it's not, which object do we lock first?
2156 *
2157 * Parameters:
2158 * prev_object First object to coalesce
2159 * prev_offset Offset into prev_object
2160 * prev_size Size of reference to prev_object
2161 * next_size Size of reference to the second object
2162 * reserved Indicator that extension region has
2163 * swap accounted for
2164 *
2165 * Conditions:
2166 * The object must *not* be locked.
2167 */
2168 boolean_t
2169 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
2170 vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2171 {
2172 vm_pindex_t next_pindex;
2173
2174 if (prev_object == NULL)
2175 return (TRUE);
2176 if ((prev_object->flags & OBJ_ANON) == 0)
2177 return (FALSE);
2178
2179 VM_OBJECT_WLOCK(prev_object);
2180 /*
2181 * Try to collapse the object first.
2182 */
2183 vm_object_collapse(prev_object);
2184
2185 	/*
2186 	 * Can't coalesce if the object has more than one reference, is paged
2187 	 * out, shadows another object, or has a copy elsewhere (any of which
2188 	 * mean that the pages not mapped to prev_entry may be in use anyway).
2189 	 */
2190 if (prev_object->backing_object != NULL) {
2191 VM_OBJECT_WUNLOCK(prev_object);
2192 return (FALSE);
2193 }
2194
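	/*
	 * Convert the byte sizes to page counts and compute the page index
	 * within prev_object at which the second region would begin.
	 */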
2195 prev_size >>= PAGE_SHIFT;
2196 next_size >>= PAGE_SHIFT;
2197 next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2198
2199 if (prev_object->ref_count > 1 &&
2200 prev_object->size != next_pindex &&
2201 (prev_object->flags & OBJ_ONEMAPPING) == 0) {
2202 VM_OBJECT_WUNLOCK(prev_object);
2203 return (FALSE);
2204 }
2205
2206 /*
2207 * Account for the charge.
2208 */
2209 if (prev_object->cred != NULL) {
2210 /*
2211 		 * If prev_object was charged, then this mapping,
2212 		 * although not charged now, may become writable
2213 		 * later.  A non-NULL cred in the object would prevent
2214 		 * swap reservation when write access is enabled, so
2215 		 * reserve swap now.  A failed reservation causes a
2216 		 * separate object to be allocated for the map entry,
2217 		 * and swap reservation for that entry is managed at
2218 		 * the appropriate time.
2219 */
2220 if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
2221 prev_object->cred)) {
2222 VM_OBJECT_WUNLOCK(prev_object);
2223 return (FALSE);
2224 }
2225 prev_object->charge += ptoa(next_size);
2226 }
2227
2228 /*
2229 * Remove any pages that may still be in the object from a previous
2230 * deallocation.
2231 */
2232 if (next_pindex < prev_object->size) {
2233 vm_object_page_remove(prev_object, next_pindex, next_pindex +
2234 next_size, 0);
2235 #if 0
2236 if (prev_object->cred != NULL) {
2237 KASSERT(prev_object->charge >=
2238 ptoa(prev_object->size - next_pindex),
2239 ("object %p overcharged 1 %jx %jx", prev_object,
2240 (uintmax_t)next_pindex, (uintmax_t)next_size));
2241 prev_object->charge -= ptoa(prev_object->size -
2242 next_pindex);
2243 }
2244 #endif
2245 }
2246
2247 /*
2248 * Extend the object if necessary.
2249 */
2250 if (next_pindex + next_size > prev_object->size)
2251 prev_object->size = next_pindex + next_size;
2252
2253 VM_OBJECT_WUNLOCK(prev_object);
2254 return (TRUE);
2255 }
2256
2257 void
2258 vm_object_set_writeable_dirty_(vm_object_t object)
2259 {
2260 atomic_add_int(&object->generation, 1);
2261 }
2262
2263 bool
2264 vm_object_mightbedirty_(vm_object_t object)
2265 {
2266 return (object->generation != object->cleangeneration);
2267 }
2268
2269 /*
2270 * vm_object_unwire:
2271 *
2272 * For each page offset within the specified range of the given object,
2273 * find the highest-level page in the shadow chain and unwire it. A page
2274 * must exist at every page offset, and the highest-level page must be
2275 * wired.
2276 */
2277 void
2278 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
2279 uint8_t queue)
2280 {
2281 vm_object_t tobject, t1object;
2282 vm_page_t m, tm;
2283 vm_pindex_t end_pindex, pindex, tpindex;
2284 int depth, locked_depth;
2285
2286 KASSERT((offset & PAGE_MASK) == 0,
2287 ("vm_object_unwire: offset is not page aligned"));
2288 KASSERT((length & PAGE_MASK) == 0,
2289 ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
2290 /* The wired count of a fictitious page never changes. */
2291 if ((object->flags & OBJ_FICTITIOUS) != 0)
2292 return;
2293 pindex = OFF_TO_IDX(offset);
2294 end_pindex = pindex + atop(length);
2295 again:
2296 locked_depth = 1;
2297 VM_OBJECT_RLOCK(object);
2298 m = vm_page_find_least(object, pindex);
2299 while (pindex < end_pindex) {
2300 if (m == NULL || pindex < m->pindex) {
2301 /*
2302 * The first object in the shadow chain doesn't
2303 * contain a page at the current index. Therefore,
2304 * the page must exist in a backing object.
2305 */
2306 tobject = object;
2307 tpindex = pindex;
2308 depth = 0;
2309 do {
2310 tpindex +=
2311 OFF_TO_IDX(tobject->backing_object_offset);
2312 tobject = tobject->backing_object;
2313 KASSERT(tobject != NULL,
2314 ("vm_object_unwire: missing page"));
2315 if ((tobject->flags & OBJ_FICTITIOUS) != 0)
2316 goto next_page;
2317 depth++;
2318 if (depth == locked_depth) {
2319 locked_depth++;
2320 VM_OBJECT_RLOCK(tobject);
2321 }
2322 } while ((tm = vm_page_lookup(tobject, tpindex)) ==
2323 NULL);
2324 } else {
2325 tm = m;
2326 m = TAILQ_NEXT(m, listq);
2327 }
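		/*
		 * If the page is busy, release every accumulated object lock
		 * except the one protecting the page's owner, wait for the
		 * page to be unbusied, and restart the scan.
		 */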
2328 if (vm_page_trysbusy(tm) == 0) {
2329 for (tobject = object; locked_depth >= 1;
2330 locked_depth--) {
2331 t1object = tobject->backing_object;
2332 if (tm->object != tobject)
2333 VM_OBJECT_RUNLOCK(tobject);
2334 tobject = t1object;
2335 }
2336 tobject = tm->object;
2337 if (!vm_page_busy_sleep(tm, "unwbo",
2338 VM_ALLOC_IGN_SBUSY))
2339 VM_OBJECT_RUNLOCK(tobject);
2340 goto again;
2341 }
2342 vm_page_unwire(tm, queue);
2343 vm_page_sunbusy(tm);
2344 next_page:
2345 pindex++;
2346 }
2347 /* Release the accumulated object locks. */
2348 for (tobject = object; locked_depth >= 1; locked_depth--) {
2349 t1object = tobject->backing_object;
2350 VM_OBJECT_RUNLOCK(tobject);
2351 tobject = t1object;
2352 }
2353 }
2354
2355 /*
2356 * Return the vnode for the given object, or NULL if none exists.
2357 * For tmpfs objects, the function may return NULL if there is
2358 * no vnode allocated at the time of the call.
2359 */
2360 struct vnode *
2361 vm_object_vnode(vm_object_t object)
2362 {
2363 struct vnode *vp;
2364
2365 VM_OBJECT_ASSERT_LOCKED(object);
2366 vm_pager_getvp(object, &vp, NULL);
2367 return (vp);
2368 }
2369
2370 /*
2371 * Busy the vm object. This prevents new pages belonging to the object from
2372 * becoming busy. Existing pages persist as busy. Callers are responsible
2373 * for checking page state before proceeding.
2374 */
2375 void
2376 vm_object_busy(vm_object_t obj)
2377 {
2378
2379 VM_OBJECT_ASSERT_LOCKED(obj);
2380
2381 blockcount_acquire(&obj->busy, 1);
2382 /* The fence is required to order loads of page busy. */
2383 atomic_thread_fence_acq_rel();
2384 }
2385
2386 void
2387 vm_object_unbusy(vm_object_t obj)
2388 {
2389
2390 blockcount_release(&obj->busy, 1);
2391 }
2392
2393 void
2394 vm_object_busy_wait(vm_object_t obj, const char *wmesg)
2395 {
2396
2397 VM_OBJECT_ASSERT_UNLOCKED(obj);
2398
2399 (void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM);
2400 }
2401
2402 /*
2403 * This function aims to determine if the object is mapped,
2404 * specifically, if it is referenced by a vm_map_entry. Because
2405 * objects occasionally acquire transient references that do not
2406 * represent a mapping, the method used here is inexact. However, it
2407 * has very low overhead and is good enough for the advisory
2408 * vm.vmtotal sysctl.
2409 */
2410 bool
2411 vm_object_is_active(vm_object_t obj)
2412 {
2413
2414 return (obj->ref_count > atomic_load_int(&obj->shadow_count));
2415 }
2416
2417 static int
2418 vm_object_list_handler(struct sysctl_req *req, bool swap_only)
2419 {
2420 struct kinfo_vmobject *kvo;
2421 char *fullpath, *freepath;
2422 struct vnode *vp;
2423 struct vattr va;
2424 vm_object_t obj;
2425 vm_page_t m;
2426 struct cdev *cdev;
2427 struct cdevsw *csw;
2428 u_long sp;
2429 int count, error, ref;
2430 key_t key;
2431 unsigned short seq;
2432 bool want_path;
2433
2434 if (req->oldptr == NULL) {
2435 /*
2436 * If an old buffer has not been provided, generate an
2437 * estimate of the space needed for a subsequent call.
2438 */
2439 mtx_lock(&vm_object_list_mtx);
2440 count = 0;
2441 TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2442 if (obj->type == OBJT_DEAD)
2443 continue;
2444 count++;
2445 }
2446 mtx_unlock(&vm_object_list_mtx);
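		/*
		 * Pad the count by roughly 10% to allow for objects created
		 * after the estimate is taken.
		 */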
2447 return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
2448 count * 11 / 10));
2449 }
2450
2451 want_path = !(swap_only || jailed(curthread->td_ucred));
2452 kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK | M_ZERO);
2453 error = 0;
2454
2455 /*
2456 * VM objects are type stable and are never removed from the
2457 * list once added. This allows us to safely read obj->object_list
2458 * after reacquiring the VM object lock.
2459 */
2460 mtx_lock(&vm_object_list_mtx);
2461 TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2462 if (obj->type == OBJT_DEAD ||
2463 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0))
2464 continue;
2465 VM_OBJECT_RLOCK(obj);
2466 if (obj->type == OBJT_DEAD ||
2467 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0)) {
2468 VM_OBJECT_RUNLOCK(obj);
2469 continue;
2470 }
2471 mtx_unlock(&vm_object_list_mtx);
2472 kvo->kvo_size = ptoa(obj->size);
2473 kvo->kvo_resident = obj->resident_page_count;
2474 kvo->kvo_ref_count = obj->ref_count;
2475 kvo->kvo_shadow_count = atomic_load_int(&obj->shadow_count);
2476 kvo->kvo_memattr = obj->memattr;
2477 kvo->kvo_active = 0;
2478 kvo->kvo_inactive = 0;
2479 kvo->kvo_flags = 0;
2480 if (!swap_only) {
2481 TAILQ_FOREACH(m, &obj->memq, listq) {
2482 /*
2483 * A page may belong to the object but be
2484 * dequeued and set to PQ_NONE while the
2485 * object lock is not held. This makes the
2486 * reads of m->queue below racy, and we do not
2487 * count pages set to PQ_NONE. However, this
2488 * sysctl is only meant to give an
2489 * approximation of the system anyway.
2490 */
2491 if (vm_page_active(m))
2492 kvo->kvo_active++;
2493 else if (vm_page_inactive(m))
2494 kvo->kvo_inactive++;
2495 else if (vm_page_in_laundry(m))
2496 kvo->kvo_laundry++;
2497 }
2498 }
2499
2500 kvo->kvo_vn_fileid = 0;
2501 kvo->kvo_vn_fsid = 0;
2502 kvo->kvo_vn_fsid_freebsd11 = 0;
2503 freepath = NULL;
2504 fullpath = "";
2505 vp = NULL;
2506 kvo->kvo_type = vm_object_kvme_type(obj, want_path ? &vp :
2507 NULL);
2508 if (vp != NULL) {
2509 vref(vp);
2510 } else if ((obj->flags & OBJ_ANON) != 0) {
2511 MPASS(kvo->kvo_type == KVME_TYPE_SWAP);
2512 kvo->kvo_me = (uintptr_t)obj;
2513 /* tmpfs objs are reported as vnodes */
2514 kvo->kvo_backing_obj = (uintptr_t)obj->backing_object;
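			/*
			 * Clamp the swapped-page count so that it fits in the
			 * 32-bit kvo_swapped field.
			 */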
2515 sp = swap_pager_swapped_pages(obj);
2516 kvo->kvo_swapped = sp > UINT32_MAX ? UINT32_MAX : sp;
2517 }
2518 if ((obj->type == OBJT_DEVICE || obj->type == OBJT_MGTDEVICE) &&
2519 (obj->flags & OBJ_CDEVH) != 0) {
2520 cdev = obj->un_pager.devp.handle;
2521 if (cdev != NULL) {
2522 csw = dev_refthread(cdev, &ref);
2523 if (csw != NULL) {
2524 strlcpy(kvo->kvo_path, cdev->si_name,
2525 sizeof(kvo->kvo_path));
2526 dev_relthread(cdev, ref);
2527 }
2528 }
2529 }
2530 VM_OBJECT_RUNLOCK(obj);
2531 if ((obj->flags & OBJ_SYSVSHM) != 0) {
2532 kvo->kvo_flags |= KVMO_FLAG_SYSVSHM;
2533 shmobjinfo(obj, &key, &seq);
2534 kvo->kvo_vn_fileid = key;
2535 kvo->kvo_vn_fsid_freebsd11 = seq;
2536 }
2537 if ((obj->flags & OBJ_POSIXSHM) != 0) {
2538 kvo->kvo_flags |= KVMO_FLAG_POSIXSHM;
2539 shm_get_path(obj, kvo->kvo_path,
2540 sizeof(kvo->kvo_path));
2541 }
2542 if (vp != NULL) {
2543 vn_fullpath(vp, &fullpath, &freepath);
2544 vn_lock(vp, LK_SHARED | LK_RETRY);
2545 if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
2546 kvo->kvo_vn_fileid = va.va_fileid;
2547 kvo->kvo_vn_fsid = va.va_fsid;
2548 kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
2549 				/* truncated to 32 bits */
2550 }
2551 vput(vp);
2552 strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
2553 free(freepath, M_TEMP);
2554 }
2555
2556 /* Pack record size down */
2557 kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
2558 + strlen(kvo->kvo_path) + 1;
2559 kvo->kvo_structsize = roundup(kvo->kvo_structsize,
2560 sizeof(uint64_t));
2561 error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
2562 maybe_yield();
2563 mtx_lock(&vm_object_list_mtx);
2564 if (error)
2565 break;
2566 }
2567 mtx_unlock(&vm_object_list_mtx);
2568 free(kvo, M_TEMP);
2569 return (error);
2570 }
2571
2572 static int
2573 sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
2574 {
2575 return (vm_object_list_handler(req, false));
2576 }
2577
2578 SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
2579 CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
2580 "List of VM objects");
2581
2582 static int
2583 sysctl_vm_object_list_swap(SYSCTL_HANDLER_ARGS)
2584 {
2585 return (vm_object_list_handler(req, true));
2586 }
2587
2588 /*
2589  * This sysctl returns a list of the anonymous or swap objects.  The
2590  * intent is to provide a stripped, optimized list useful for analyzing
2591  * swap use.  Since non-swap (default) objects technically participate
2592  * in shadow chains and are converted to the swap type as needed by the
2593  * swap pager, we must report them as well.
2594 */
2595 SYSCTL_PROC(_vm, OID_AUTO, swap_objects,
2596 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 0,
2597 sysctl_vm_object_list_swap, "S,kinfo_vmobject",
2598 "List of swap VM objects");
2599
2600 #include "opt_ddb.h"
2601 #ifdef DDB
2602 #include <sys/kernel.h>
2603
2604 #include <sys/cons.h>
2605
2606 #include <ddb/ddb.h>
2607
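/*
 * Recursively check whether the given object appears in the object chain of
 * any entry in the given map, descending into submaps.
 */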
2608 static int
2609 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2610 {
2611 vm_map_t tmpm;
2612 vm_map_entry_t tmpe;
2613 vm_object_t obj;
2614
2615 if (map == 0)
2616 return 0;
2617
2618 if (entry == 0) {
2619 VM_MAP_ENTRY_FOREACH(tmpe, map) {
2620 if (_vm_object_in_map(map, object, tmpe)) {
2621 return 1;
2622 }
2623 }
2624 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2625 tmpm = entry->object.sub_map;
2626 VM_MAP_ENTRY_FOREACH(tmpe, tmpm) {
2627 if (_vm_object_in_map(tmpm, object, tmpe)) {
2628 return 1;
2629 }
2630 }
2631 } else if ((obj = entry->object.vm_object) != NULL) {
2632 for (; obj; obj = obj->backing_object)
2633 if (obj == object) {
2634 return 1;
2635 }
2636 }
2637 return 0;
2638 }
2639
2640 static int
2641 vm_object_in_map(vm_object_t object)
2642 {
2643 struct proc *p;
2644
2645 /* sx_slock(&allproc_lock); */
2646 FOREACH_PROC_IN_SYSTEM(p) {
2647 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2648 continue;
2649 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
2650 /* sx_sunlock(&allproc_lock); */
2651 return 1;
2652 }
2653 }
2654 /* sx_sunlock(&allproc_lock); */
2655 if (_vm_object_in_map(kernel_map, object, 0))
2656 return 1;
2657 return 0;
2658 }
2659
2660 DB_SHOW_COMMAND_FLAGS(vmochk, vm_object_check, DB_CMD_MEMSAFE)
2661 {
2662 vm_object_t object;
2663
2664 /*
2665 * make sure that internal objs are in a map somewhere
2666 * and none have zero ref counts.
2667 */
2668 TAILQ_FOREACH(object, &vm_object_list, object_list) {
2669 if ((object->flags & OBJ_ANON) != 0) {
2670 if (object->ref_count == 0) {
2671 db_printf("vmochk: internal obj has zero ref count: %ld\n",
2672 (long)object->size);
2673 }
2674 if (!vm_object_in_map(object)) {
2675 db_printf(
2676 "vmochk: internal obj is not in a map: "
2677 "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2678 object->ref_count, (u_long)object->size,
2679 (u_long)object->size,
2680 (void *)object->backing_object);
2681 }
2682 }
2683 if (db_pager_quit)
2684 return;
2685 }
2686 }
2687
2688 /*
2689 * vm_object_print: [ debug ]
2690 */
2691 DB_SHOW_COMMAND(object, vm_object_print_static)
2692 {
2693 /* XXX convert args. */
2694 vm_object_t object = (vm_object_t)addr;
2695 boolean_t full = have_addr;
2696
2697 vm_page_t p;
2698
2699 /* XXX count is an (unused) arg. Avoid shadowing it. */
2700 #define count was_count
2701
2702 int count;
2703
2704 if (object == NULL)
2705 return;
2706
2707 db_iprintf(
2708 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
2709 object, (int)object->type, (uintmax_t)object->size,
2710 object->resident_page_count, object->ref_count, object->flags,
2711 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2712 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2713 atomic_load_int(&object->shadow_count),
2714 object->backing_object ? object->backing_object->ref_count : 0,
2715 object->backing_object, (uintmax_t)object->backing_object_offset);
2716
2717 if (!full)
2718 return;
2719
2720 db_indent += 2;
2721 count = 0;
2722 TAILQ_FOREACH(p, &object->memq, listq) {
2723 if (count == 0)
2724 db_iprintf("memory:=");
2725 else if (count == 6) {
2726 db_printf("\n");
2727 db_iprintf(" ...");
2728 count = 0;
2729 } else
2730 db_printf(",");
2731 count++;
2732
2733 db_printf("(off=0x%jx,page=0x%jx)",
2734 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2735
2736 if (db_pager_quit)
2737 break;
2738 }
2739 if (count != 0)
2740 db_printf("\n");
2741 db_indent -= 2;
2742 }
2743
2744 /* XXX. */
2745 #undef count
2746
2747 /* XXX need this non-static entry for calling from vm_map_print. */
2748 void
2749 vm_object_print(
2750 /* db_expr_t */ long addr,
2751 boolean_t have_addr,
2752 /* db_expr_t */ long count,
2753 char *modif)
2754 {
2755 vm_object_print_static(addr, have_addr, count, modif);
2756 }
2757
2758 DB_SHOW_COMMAND_FLAGS(vmopag, vm_object_print_pages, DB_CMD_MEMSAFE)
2759 {
2760 vm_object_t object;
2761 vm_pindex_t fidx;
2762 vm_paddr_t pa;
2763 vm_page_t m, prev_m;
2764 int rcount;
2765
2766 TAILQ_FOREACH(object, &vm_object_list, object_list) {
2767 db_printf("new object: %p\n", (void *)object);
2768 if (db_pager_quit)
2769 return;
2770
2771 rcount = 0;
2772 fidx = 0;
2773 pa = -1;
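		/*
		 * Walk the object's pages, coalescing runs of pages that are
		 * contiguous both by index and by physical address, and print
		 * one line per run: starting pindex, run length, and starting
		 * physical address.
		 */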
2774 TAILQ_FOREACH(m, &object->memq, listq) {
2775 if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2776 prev_m->pindex + 1 != m->pindex) {
2777 if (rcount) {
2778 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2779 (long)fidx, rcount, (long)pa);
2780 if (db_pager_quit)
2781 return;
2782 rcount = 0;
2783 }
2784 }
2785 if (rcount &&
2786 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2787 ++rcount;
2788 continue;
2789 }
2790 if (rcount) {
2791 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2792 (long)fidx, rcount, (long)pa);
2793 if (db_pager_quit)
2794 return;
2795 }
2796 fidx = m->pindex;
2797 pa = VM_PAGE_TO_PHYS(m);
2798 rcount = 1;
2799 }
2800 if (rcount) {
2801 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2802 (long)fidx, rcount, (long)pa);
2803 if (db_pager_quit)
2804 return;
2805 }
2806 }
2807 }
2808 #endif /* DDB */
2809