1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3 *
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * The Mach Operating System project at Carnegie-Mellon University.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61 /*
62 * Virtual memory object module.
63 */
64
65 #include "opt_vm.h"
66
67 #include <sys/systm.h>
68 #include <sys/blockcount.h>
69 #include <sys/conf.h>
70 #include <sys/cpuset.h>
71 #include <sys/ipc.h>
72 #include <sys/jail.h>
73 #include <sys/limits.h>
74 #include <sys/lock.h>
75 #include <sys/mman.h>
76 #include <sys/mount.h>
77 #include <sys/kernel.h>
78 #include <sys/mutex.h>
79 #include <sys/pctrie.h>
80 #include <sys/proc.h>
81 #include <sys/refcount.h>
82 #include <sys/shm.h>
83 #include <sys/sx.h>
84 #include <sys/sysctl.h>
85 #include <sys/resourcevar.h>
87 #include <sys/rwlock.h>
88 #include <sys/user.h>
89 #include <sys/vnode.h>
90 #include <sys/vmmeter.h>
91
92 #include <vm/vm.h>
93 #include <vm/vm_param.h>
94 #include <vm/pmap.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_page.h>
98 #include <vm/vm_pageout.h>
99 #include <vm/vm_pager.h>
100 #include <vm/vm_phys.h>
101 #include <vm/vm_pagequeue.h>
102 #include <vm/swap_pager.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_extern.h>
105 #include <vm/vm_radix.h>
106 #include <vm/vm_reserv.h>
107 #include <vm/uma.h>
108
109 static int old_msync;
110 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
111 "Use old (insecure) msync behavior");
112
113 static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
114 int pagerflags, int flags, boolean_t *allclean,
115 boolean_t *eio);
116 static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
117 boolean_t *allclean);
118 static void vm_object_backing_remove(vm_object_t object);
119
120 /*
121 * Virtual memory objects maintain the actual data
122 * associated with allocated virtual memory. A given
123 * page of memory exists within exactly one object.
124 *
125 * An object is only deallocated when all "references"
126 * are given up. Only one "reference" to a given
127 * region of an object should be writeable.
128 *
129 * Associated with each object is a list of all resident
130 * memory pages belonging to that object; this list is
131 * maintained by the "vm_page" module, and locked by the object's
132 * lock.
133 *
134 * Each object also records a "pager" routine which is
135 * used to retrieve (and store) pages to the proper backing
136 * storage. In addition, objects may be backed by other
137 * objects from which they were virtual-copied.
138 *
139 * The only items within the object structure which are
140 * modified after time of creation are:
141 * reference count locked by object's lock
142 * pager routine locked by object's lock
143 *
144 */
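
/*
 * In terms of the interfaces below: references are taken with
 * vm_object_reference() and dropped with vm_object_deallocate();
 * once the last reference is gone, vm_object_terminate() releases
 * the object's pages and backing store.
 */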
145
146 struct object_q vm_object_list;
147 struct mtx vm_object_list_mtx; /* lock for object list and count */
148
149 struct vm_object kernel_object_store;
150
151 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
152 "VM object stats");
153
154 static COUNTER_U64_DEFINE_EARLY(object_collapses);
155 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
156 &object_collapses,
157 "VM object collapses");
158
159 static COUNTER_U64_DEFINE_EARLY(object_bypasses);
160 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
161 &object_bypasses,
162 "VM object bypasses");
163
164 static COUNTER_U64_DEFINE_EARLY(object_collapse_waits);
165 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapse_waits, CTLFLAG_RD,
166 &object_collapse_waits,
167 "Number of sleeps for collapse");
168
169 static uma_zone_t obj_zone;
170
171 static int vm_object_zinit(void *mem, int size, int flags);
172
173 #ifdef INVARIANTS
174 static void vm_object_zdtor(void *mem, int size, void *arg);
175
176 static void
177 vm_object_zdtor(void *mem, int size, void *arg)
178 {
179 vm_object_t object;
180
181 object = (vm_object_t)mem;
182 KASSERT(object->ref_count == 0,
183 ("object %p ref_count = %d", object, object->ref_count));
184 KASSERT(TAILQ_EMPTY(&object->memq),
185 ("object %p has resident pages in its memq", object));
186 KASSERT(vm_radix_is_empty(&object->rtree),
187 ("object %p has resident pages in its trie", object));
188 #if VM_NRESERVLEVEL > 0
189 KASSERT(LIST_EMPTY(&object->rvq),
190 ("object %p has reservations",
191 object));
192 #endif
193 KASSERT(!vm_object_busied(object),
194 ("object %p busy = %d", object, blockcount_read(&object->busy)));
195 KASSERT(object->resident_page_count == 0,
196 ("object %p resident_page_count = %d",
197 object, object->resident_page_count));
198 KASSERT(atomic_load_int(&object->shadow_count) == 0,
199 ("object %p shadow_count = %d",
200 object, atomic_load_int(&object->shadow_count)));
201 KASSERT(object->type == OBJT_DEAD,
202 ("object %p has non-dead type %d",
203 object, object->type));
204 KASSERT(object->charge == 0 && object->cred == NULL,
205 ("object %p has non-zero charge %ju (%p)",
206 object, (uintmax_t)object->charge, object->cred));
207 }
208 #endif
209
210 static int
211 vm_object_zinit(void *mem, int size, int flags)
212 {
213 vm_object_t object;
214
215 object = (vm_object_t)mem;
216 rw_init_flags(&object->lock, "vmobject", RW_DUPOK | RW_NEW);
217
218 /* These are true for any object that has been freed */
219 object->type = OBJT_DEAD;
220 vm_radix_init(&object->rtree);
221 refcount_init(&object->ref_count, 0);
222 blockcount_init(&object->paging_in_progress);
223 blockcount_init(&object->busy);
224 object->resident_page_count = 0;
225 atomic_store_int(&object->shadow_count, 0);
226 object->flags = OBJ_DEAD;
227
228 mtx_lock(&vm_object_list_mtx);
229 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
230 mtx_unlock(&vm_object_list_mtx);
231 return (0);
232 }
233
234 static void
235 _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
236 vm_object_t object, void *handle)
237 {
238
239 TAILQ_INIT(&object->memq);
240 LIST_INIT(&object->shadow_head);
241
242 object->type = type;
243 object->flags = flags;
244 if ((flags & OBJ_SWAP) != 0) {
245 pctrie_init(&object->un_pager.swp.swp_blks);
246 object->un_pager.swp.writemappings = 0;
247 }
248
249 /*
250 * Ensure that swap_pager_swapoff() iteration over object_list
251 * sees an up-to-date type and pctrie head if it observed a
252 * non-dead object.
253 */
254 atomic_thread_fence_rel();
255
256 object->pg_color = 0;
257 object->size = size;
258 object->domain.dr_policy = NULL;
259 object->generation = 1;
260 object->cleangeneration = 1;
261 refcount_init(&object->ref_count, 1);
262 object->memattr = VM_MEMATTR_DEFAULT;
263 object->cred = NULL;
264 object->charge = 0;
265 object->handle = handle;
266 object->backing_object = NULL;
267 object->backing_object_offset = (vm_ooffset_t) 0;
268 #if VM_NRESERVLEVEL > 0
269 LIST_INIT(&object->rvq);
270 #endif
271 umtx_shm_object_init(object);
272 }
273
274 /*
275 * vm_object_init:
276 *
277 * Initialize the VM objects module.
278 */
279 void
280 vm_object_init(void)
281 {
282 TAILQ_INIT(&vm_object_list);
283 mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
284
285 rw_init(&kernel_object->lock, "kernel vm object");
286 vm_radix_init(&kernel_object->rtree);
287 _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
288 VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL);
289 #if VM_NRESERVLEVEL > 0
290 kernel_object->flags |= OBJ_COLORED;
291 kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
292 #endif
293 kernel_object->un_pager.phys.ops = &default_phys_pg_ops;
294
295 /*
296 * The lock portion of struct vm_object must be type stable due
297 * to vm_pageout_fallback_object_lock locking a vm object
298 * without holding any references to it.
299 *
300 * paging_in_progress is valid always. Lockless references to
301 * the objects may acquire pip and then check OBJ_DEAD.
302 */
303 obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
304 #ifdef INVARIANTS
305 vm_object_zdtor,
306 #else
307 NULL,
308 #endif
309 vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
310
311 vm_radix_zinit();
312 }
313
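/*
 * vm_object_clear_flag:
 *
 *	Clear the indicated flag bits in the object.  The counterpart of
 *	vm_object_set_flag().  The object must be write-locked.
 */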
314 void
315 vm_object_clear_flag(vm_object_t object, u_short bits)
316 {
317
318 VM_OBJECT_ASSERT_WLOCKED(object);
319 object->flags &= ~bits;
320 }
321
322 /*
323 * Sets the default memory attribute for the specified object. Pages
324 * that are allocated to this object are by default assigned this memory
325 * attribute.
326 *
327 * Presently, this function must be called before any pages are allocated
328 * to the object. In the future, this requirement may be relaxed for
329 * "default" and "swap" objects.
330 */
331 int
332 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
333 {
334
335 VM_OBJECT_ASSERT_WLOCKED(object);
336
337 if (object->type == OBJT_DEAD)
338 return (KERN_INVALID_ARGUMENT);
339 if (!TAILQ_EMPTY(&object->memq))
340 return (KERN_FAILURE);
341
342 object->memattr = memattr;
343 return (KERN_SUCCESS);
344 }
345
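/*
 * The paging_in_progress blockcount tracks in-flight paging activity on
 * the object.  vm_object_pip_add() acquires references on the count, the
 * wakeup routines release them, and the wait/sleep routines below block
 * until the count drains (for example, before the object is terminated).
 */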
346 void
347 vm_object_pip_add(vm_object_t object, short i)
348 {
349
350 if (i > 0)
351 blockcount_acquire(&object->paging_in_progress, i);
352 }
353
354 void
355 vm_object_pip_wakeup(vm_object_t object)
356 {
357
358 vm_object_pip_wakeupn(object, 1);
359 }
360
361 void
362 vm_object_pip_wakeupn(vm_object_t object, short i)
363 {
364
365 if (i > 0)
366 blockcount_release(&object->paging_in_progress, i);
367 }
368
369 /*
370 * Atomically drop the object lock and wait for pip to drain. This protects
371 * from sleep/wakeup races due to identity changes. The lock is not re-acquired
372 * on return.
373 */
374 static void
375 vm_object_pip_sleep(vm_object_t object, const char *waitid)
376 {
377
378 (void)blockcount_sleep(&object->paging_in_progress, &object->lock,
379 waitid, PVM | PDROP);
380 }
381
382 void
383 vm_object_pip_wait(vm_object_t object, const char *waitid)
384 {
385
386 VM_OBJECT_ASSERT_WLOCKED(object);
387
388 blockcount_wait(&object->paging_in_progress, &object->lock, waitid,
389 PVM);
390 }
391
392 void
393 vm_object_pip_wait_unlocked(vm_object_t object, const char *waitid)
394 {
395
396 VM_OBJECT_ASSERT_UNLOCKED(object);
397
398 blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM);
399 }
400
401 /*
402 * vm_object_allocate:
403 *
404 * Returns a new object with the given size.
405 */
406 vm_object_t
407 vm_object_allocate(objtype_t type, vm_pindex_t size)
408 {
409 vm_object_t object;
410 u_short flags;
411
412 switch (type) {
413 case OBJT_DEAD:
414 panic("vm_object_allocate: can't create OBJT_DEAD");
415 case OBJT_SWAP:
416 flags = OBJ_COLORED | OBJ_SWAP;
417 break;
418 case OBJT_DEVICE:
419 case OBJT_SG:
420 flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
421 break;
422 case OBJT_MGTDEVICE:
423 flags = OBJ_FICTITIOUS;
424 break;
425 case OBJT_PHYS:
426 flags = OBJ_UNMANAGED;
427 break;
428 case OBJT_VNODE:
429 flags = 0;
430 break;
431 default:
432 panic("vm_object_allocate: type %d is undefined or dynamic",
433 type);
434 }
435 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
436 _vm_object_allocate(type, size, flags, object, NULL);
437
438 return (object);
439 }
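
/*
 * Usage sketch (illustrative only; "len" is a hypothetical byte length,
 * not taken from this file):
 *
 *	obj = vm_object_allocate(OBJT_SWAP, atop(len));
 *	...
 *	vm_object_deallocate(obj);
 *
 * The returned object carries one reference; additional references are
 * taken with vm_object_reference() and dropped with vm_object_deallocate().
 */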
440
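/*
 * vm_object_allocate_dyn:
 *
 *	Returns a new object of a dynamically registered pager type
 *	(OBJT_FIRST_DYN or above) with the caller-supplied flags.
 */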
441 vm_object_t
442 vm_object_allocate_dyn(objtype_t dyntype, vm_pindex_t size, u_short flags)
443 {
444 vm_object_t object;
445
446 MPASS(dyntype >= OBJT_FIRST_DYN /* && dyntype < nitems(pagertab) */);
447 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
448 _vm_object_allocate(dyntype, size, flags, object, NULL);
449
450 return (object);
451 }
452
453 /*
454 * vm_object_allocate_anon:
455 *
456 * Returns a new default object of the given size and marked as
457 * anonymous memory for special split/collapse handling. Color
458 * to be initialized by the caller.
459 */
460 vm_object_t
461 vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object,
462 struct ucred *cred, vm_size_t charge)
463 {
464 vm_object_t handle, object;
465
466 if (backing_object == NULL)
467 handle = NULL;
468 else if ((backing_object->flags & OBJ_ANON) != 0)
469 handle = backing_object->handle;
470 else
471 handle = backing_object;
472 object = uma_zalloc(obj_zone, M_WAITOK);
473 _vm_object_allocate(OBJT_SWAP, size,
474 OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle);
475 object->cred = cred;
476 object->charge = cred != NULL ? charge : 0;
477 return (object);
478 }
479
480 static void
481 vm_object_reference_vnode(vm_object_t object)
482 {
483 u_int old;
484
485 /*
486 * vnode objects need the lock for the first reference
487 * to serialize with vm_object_deallocate_vnode().
488 */
489 if (!refcount_acquire_if_gt(&object->ref_count, 0)) {
490 VM_OBJECT_RLOCK(object);
491 old = refcount_acquire(&object->ref_count);
492 if (object->type == OBJT_VNODE && old == 0)
493 vref(object->handle);
494 VM_OBJECT_RUNLOCK(object);
495 }
496 }
497
498 /*
499 * vm_object_reference:
500 *
501 * Acquires a reference to the given object.
502 */
503 void
504 vm_object_reference(vm_object_t object)
505 {
506
507 if (object == NULL)
508 return;
509
510 if (object->type == OBJT_VNODE)
511 vm_object_reference_vnode(object);
512 else
513 refcount_acquire(&object->ref_count);
514 KASSERT((object->flags & OBJ_DEAD) == 0,
515 ("vm_object_reference: Referenced dead object."));
516 }
517
518 /*
519 * vm_object_reference_locked:
520 *
521 * Gets another reference to the given object.
522 *
523 * The object must be locked.
524 */
525 void
526 vm_object_reference_locked(vm_object_t object)
527 {
528 u_int old;
529
530 VM_OBJECT_ASSERT_LOCKED(object);
531 old = refcount_acquire(&object->ref_count);
532 if (object->type == OBJT_VNODE && old == 0)
533 vref(object->handle);
534 KASSERT((object->flags & OBJ_DEAD) == 0,
535 ("vm_object_reference_locked: Referenced dead object."));
536 }
537
538 /*
539 * Handle deallocating an object of type OBJT_VNODE.
540 */
541 static void
542 vm_object_deallocate_vnode(vm_object_t object)
543 {
544 struct vnode *vp = (struct vnode *) object->handle;
545 bool last;
546
547 KASSERT(object->type == OBJT_VNODE,
548 ("vm_object_deallocate_vnode: not a vnode object"));
549 KASSERT(vp != NULL, ("vm_object_deallocate_vnode: missing vp"));
550
551 /* Object lock to protect handle lookup. */
552 last = refcount_release(&object->ref_count);
553 VM_OBJECT_RUNLOCK(object);
554
555 if (!last)
556 return;
557
558 if (!umtx_shm_vnobj_persistent)
559 umtx_shm_object_terminated(object);
560
561 /* vrele may need the vnode lock. */
562 vrele(vp);
563 }
564
565 /*
566 * We dropped a reference on an object and discovered that it had a
567 * single remaining shadow. This is a sibling of the reference we
568 * dropped. Attempt to collapse the sibling and backing object.
569 */
570 static vm_object_t
571 vm_object_deallocate_anon(vm_object_t backing_object)
572 {
573 vm_object_t object;
574
575 /* Fetch the final shadow. */
576 object = LIST_FIRST(&backing_object->shadow_head);
577 KASSERT(object != NULL &&
578 atomic_load_int(&backing_object->shadow_count) == 1,
579 ("vm_object_deallocate_anon: ref_count: %d, shadow_count: %d",
580 backing_object->ref_count,
581 atomic_load_int(&backing_object->shadow_count)));
582 KASSERT((object->flags & OBJ_ANON) != 0,
583 ("invalid shadow object %p", object));
584
585 if (!VM_OBJECT_TRYWLOCK(object)) {
586 /*
587 * Prevent object from disappearing since we do not have a
588 * reference.
589 */
590 vm_object_pip_add(object, 1);
591 VM_OBJECT_WUNLOCK(backing_object);
592 VM_OBJECT_WLOCK(object);
593 vm_object_pip_wakeup(object);
594 } else
595 VM_OBJECT_WUNLOCK(backing_object);
596
597 /*
598 * Check for a collapse/terminate race with the last reference holder.
599 */
600 if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 ||
601 !refcount_acquire_if_not_zero(&object->ref_count)) {
602 VM_OBJECT_WUNLOCK(object);
603 return (NULL);
604 }
605 backing_object = object->backing_object;
606 if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0)
607 vm_object_collapse(object);
608 VM_OBJECT_WUNLOCK(object);
609
610 return (object);
611 }
612
613 /*
614 * vm_object_deallocate:
615 *
616 * Release a reference to the specified object,
617 * gained either through a vm_object_allocate
618 * or a vm_object_reference call. When all references
619 * are gone, storage associated with this object
620 * may be relinquished.
621 *
622 * No object may be locked.
623 */
624 void
625 vm_object_deallocate(vm_object_t object)
626 {
627 vm_object_t temp;
628 bool released;
629
630 while (object != NULL) {
631 /*
632 * If the reference count goes to 0 we start calling
633 * vm_object_terminate() on the object chain. A ref count
634 * of 1 may be a special case depending on the shadow count
635 * being 0 or 1. These cases require a write lock on the
636 * object.
637 */
638 if ((object->flags & OBJ_ANON) == 0)
639 released = refcount_release_if_gt(&object->ref_count, 1);
640 else
641 released = refcount_release_if_gt(&object->ref_count, 2);
642 if (released)
643 return;
644
645 if (object->type == OBJT_VNODE) {
646 VM_OBJECT_RLOCK(object);
647 if (object->type == OBJT_VNODE) {
648 vm_object_deallocate_vnode(object);
649 return;
650 }
651 VM_OBJECT_RUNLOCK(object);
652 }
653
654 VM_OBJECT_WLOCK(object);
655 KASSERT(object->ref_count > 0,
656 ("vm_object_deallocate: object deallocated too many times: %d",
657 object->ref_count));
658
659 /*
660 * If this is not the final reference to an anonymous
661 * object we may need to collapse the shadow chain.
662 */
663 if (!refcount_release(&object->ref_count)) {
664 if (object->ref_count > 1 ||
665 atomic_load_int(&object->shadow_count) == 0) {
666 if ((object->flags & OBJ_ANON) != 0 &&
667 object->ref_count == 1)
668 vm_object_set_flag(object,
669 OBJ_ONEMAPPING);
670 VM_OBJECT_WUNLOCK(object);
671 return;
672 }
673
674 /* Handle collapsing last ref on anonymous objects. */
675 object = vm_object_deallocate_anon(object);
676 continue;
677 }
678
679 /*
680 * Handle the final reference to an object. We restart
681 * the loop with the backing object to avoid recursion.
682 */
683 umtx_shm_object_terminated(object);
684 temp = object->backing_object;
685 if (temp != NULL) {
686 KASSERT(object->type == OBJT_SWAP,
687 ("shadowed tmpfs v_object 2 %p", object));
688 vm_object_backing_remove(object);
689 }
690
691 KASSERT((object->flags & OBJ_DEAD) == 0,
692 ("vm_object_deallocate: Terminating dead object."));
693 vm_object_set_flag(object, OBJ_DEAD);
694 vm_object_terminate(object);
695 object = temp;
696 }
697 }
698
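/*
 * vm_object_destroy:
 *
 *	Release the allocation for the object, returning it to the VM
 *	object UMA zone.
 */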
699 void
700 vm_object_destroy(vm_object_t object)
701 {
702 uma_zfree(obj_zone, object);
703 }
704
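/*
 * Decrement the shadow count of an object that is losing one of its
 * shadows.
 */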
705 static void
706 vm_object_sub_shadow(vm_object_t object)
707 {
708 KASSERT(object->shadow_count >= 1,
709 ("object %p sub_shadow count zero", object));
710 atomic_subtract_int(&object->shadow_count, 1);
711 }
712
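/*
 * Disconnect the object from its backing object, dropping the backing
 * object's shadow count and unlinking the object from the shadow list
 * when it is present there.  The "locked" variant requires both objects
 * to be write-locked.
 */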
713 static void
714 vm_object_backing_remove_locked(vm_object_t object)
715 {
716 vm_object_t backing_object;
717
718 backing_object = object->backing_object;
719 VM_OBJECT_ASSERT_WLOCKED(object);
720 VM_OBJECT_ASSERT_WLOCKED(backing_object);
721
722 KASSERT((object->flags & OBJ_COLLAPSING) == 0,
723 ("vm_object_backing_remove: Removing collapsing object."));
724
725 vm_object_sub_shadow(backing_object);
726 if ((object->flags & OBJ_SHADOWLIST) != 0) {
727 LIST_REMOVE(object, shadow_list);
728 vm_object_clear_flag(object, OBJ_SHADOWLIST);
729 }
730 object->backing_object = NULL;
731 }
732
733 static void
734 vm_object_backing_remove(vm_object_t object)
735 {
736 vm_object_t backing_object;
737
738 VM_OBJECT_ASSERT_WLOCKED(object);
739
740 backing_object = object->backing_object;
741 if ((object->flags & OBJ_SHADOWLIST) != 0) {
742 VM_OBJECT_WLOCK(backing_object);
743 vm_object_backing_remove_locked(object);
744 VM_OBJECT_WUNLOCK(backing_object);
745 } else {
746 object->backing_object = NULL;
747 vm_object_sub_shadow(backing_object);
748 }
749 }
750
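/*
 * Make backing_object the backing store of the object, bumping the
 * backing object's shadow count.  Anonymous backing objects also record
 * the shadow on their shadow_head list, which requires the backing
 * object to be write-locked (the "locked" variant assumes it already is).
 */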
751 static void
752 vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object)
753 {
754
755 VM_OBJECT_ASSERT_WLOCKED(object);
756
757 atomic_add_int(&backing_object->shadow_count, 1);
758 if ((backing_object->flags & OBJ_ANON) != 0) {
759 VM_OBJECT_ASSERT_WLOCKED(backing_object);
760 LIST_INSERT_HEAD(&backing_object->shadow_head, object,
761 shadow_list);
762 vm_object_set_flag(object, OBJ_SHADOWLIST);
763 }
764 object->backing_object = backing_object;
765 }
766
767 static void
768 vm_object_backing_insert(vm_object_t object, vm_object_t backing_object)
769 {
770
771 VM_OBJECT_ASSERT_WLOCKED(object);
772
773 if ((backing_object->flags & OBJ_ANON) != 0) {
774 VM_OBJECT_WLOCK(backing_object);
775 vm_object_backing_insert_locked(object, backing_object);
776 VM_OBJECT_WUNLOCK(backing_object);
777 } else {
778 object->backing_object = backing_object;
779 atomic_add_int(&backing_object->shadow_count, 1);
780 }
781 }
782
783 /*
784 * Insert an object into a backing_object's shadow list with an additional
785 * reference to the backing_object added.
786 */
787 static void
788 vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object)
789 {
790
791 VM_OBJECT_ASSERT_WLOCKED(object);
792
793 if ((backing_object->flags & OBJ_ANON) != 0) {
794 VM_OBJECT_WLOCK(backing_object);
795 KASSERT((backing_object->flags & OBJ_DEAD) == 0,
796 ("shadowing dead anonymous object"));
797 vm_object_reference_locked(backing_object);
798 vm_object_backing_insert_locked(object, backing_object);
799 vm_object_clear_flag(backing_object, OBJ_ONEMAPPING);
800 VM_OBJECT_WUNLOCK(backing_object);
801 } else {
802 vm_object_reference(backing_object);
803 atomic_add_int(&backing_object->shadow_count, 1);
804 object->backing_object = backing_object;
805 }
806 }
807
808 /*
809 * Transfer a backing reference from backing_object to object.
810 */
811 static void
812 vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object)
813 {
814 vm_object_t new_backing_object;
815
816 /*
817 * Note that the reference to backing_object->backing_object
818 * moves from within backing_object to within object.
819 */
820 vm_object_backing_remove_locked(object);
821 new_backing_object = backing_object->backing_object;
822 if (new_backing_object == NULL)
823 return;
824 if ((new_backing_object->flags & OBJ_ANON) != 0) {
825 VM_OBJECT_WLOCK(new_backing_object);
826 vm_object_backing_remove_locked(backing_object);
827 vm_object_backing_insert_locked(object, new_backing_object);
828 VM_OBJECT_WUNLOCK(new_backing_object);
829 } else {
830 /*
831 * shadow_count for new_backing_object is left
832 * unchanged, its reference provided by backing_object
833 * is replaced by object.
834 */
835 object->backing_object = new_backing_object;
836 backing_object->backing_object = NULL;
837 }
838 }
839
840 /*
841 * Wait for a concurrent collapse to settle.
842 */
843 static void
844 vm_object_collapse_wait(vm_object_t object)
845 {
846
847 VM_OBJECT_ASSERT_WLOCKED(object);
848
849 while ((object->flags & OBJ_COLLAPSING) != 0) {
850 vm_object_pip_wait(object, "vmcolwait");
851 counter_u64_add(object_collapse_waits, 1);
852 }
853 }
854
855 /*
856 * Waits for a backing object to clear a pending collapse and returns
857 * it locked if it is an ANON object.
858 */
859 static vm_object_t
860 vm_object_backing_collapse_wait(vm_object_t object)
861 {
862 vm_object_t backing_object;
863
864 VM_OBJECT_ASSERT_WLOCKED(object);
865
866 for (;;) {
867 backing_object = object->backing_object;
868 if (backing_object == NULL ||
869 (backing_object->flags & OBJ_ANON) == 0)
870 return (NULL);
871 VM_OBJECT_WLOCK(backing_object);
872 if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0)
873 break;
874 VM_OBJECT_WUNLOCK(object);
875 vm_object_pip_sleep(backing_object, "vmbckwait");
876 counter_u64_add(object_collapse_waits, 1);
877 VM_OBJECT_WLOCK(object);
878 }
879 return (backing_object);
880 }
881
882 /*
883 * vm_object_terminate_single_page removes a pageable page from the object,
884 * and removes it from the paging queues and frees it, if it is not wired.
885 * It is invoked via callback from vm_object_terminate_pages.
886 */
887 static void
888 vm_object_terminate_single_page(vm_page_t p, void *objectv)
889 {
890 vm_object_t object __diagused = objectv;
891
892 vm_page_assert_unbusied(p);
893 KASSERT(p->object == object &&
894 (p->ref_count & VPRC_OBJREF) != 0,
895 ("%s: page %p is inconsistent", __func__, p));
896 p->object = NULL;
897 if (vm_page_drop(p, VPRC_OBJREF) == VPRC_OBJREF) {
898 KASSERT((object->flags & OBJ_UNMANAGED) != 0 ||
899 vm_page_astate_load(p).queue != PQ_NONE,
900 ("%s: page %p does not belong to a queue", __func__, p));
901 VM_CNT_INC(v_pfree);
902 vm_page_free(p);
903 }
904 }
905
906 /*
907 * vm_object_terminate_pages removes any remaining pageable pages
908 * from the object and resets the object to an empty state.
909 */
910 static void
911 vm_object_terminate_pages(vm_object_t object)
912 {
913 VM_OBJECT_ASSERT_WLOCKED(object);
914
915 /*
916 * If the object contained any pages, then reset it to an empty state.
917 * Rather than incrementally removing each page from the object, the
918 * page and object are reset to an empty state.
919 */
920 if (object->resident_page_count == 0)
921 return;
922
923 vm_radix_reclaim_callback(&object->rtree,
924 vm_object_terminate_single_page, object);
925 TAILQ_INIT(&object->memq);
926 object->resident_page_count = 0;
927 if (object->type == OBJT_VNODE)
928 vdrop(object->handle);
929 }
930
931 /*
932 * vm_object_terminate actually destroys the specified object, freeing
933 * up all previously used resources.
934 *
935 * The object must be locked.
936 * This routine may block.
937 */
938 void
939 vm_object_terminate(vm_object_t object)
940 {
941
942 VM_OBJECT_ASSERT_WLOCKED(object);
943 KASSERT((object->flags & OBJ_DEAD) != 0,
944 ("terminating non-dead obj %p", object));
945 KASSERT((object->flags & OBJ_COLLAPSING) == 0,
946 ("terminating collapsing obj %p", object));
947 KASSERT(object->backing_object == NULL,
948 ("terminating shadow obj %p", object));
949
950 /*
951 * Wait for the pageout daemon and other current users to be
952 * done with the object. Note that new paging_in_progress
953 * users can come after this wait, but they must check
954 * OBJ_DEAD flag set (without unlocking the object), and avoid
955 * the object being terminated.
956 */
957 vm_object_pip_wait(object, "objtrm");
958
959 KASSERT(object->ref_count == 0,
960 ("vm_object_terminate: object with references, ref_count=%d",
961 object->ref_count));
962
963 if ((object->flags & OBJ_PG_DTOR) == 0)
964 vm_object_terminate_pages(object);
965
966 #if VM_NRESERVLEVEL > 0
967 if (__predict_false(!LIST_EMPTY(&object->rvq)))
968 vm_reserv_break_all(object);
969 #endif
970
971 KASSERT(object->cred == NULL || (object->flags & OBJ_SWAP) != 0,
972 ("%s: non-swap obj %p has cred", __func__, object));
973
974 /*
975 * Let the pager know object is dead.
976 */
977 vm_pager_deallocate(object);
978 VM_OBJECT_WUNLOCK(object);
979
980 vm_object_destroy(object);
981 }
982
983 /*
984 * Make the page read-only so that we can clear the object flags. However, if
985 * this is a nosync mmap then the object is likely to stay dirty so do not
986 * mess with the page and do not clear the object flags. Returns TRUE if the
987 * page should be flushed, and FALSE otherwise.
988 */
989 static boolean_t
990 vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean)
991 {
992
993 vm_page_assert_busied(p);
994
995 /*
996 * If we have been asked to skip nosync pages and this is a
997 * nosync page, skip it. Note that the object flags were not
998 * cleared in this case so we do not have to set them.
999 */
1000 if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) {
1001 *allclean = FALSE;
1002 return (FALSE);
1003 } else {
1004 pmap_remove_write(p);
1005 return (p->dirty != 0);
1006 }
1007 }
1008
1009 /*
1010 * vm_object_page_clean
1011 *
1012 * Clean all dirty pages in the specified range of object. Leaves page
1013 * on whatever queue it is currently on. If NOSYNC is set then do not
1014 * write out pages with PGA_NOSYNC set (originally comes from MAP_NOSYNC),
1015 * leaving the object dirty.
1016 *
1017 * For swap objects backing tmpfs regular files, do not flush anything,
1018 * but remove write protection on the mapped pages to update mtime through
1019 * mmaped writes.
1020 *
1021 * When stuffing pages asynchronously, allow clustering. XXX we need a
1022 * synchronous clustering mode implementation.
1023 *
1024 * Odd semantics: if start == end, we clean everything.
1025 *
1026 * The object must be locked.
1027 *
1028 * Returns FALSE if some page from the range was not written, as
1029 * reported by the pager, and TRUE otherwise.
1030 */
1031 boolean_t
1032 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
1033 int flags)
1034 {
1035 vm_page_t np, p;
1036 vm_pindex_t pi, tend, tstart;
1037 int curgeneration, n, pagerflags;
1038 boolean_t eio, res, allclean;
1039
1040 VM_OBJECT_ASSERT_WLOCKED(object);
1041
1042 if (!vm_object_mightbedirty(object) || object->resident_page_count == 0)
1043 return (TRUE);
1044
1045 pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
1046 VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
1047 pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;
1048
1049 tstart = OFF_TO_IDX(start);
1050 tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
1051 allclean = tstart == 0 && tend >= object->size;
1052 res = TRUE;
1053
1054 rescan:
1055 curgeneration = object->generation;
1056
1057 for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
1058 pi = p->pindex;
1059 if (pi >= tend)
1060 break;
1061 np = TAILQ_NEXT(p, listq);
1062 if (vm_page_none_valid(p))
1063 continue;
1064 if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) {
1065 if (object->generation != curgeneration &&
1066 (flags & OBJPC_SYNC) != 0)
1067 goto rescan;
1068 np = vm_page_find_least(object, pi);
1069 continue;
1070 }
1071 if (!vm_object_page_remove_write(p, flags, &allclean)) {
1072 vm_page_xunbusy(p);
1073 continue;
1074 }
1075 if (object->type == OBJT_VNODE) {
1076 n = vm_object_page_collect_flush(object, p, pagerflags,
1077 flags, &allclean, &eio);
1078 if (eio) {
1079 res = FALSE;
1080 allclean = FALSE;
1081 }
1082 if (object->generation != curgeneration &&
1083 (flags & OBJPC_SYNC) != 0)
1084 goto rescan;
1085
1086 /*
1087 * If the VOP_PUTPAGES() did a truncated write, so
1088 * that even the first page of the run is not fully
1089 * written, vm_pageout_flush() returns 0 as the run
1090 * length. Since the condition that caused truncated
1091 * write may be permanent, e.g. exhausted free space,
1092 * accepting n == 0 would cause an infinite loop.
1093 *
1094 * Forwarding the iterator leaves the unwritten page
1095 * behind, but there is not much we can do there if
1096 * filesystem refuses to write it.
1097 */
1098 if (n == 0) {
1099 n = 1;
1100 allclean = FALSE;
1101 }
1102 } else {
1103 n = 1;
1104 vm_page_xunbusy(p);
1105 }
1106 np = vm_page_find_least(object, pi + n);
1107 }
1108 #if 0
1109 VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
1110 #endif
1111
1112 /*
1113 * Leave updating cleangeneration for tmpfs objects to tmpfs
1114 * scan. It needs to update mtime, which happens for other
1115 * filesystems during page writeouts.
1116 */
1117 if (allclean && object->type == OBJT_VNODE)
1118 object->cleangeneration = curgeneration;
1119 return (res);
1120 }
1121
1122 static int
1123 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
1124 int flags, boolean_t *allclean, boolean_t *eio)
1125 {
1126 vm_page_t ma[2 * vm_pageout_page_count - 1], tp;
1127 int base, count, runlen;
1128
1129 vm_page_lock_assert(p, MA_NOTOWNED);
1130 vm_page_assert_xbusied(p);
1131 VM_OBJECT_ASSERT_WLOCKED(object);
1132 base = nitems(ma) / 2;
1133 ma[base] = p;
1134 for (count = 1, tp = p; count < vm_pageout_page_count; count++) {
1135 tp = vm_page_next(tp);
1136 if (tp == NULL || vm_page_tryxbusy(tp) == 0)
1137 break;
1138 if (!vm_object_page_remove_write(tp, flags, allclean)) {
1139 vm_page_xunbusy(tp);
1140 break;
1141 }
1142 ma[base + count] = tp;
1143 }
1144
1145 for (tp = p; count < vm_pageout_page_count; count++) {
1146 tp = vm_page_prev(tp);
1147 if (tp == NULL || vm_page_tryxbusy(tp) == 0)
1148 break;
1149 if (!vm_object_page_remove_write(tp, flags, allclean)) {
1150 vm_page_xunbusy(tp);
1151 break;
1152 }
1153 ma[--base] = tp;
1154 }
1155
1156 vm_pageout_flush(&ma[base], count, pagerflags, nitems(ma) / 2 - base,
1157 &runlen, eio);
1158 return (runlen);
1159 }
1160
1161 /*
1162 * Note that there is absolutely no sense in writing out
1163 * anonymous objects, so we track down the vnode object
1164 * to write out.
1165 * We invalidate (remove) all pages from the address space
1166 * for semantic correctness.
1167 *
1168 * If the backing object is a device object with unmanaged pages, then any
1169 * mappings to the specified range of pages must be removed before this
1170 * function is called.
1171 *
1172 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
1173 * may start out with a NULL object.
1174 */
1175 boolean_t
1176 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
1177 boolean_t syncio, boolean_t invalidate)
1178 {
1179 vm_object_t backing_object;
1180 struct vnode *vp;
1181 struct mount *mp;
1182 int error, flags, fsync_after;
1183 boolean_t res;
1184
1185 if (object == NULL)
1186 return (TRUE);
1187 res = TRUE;
1188 error = 0;
1189 VM_OBJECT_WLOCK(object);
1190 while ((backing_object = object->backing_object) != NULL) {
1191 VM_OBJECT_WLOCK(backing_object);
1192 offset += object->backing_object_offset;
1193 VM_OBJECT_WUNLOCK(object);
1194 object = backing_object;
1195 if (object->size < OFF_TO_IDX(offset + size))
1196 size = IDX_TO_OFF(object->size) - offset;
1197 }
1198 /*
1199 * Flush pages if writing is allowed, invalidate them
1200 * if invalidation requested. Pages undergoing I/O
1201 * will be ignored by vm_object_page_remove().
1202 *
1203 * We cannot lock the vnode and then wait for paging
1204 * to complete without deadlocking against vm_fault.
1205 * Instead we simply call vm_object_page_remove() and
1206 * allow it to block internally on a page-by-page
1207 * basis when it encounters pages undergoing async
1208 * I/O.
1209 */
1210 if (object->type == OBJT_VNODE &&
1211 vm_object_mightbedirty(object) != 0 &&
1212 ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
1213 VM_OBJECT_WUNLOCK(object);
1214 (void)vn_start_write(vp, &mp, V_WAIT);
1215 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1216 if (syncio && !invalidate && offset == 0 &&
1217 atop(size) == object->size) {
1218 /*
1219 * If syncing the whole mapping of the file,
1220 * it is faster to schedule all the writes in
1221 * async mode, also allowing the clustering,
1222 * and then wait for i/o to complete.
1223 */
1224 flags = 0;
1225 fsync_after = TRUE;
1226 } else {
1227 flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1228 flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
1229 fsync_after = FALSE;
1230 }
1231 VM_OBJECT_WLOCK(object);
1232 res = vm_object_page_clean(object, offset, offset + size,
1233 flags);
1234 VM_OBJECT_WUNLOCK(object);
1235 if (fsync_after) {
1236 for (;;) {
1237 error = VOP_FSYNC(vp, MNT_WAIT, curthread);
1238 if (error != ERELOOKUP)
1239 break;
1240
1241 /*
1242 * Allow SU/bufdaemon to handle more
1243 * dependencies in the meantime.
1244 */
1245 VOP_UNLOCK(vp);
1246 vn_finished_write(mp);
1247
1248 (void)vn_start_write(vp, &mp, V_WAIT);
1249 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1250 }
1251 }
1252 VOP_UNLOCK(vp);
1253 vn_finished_write(mp);
1254 if (error != 0)
1255 res = FALSE;
1256 VM_OBJECT_WLOCK(object);
1257 }
1258 if ((object->type == OBJT_VNODE ||
1259 object->type == OBJT_DEVICE) && invalidate) {
1260 if (object->type == OBJT_DEVICE)
1261 /*
1262 * The option OBJPR_NOTMAPPED must be passed here
1263 * because vm_object_page_remove() cannot remove
1264 * unmanaged mappings.
1265 */
1266 flags = OBJPR_NOTMAPPED;
1267 else if (old_msync)
1268 flags = 0;
1269 else
1270 flags = OBJPR_CLEANONLY;
1271 vm_object_page_remove(object, OFF_TO_IDX(offset),
1272 OFF_TO_IDX(offset + size + PAGE_MASK), flags);
1273 }
1274 VM_OBJECT_WUNLOCK(object);
1275 return (res);
1276 }
1277
1278 /*
1279 * Determine whether the given advice can be applied to the object. Advice is
1280 * not applied to unmanaged pages since they never belong to page queues, and
1281 * since MADV_FREE is destructive, it can apply only to anonymous pages that
1282 * have been mapped at most once.
1283 */
1284 static bool
1285 vm_object_advice_applies(vm_object_t object, int advice)
1286 {
1287
1288 if ((object->flags & OBJ_UNMANAGED) != 0)
1289 return (false);
1290 if (advice != MADV_FREE)
1291 return (true);
1292 return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) ==
1293 (OBJ_ONEMAPPING | OBJ_ANON));
1294 }
1295
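/*
 * For MADV_FREE, release any backing store (e.g. swap space) held by
 * the pager for the given range; other advice values do not touch the
 * pager.
 */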
1296 static void
1297 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
1298 vm_size_t size)
1299 {
1300
1301 if (advice == MADV_FREE)
1302 vm_pager_freespace(object, pindex, size);
1303 }
1304
1305 /*
1306 * vm_object_madvise:
1307 *
1308 * Implements the madvise function at the object/page level.
1309 *
1310 * MADV_WILLNEED (any object)
1311 *
1312 * Activate the specified pages if they are resident.
1313 *
1314 * MADV_DONTNEED (any object)
1315 *
1316 * Deactivate the specified pages if they are resident.
1317 *
1318 * MADV_FREE (OBJT_SWAP objects, OBJ_ONEMAPPING only)
1319 *
1320 * Deactivate and clean the specified pages if they are
1321 * resident. This permits the process to reuse the pages
1322 * without faulting or the kernel to reclaim the pages
1323 * without I/O.
1324 */
1325 void
1326 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
1327 int advice)
1328 {
1329 vm_pindex_t tpindex;
1330 vm_object_t backing_object, tobject;
1331 vm_page_t m, tm;
1332
1333 if (object == NULL)
1334 return;
1335
1336 relookup:
1337 VM_OBJECT_WLOCK(object);
1338 if (!vm_object_advice_applies(object, advice)) {
1339 VM_OBJECT_WUNLOCK(object);
1340 return;
1341 }
1342 for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
1343 tobject = object;
1344
1345 /*
1346 * If the next page isn't resident in the top-level object, we
1347 * need to search the shadow chain. When applying MADV_FREE, we
1348 * take care to release any swap space used to store
1349 * non-resident pages.
1350 */
1351 if (m == NULL || pindex < m->pindex) {
1352 /*
1353 * Optimize a common case: if the top-level object has
1354 * no backing object, we can skip over the non-resident
1355 * range in constant time.
1356 */
1357 if (object->backing_object == NULL) {
1358 tpindex = (m != NULL && m->pindex < end) ?
1359 m->pindex : end;
1360 vm_object_madvise_freespace(object, advice,
1361 pindex, tpindex - pindex);
1362 if ((pindex = tpindex) == end)
1363 break;
1364 goto next_page;
1365 }
1366
1367 tpindex = pindex;
1368 do {
1369 vm_object_madvise_freespace(tobject, advice,
1370 tpindex, 1);
1371 /*
1372 * Prepare to search the next object in the
1373 * chain.
1374 */
1375 backing_object = tobject->backing_object;
1376 if (backing_object == NULL)
1377 goto next_pindex;
1378 VM_OBJECT_WLOCK(backing_object);
1379 tpindex +=
1380 OFF_TO_IDX(tobject->backing_object_offset);
1381 if (tobject != object)
1382 VM_OBJECT_WUNLOCK(tobject);
1383 tobject = backing_object;
1384 if (!vm_object_advice_applies(tobject, advice))
1385 goto next_pindex;
1386 } while ((tm = vm_page_lookup(tobject, tpindex)) ==
1387 NULL);
1388 } else {
1389 next_page:
1390 tm = m;
1391 m = TAILQ_NEXT(m, listq);
1392 }
1393
1394 /*
1395 * If the page is not in a normal state, skip it. The page
1396 * can not be invalidated while the object lock is held.
1397 */
1398 if (!vm_page_all_valid(tm) || vm_page_wired(tm))
1399 goto next_pindex;
1400 KASSERT((tm->flags & PG_FICTITIOUS) == 0,
1401 ("vm_object_madvise: page %p is fictitious", tm));
1402 KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
1403 ("vm_object_madvise: page %p is not managed", tm));
1404 if (vm_page_tryxbusy(tm) == 0) {
1405 if (object != tobject)
1406 VM_OBJECT_WUNLOCK(object);
1407 if (advice == MADV_WILLNEED) {
1408 /*
1409 * Reference the page before unlocking and
1410 * sleeping so that the page daemon is less
1411 * likely to reclaim it.
1412 */
1413 vm_page_aflag_set(tm, PGA_REFERENCED);
1414 }
1415 if (!vm_page_busy_sleep(tm, "madvpo", 0))
1416 VM_OBJECT_WUNLOCK(tobject);
1417 goto relookup;
1418 }
1419 vm_page_advise(tm, advice);
1420 vm_page_xunbusy(tm);
1421 vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
1422 next_pindex:
1423 if (tobject != object)
1424 VM_OBJECT_WUNLOCK(tobject);
1425 }
1426 VM_OBJECT_WUNLOCK(object);
1427 }
1428
1429 /*
1430 * vm_object_shadow:
1431 *
1432 * Create a new object which is backed by the
1433 * specified existing object range. The source
1434 * object reference is deallocated.
1435 *
1436 * The new object and offset into that object
1437 * are returned in the source parameters.
1438 */
1439 void
1440 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length,
1441 struct ucred *cred, bool shared)
1442 {
1443 vm_object_t source;
1444 vm_object_t result;
1445
1446 source = *object;
1447
1448 /*
1449 * Don't create the new object if the old object isn't shared.
1450 *
1451 * If we hold the only reference we can guarantee that it won't
1452 * increase while we have the map locked. Otherwise the race is
1453 * harmless and we will end up with an extra shadow object that
1454 * will be collapsed later.
1455 */
1456 if (source != NULL && source->ref_count == 1 &&
1457 (source->flags & OBJ_ANON) != 0)
1458 return;
1459
1460 /*
1461 * Allocate a new object with the given length.
1462 */
1463 result = vm_object_allocate_anon(atop(length), source, cred, length);
1464
1465 /*
1466 * Store the offset into the source object, and fix up the offset into
1467 * the new object.
1468 */
1469 result->backing_object_offset = *offset;
1470
1471 if (shared || source != NULL) {
1472 VM_OBJECT_WLOCK(result);
1473
1474 /*
1475 * The new object shadows the source object, adding a
1476 * reference to it. Our caller changes his reference
1477 * to point to the new object, removing a reference to
1478 * the source object. Net result: no change of
1479 * reference count, unless the caller needs to add one
1480 * more reference due to forking a shared map entry.
1481 */
1482 if (shared) {
1483 vm_object_reference_locked(result);
1484 vm_object_clear_flag(result, OBJ_ONEMAPPING);
1485 }
1486
1487 /*
1488 * Try to optimize the result object's page color when
1489 * shadowing in order to maintain page coloring
1490 * consistency in the combined shadowed object.
1491 */
1492 if (source != NULL) {
1493 vm_object_backing_insert(result, source);
1494 result->domain = source->domain;
1495 #if VM_NRESERVLEVEL > 0
1496 vm_object_set_flag(result,
1497 (source->flags & OBJ_COLORED));
1498 result->pg_color = (source->pg_color +
1499 OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER -
1500 1)) - 1);
1501 #endif
1502 }
1503 VM_OBJECT_WUNLOCK(result);
1504 }
1505
1506 /*
1507 * Return the new object and the adjusted offset to the caller.
1508 */
1509 *offset = 0;
1510 *object = result;
1511 }
1512
1513 /*
1514 * vm_object_split:
1515 *
1516 * Split the pages in a map entry into a new object. This affords
1517 * easier removal of unused pages, and keeps object inheritance from
1518 * being a negative impact on memory usage.
1519 */
1520 void
1521 vm_object_split(vm_map_entry_t entry)
1522 {
1523 struct pctrie_iter pages;
1524 vm_page_t m;
1525 vm_object_t orig_object, new_object, backing_object;
1526 vm_pindex_t offidxstart;
1527 vm_size_t size;
1528
1529 orig_object = entry->object.vm_object;
1530 KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0,
1531 ("vm_object_split: Splitting object with multiple mappings."));
1532 if ((orig_object->flags & OBJ_ANON) == 0)
1533 return;
1534 if (orig_object->ref_count <= 1)
1535 return;
1536 VM_OBJECT_WUNLOCK(orig_object);
1537
1538 offidxstart = OFF_TO_IDX(entry->offset);
1539 size = atop(entry->end - entry->start);
1540
1541 new_object = vm_object_allocate_anon(size, orig_object,
1542 orig_object->cred, ptoa(size));
1543
1544 /*
1545 * We must wait for the orig_object to complete any in-progress
1546 * collapse so that the swap blocks are stable below. The
1547 * additional reference on backing_object by new object will
1548 * prevent further collapse operations until split completes.
1549 */
1550 VM_OBJECT_WLOCK(orig_object);
1551 vm_object_collapse_wait(orig_object);
1552
1553 /*
1554 * At this point, the new object is still private, so the order in
1555 * which the original and new objects are locked does not matter.
1556 */
1557 VM_OBJECT_WLOCK(new_object);
1558 new_object->domain = orig_object->domain;
1559 backing_object = orig_object->backing_object;
1560 if (backing_object != NULL) {
1561 vm_object_backing_insert_ref(new_object, backing_object);
1562 new_object->backing_object_offset =
1563 orig_object->backing_object_offset + entry->offset;
1564 }
1565 if (orig_object->cred != NULL) {
1566 crhold(orig_object->cred);
1567 KASSERT(orig_object->charge >= ptoa(size),
1568 ("orig_object->charge < ptoa(size)"));
1569 orig_object->charge -= ptoa(size);
1570 }
1571
1572 /*
1573 * Mark the split operation so that swap_pager_getpages() knows
1574 * that the object is in transition.
1575 */
1576 vm_object_set_flag(orig_object, OBJ_SPLIT);
1577 vm_page_iter_limit_init(&pages, orig_object, offidxstart + size);
1578 retry:
1579 pctrie_iter_reset(&pages);
1580 for (m = vm_page_iter_lookup_ge(&pages, offidxstart); m != NULL;
1581 m = vm_radix_iter_step(&pages)) {
1582 /*
1583 * We must wait for pending I/O to complete before we can
1584 * rename the page.
1585 *
1586 * We do not have to VM_PROT_NONE the page as mappings should
1587 * not be changed by this operation.
1588 */
1589 if (vm_page_tryxbusy(m) == 0) {
1590 VM_OBJECT_WUNLOCK(new_object);
1591 if (vm_page_busy_sleep(m, "spltwt", 0))
1592 VM_OBJECT_WLOCK(orig_object);
1593 VM_OBJECT_WLOCK(new_object);
1594 goto retry;
1595 }
1596
1597 /*
1598 * The page was left invalid. Likely placed there by
1599 * an incomplete fault. Just remove and ignore.
1600 */
1601 if (vm_page_none_valid(m)) {
1602 if (vm_page_iter_remove(&pages, m))
1603 vm_page_free(m);
1604 continue;
1605 }
1606
1607 /* vm_page_iter_rename() will dirty the page. */
1608 if (!vm_page_iter_rename(&pages, m, new_object, m->pindex -
1609 offidxstart)) {
1610 vm_page_xunbusy(m);
1611 VM_OBJECT_WUNLOCK(new_object);
1612 VM_OBJECT_WUNLOCK(orig_object);
1613 vm_radix_wait();
1614 VM_OBJECT_WLOCK(orig_object);
1615 VM_OBJECT_WLOCK(new_object);
1616 goto retry;
1617 }
1618
1619 #if VM_NRESERVLEVEL > 0
1620 /*
1621 * If some of the reservation's allocated pages remain with
1622 * the original object, then transferring the reservation to
1623 * the new object is neither particularly beneficial nor
1624 * particularly harmful as compared to leaving the reservation
1625 * with the original object. If, however, all of the
1626 * reservation's allocated pages are transferred to the new
1627 * object, then transferring the reservation is typically
1628 * beneficial. Determining which of these two cases applies
1629 * would be more costly than unconditionally renaming the
1630 * reservation.
1631 */
1632 vm_reserv_rename(m, new_object, orig_object, offidxstart);
1633 #endif
1634 }
1635
1636 /*
1637 * swap_pager_copy() can sleep, in which case the orig_object's
1638 * and new_object's locks are released and reacquired.
1639 */
1640 swap_pager_copy(orig_object, new_object, offidxstart, 0);
1641
1642 TAILQ_FOREACH(m, &new_object->memq, listq)
1643 vm_page_xunbusy(m);
1644
1645 vm_object_clear_flag(orig_object, OBJ_SPLIT);
1646 VM_OBJECT_WUNLOCK(orig_object);
1647 VM_OBJECT_WUNLOCK(new_object);
1648 entry->object.vm_object = new_object;
1649 entry->offset = 0LL;
1650 vm_object_deallocate(orig_object);
1651 VM_OBJECT_WLOCK(new_object);
1652 }
1653
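/*
 * Helper for vm_object_collapse_scan(): sleep until the page that
 * stalled the scan can be revisited (or until a failed rename may be
 * retried when p is NULL), then relock both objects and restart the
 * iteration over the backing object's pages.
 */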
1654 static vm_page_t
1655 vm_object_collapse_scan_wait(struct pctrie_iter *pages, vm_object_t object,
1656 vm_page_t p)
1657 {
1658 vm_object_t backing_object;
1659
1660 VM_OBJECT_ASSERT_WLOCKED(object);
1661 backing_object = object->backing_object;
1662 VM_OBJECT_ASSERT_WLOCKED(backing_object);
1663
1664 KASSERT(p == NULL || p->object == object || p->object == backing_object,
1665 ("invalid ownership %p %p %p", p, object, backing_object));
1666 /* The page is only NULL when rename fails. */
1667 if (p == NULL) {
1668 VM_OBJECT_WUNLOCK(object);
1669 VM_OBJECT_WUNLOCK(backing_object);
1670 vm_radix_wait();
1671 VM_OBJECT_WLOCK(object);
1672 } else if (p->object == object) {
1673 VM_OBJECT_WUNLOCK(backing_object);
1674 if (vm_page_busy_sleep(p, "vmocol", 0))
1675 VM_OBJECT_WLOCK(object);
1676 } else {
1677 VM_OBJECT_WUNLOCK(object);
1678 if (!vm_page_busy_sleep(p, "vmocol", 0))
1679 VM_OBJECT_WUNLOCK(backing_object);
1680 VM_OBJECT_WLOCK(object);
1681 }
1682 VM_OBJECT_WLOCK(backing_object);
1683 vm_page_iter_init(pages, backing_object);
1684 return (vm_page_iter_lookup_ge(pages, 0));
1685 }
1686
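/*
 * Scan the resident pages of the backing object as part of collapsing it
 * into the shadowing object: pages outside the parent's range or already
 * shadowed there are freed, and the remainder are renamed into the
 * parent.  Both objects must be write-locked.
 */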
1687 static void
1688 vm_object_collapse_scan(vm_object_t object)
1689 {
1690 struct pctrie_iter pages;
1691 vm_object_t backing_object;
1692 vm_page_t next, p, pp;
1693 vm_pindex_t backing_offset_index, new_pindex;
1694
1695 VM_OBJECT_ASSERT_WLOCKED(object);
1696 VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
1697
1698 backing_object = object->backing_object;
1699 backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1700
1701 /*
1702 * Scan every resident page of the backing object.
1703 */
1704 vm_page_iter_init(&pages, backing_object);
1705 for (p = vm_page_iter_lookup_ge(&pages, 0); p != NULL; p = next) {
1706 next = TAILQ_NEXT(p, listq);
1707 new_pindex = p->pindex - backing_offset_index;
1708
1709 /*
1710 * Check for busy page
1711 */
1712 if (vm_page_tryxbusy(p) == 0) {
1713 next = vm_object_collapse_scan_wait(&pages, object, p);
1714 continue;
1715 }
1716
1717 KASSERT(object->backing_object == backing_object,
1718 ("vm_object_collapse_scan: backing object mismatch %p != %p",
1719 object->backing_object, backing_object));
1720 KASSERT(p->object == backing_object,
1721 ("vm_object_collapse_scan: object mismatch %p != %p",
1722 p->object, backing_object));
1723
1724 if (p->pindex < backing_offset_index ||
1725 new_pindex >= object->size) {
1726 vm_pager_freespace(backing_object, p->pindex, 1);
1727
1728 KASSERT(!pmap_page_is_mapped(p),
1729 ("freeing mapped page %p", p));
1730 if (vm_page_iter_remove(&pages, p))
1731 vm_page_free(p);
1732 next = vm_radix_iter_step(&pages);
1733 continue;
1734 }
1735
1736 if (!vm_page_all_valid(p)) {
1737 KASSERT(!pmap_page_is_mapped(p),
1738 ("freeing mapped page %p", p));
1739 if (vm_page_iter_remove(&pages, p))
1740 vm_page_free(p);
1741 next = vm_radix_iter_step(&pages);
1742 continue;
1743 }
1744
1745 pp = vm_page_lookup(object, new_pindex);
1746 if (pp != NULL && vm_page_tryxbusy(pp) == 0) {
1747 vm_page_xunbusy(p);
1748 /*
1749 * The page in the parent is busy and possibly not
1750 * (yet) valid. Until its state is finalized by the
1751 * busy bit owner, we can't tell whether it shadows the
1752 * original page.
1753 */
1754 next = vm_object_collapse_scan_wait(&pages, object, pp);
1755 continue;
1756 }
1757
1758 if (pp != NULL && vm_page_none_valid(pp)) {
1759 /*
1760 * The page was invalid in the parent. Likely placed
1761 * there by an incomplete fault. Just remove and
1762 * ignore. p can replace it.
1763 */
1764 if (vm_page_remove(pp))
1765 vm_page_free(pp);
1766 pp = NULL;
1767 }
1768
1769 if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
1770 NULL)) {
1771 /*
1772 * The page already exists in the parent OR swap exists
1773 * for this location in the parent. Leave the parent's
1774 * page alone. Destroy the original page from the
1775 * backing object.
1776 */
1777 vm_pager_freespace(backing_object, p->pindex, 1);
1778 KASSERT(!pmap_page_is_mapped(p),
1779 ("freeing mapped page %p", p));
1780 if (pp != NULL)
1781 vm_page_xunbusy(pp);
1782 if (vm_page_iter_remove(&pages, p))
1783 vm_page_free(p);
1784 next = vm_radix_iter_step(&pages);
1785 continue;
1786 }
1787
1788 /*
1789 * Page does not exist in parent, rename the page from the
1790 * backing object to the main object.
1791 *
1792 * If the page was mapped to a process, it can remain mapped
1793 * through the rename. vm_page_iter_rename() will dirty the
1794 * page.
1795 */
1796 if (!vm_page_iter_rename(&pages, p, object, new_pindex)) {
1797 vm_page_xunbusy(p);
1798 next = vm_object_collapse_scan_wait(&pages, object,
1799 NULL);
1800 continue;
1801 }
1802
1803 /* Use the old pindex to free the right page. */
1804 vm_pager_freespace(backing_object, new_pindex +
1805 backing_offset_index, 1);
1806
1807 #if VM_NRESERVLEVEL > 0
1808 /*
1809 * Rename the reservation.
1810 */
1811 vm_reserv_rename(p, object, backing_object,
1812 backing_offset_index);
1813 #endif
1814 vm_page_xunbusy(p);
1815 next = vm_radix_iter_step(&pages);
1816 }
1817 return;
1818 }
1819
1820 /*
1821 * vm_object_collapse:
1822 *
1823 * Collapse an object with the object backing it.
1824 * Pages in the backing object are moved into the
1825 * parent, and the backing object is deallocated.
1826 */
void
vm_object_collapse(vm_object_t object)
{
1830 vm_object_t backing_object, new_backing_object;
1831
1832 VM_OBJECT_ASSERT_WLOCKED(object);
1833
1834 while (TRUE) {
1835 KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON,
1836 ("collapsing invalid object"));
1837
1838 /*
1839 * Wait for the backing_object to finish any pending
1840 * collapse so that the caller sees the shortest possible
1841 * shadow chain.
1842 */
1843 backing_object = vm_object_backing_collapse_wait(object);
1844 if (backing_object == NULL)
1845 return;
1846
1847 KASSERT(object->ref_count > 0 &&
1848 object->ref_count > atomic_load_int(&object->shadow_count),
1849 ("collapse with invalid ref %d or shadow %d count.",
1850 object->ref_count, atomic_load_int(&object->shadow_count)));
1851 KASSERT((backing_object->flags &
1852 (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
1853 ("vm_object_collapse: Backing object already collapsing."));
1854 KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
1855 ("vm_object_collapse: object is already collapsing."));
1856
1857 /*
1858 * We know that we can either collapse the backing object if
1859 * the parent is the only reference to it, or (perhaps) have
1860 * the parent bypass the object if the parent happens to shadow
1861 * all the resident pages in the entire backing object.
1862 */
1863 if (backing_object->ref_count == 1) {
1864 KASSERT(atomic_load_int(&backing_object->shadow_count)
1865 == 1,
1866 ("vm_object_collapse: shadow_count: %d",
1867 atomic_load_int(&backing_object->shadow_count)));
1868 vm_object_pip_add(object, 1);
1869 vm_object_set_flag(object, OBJ_COLLAPSING);
1870 vm_object_pip_add(backing_object, 1);
1871 vm_object_set_flag(backing_object, OBJ_DEAD);
1872
1873 /*
1874 * If there is exactly one reference to the backing
1875 * object, we can collapse it into the parent.
1876 */
1877 vm_object_collapse_scan(object);
1878
1879 /*
1880 * Move the pager from backing_object to object.
1881 *
1882 * swap_pager_copy() can sleep, in which case the
1883 * backing_object's and object's locks are released and
1884 * reacquired.
1885 */
1886 swap_pager_copy(backing_object, object,
1887 OFF_TO_IDX(object->backing_object_offset), TRUE);
1888
1889 /*
1890 * Object now shadows whatever backing_object did.
1891 */
1892 vm_object_clear_flag(object, OBJ_COLLAPSING);
1893 vm_object_backing_transfer(object, backing_object);
1894 object->backing_object_offset +=
1895 backing_object->backing_object_offset;
1896 VM_OBJECT_WUNLOCK(object);
1897 vm_object_pip_wakeup(object);
1898
1899 /*
1900 * Discard backing_object.
1901 *
1902 * Since the backing object has no pages, no pager left,
1903 * and no object references within it, all that is
1904 * necessary is to dispose of it.
1905 */
1906 KASSERT(backing_object->ref_count == 1, (
1907 "backing_object %p was somehow re-referenced during collapse!",
1908 backing_object));
1909 vm_object_pip_wakeup(backing_object);
1910 (void)refcount_release(&backing_object->ref_count);
1911 umtx_shm_object_terminated(backing_object);
1912 vm_object_terminate(backing_object);
1913 counter_u64_add(object_collapses, 1);
1914 VM_OBJECT_WLOCK(object);
1915 } else {
1916 /*
1917 * If we do not entirely shadow the backing object,
1918 * there is nothing we can do so we give up.
1919 *
1920 * The object lock and backing_object lock must not
1921 * be dropped during this sequence.
1922 */
1923 if (!swap_pager_scan_all_shadowed(object)) {
1924 VM_OBJECT_WUNLOCK(backing_object);
1925 break;
1926 }
1927
1928 /*
1929 * Make the parent shadow the next object in the
1930 * chain. Deallocating backing_object will not remove
1931 * it, since its reference count is at least 2.
1932 */
1933 vm_object_backing_remove_locked(object);
1934 new_backing_object = backing_object->backing_object;
1935 if (new_backing_object != NULL) {
1936 vm_object_backing_insert_ref(object,
1937 new_backing_object);
1938 object->backing_object_offset +=
1939 backing_object->backing_object_offset;
1940 }
1941
1942 /*
1943 * Drop the reference count on backing_object. Since
1944 * its ref_count was at least 2, it will not vanish.
1945 */
1946 (void)refcount_release(&backing_object->ref_count);
1947 KASSERT(backing_object->ref_count >= 1, (
1948 "backing_object %p was somehow dereferenced during collapse!",
1949 backing_object));
1950 VM_OBJECT_WUNLOCK(backing_object);
1951 counter_u64_add(object_bypasses, 1);
1952 }
1953
1954 /*
1955 * Try again with this object's new backing object.
1956 */
1957 }
1958 }
1959
1960 /*
1961 * vm_object_page_remove:
1962 *
1963 * For the given object, either frees or invalidates each of the
1964 * specified pages. In general, a page is freed. However, if a page is
1965 * wired for any reason other than the existence of a managed, wired
1966 * mapping, then it may be invalidated but not removed from the object.
1967 * Pages are specified by the given range ["start", "end") and the option
1968 * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range
1969 * extends from "start" to the end of the object. If the option
1970 * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
1971 * specified range are affected. If the option OBJPR_NOTMAPPED is
1972 * specified, then the pages within the specified range must have no
1973 * mappings. Otherwise, if this option is not specified, any mappings to
1974 * the specified pages are removed before the pages are freed or
1975 * invalidated.
1976 *
1977 * In general, this operation should only be performed on objects that
1978 * contain managed pages. There are, however, two exceptions. First, it
1979 * is performed on the kernel and kmem objects by vm_map_entry_delete().
1980 * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
1981 * backed pages. In both of these cases, the option OBJPR_CLEANONLY must
1982 * not be specified and the option OBJPR_NOTMAPPED must be specified.
1983 *
1984 * The object must be locked.
1985 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int options)
{
1990 struct pctrie_iter pages;
1991 vm_page_t p;
1992
1993 VM_OBJECT_ASSERT_WLOCKED(object);
1994 KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
1995 (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
1996 ("vm_object_page_remove: illegal options for object %p", object));
1997 if (object->resident_page_count == 0)
1998 return;
1999 vm_object_pip_add(object, 1);
2000 vm_page_iter_limit_init(&pages, object, end);
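	/*
	 * Restart the scan from "start" whenever sleeping on a busy page
	 * requires the object lock to be dropped.
	 */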
2001 again:
2002 pctrie_iter_reset(&pages);
2003 for (p = vm_page_iter_lookup_ge(&pages, start); p != NULL;
2004 p = vm_radix_iter_step(&pages)) {
2005 /*
2006 * Skip invalid pages if asked to do so. Try to avoid acquiring
2007 * the busy lock, as some consumers rely on this to avoid
2008 * deadlocks.
2009 *
2010 * A thread may concurrently transition the page from invalid to
2011 * valid using only the busy lock, so the result of this check
2012 * is immediately stale. It is up to consumers to handle this,
2013 * for instance by ensuring that all invalid->valid transitions
2014 * happen with a mutex held, as may be possible for a
2015 * filesystem.
2016 */
2017 if ((options & OBJPR_VALIDONLY) != 0 && vm_page_none_valid(p))
2018 continue;
2019
2020 /*
2021 * If the page is wired for any reason besides the existence
2022 * of managed, wired mappings, then it cannot be freed. For
2023 * example, fictitious pages, which represent device memory,
2024 * are inherently wired and cannot be freed. They can,
2025 * however, be invalidated if the option OBJPR_CLEANONLY is
2026 * not specified.
2027 */
2028 if (vm_page_tryxbusy(p) == 0) {
2029 if (vm_page_busy_sleep(p, "vmopar", 0))
2030 VM_OBJECT_WLOCK(object);
2031 goto again;
2032 }
2033 if ((options & OBJPR_VALIDONLY) != 0 && vm_page_none_valid(p)) {
2034 vm_page_xunbusy(p);
2035 continue;
2036 }
2037 if (vm_page_wired(p)) {
2038 wired:
2039 if ((options & OBJPR_NOTMAPPED) == 0 &&
2040 object->ref_count != 0)
2041 pmap_remove_all(p);
2042 if ((options & OBJPR_CLEANONLY) == 0) {
2043 vm_page_invalid(p);
2044 vm_page_undirty(p);
2045 }
2046 vm_page_xunbusy(p);
2047 continue;
2048 }
2049 KASSERT((p->flags & PG_FICTITIOUS) == 0,
2050 ("vm_object_page_remove: page %p is fictitious", p));
2051 if ((options & OBJPR_CLEANONLY) != 0 &&
2052 !vm_page_none_valid(p)) {
2053 if ((options & OBJPR_NOTMAPPED) == 0 &&
2054 object->ref_count != 0 &&
2055 !vm_page_try_remove_write(p))
2056 goto wired;
2057 if (p->dirty != 0) {
2058 vm_page_xunbusy(p);
2059 continue;
2060 }
2061 }
2062 if ((options & OBJPR_NOTMAPPED) == 0 &&
2063 object->ref_count != 0 && !vm_page_try_remove_all(p))
2064 goto wired;
2065 vm_page_iter_free(&pages, p);
2066 }
2067 vm_object_pip_wakeup(object);
2068
2069 vm_pager_freespace(object, start, (end == 0 ? object->size : end) -
2070 start);
2071 }
2072
2073 /*
2074 * vm_object_page_noreuse:
2075 *
2076 * For the given object, attempt to move the specified pages to
2077 * the head of the inactive queue. This bypasses regular LRU
2078 * operation and allows the pages to be reused quickly under memory
2079 * pressure. If a page is wired for any reason, then it will not
2080 * be queued. Pages are specified by the range ["start", "end").
2081 * As a special case, if "end" is zero, then the range extends from
2082 * "start" to the end of the object.
2083 *
2084 * This operation should only be performed on objects that
2085 * contain non-fictitious, managed pages.
2086 *
2087 * The object must be locked.
2088 */
void
vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
2092 vm_page_t p, next;
2093
2094 VM_OBJECT_ASSERT_LOCKED(object);
2095 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
2096 ("vm_object_page_noreuse: illegal object %p", object));
2097 if (object->resident_page_count == 0)
2098 return;
2099 p = vm_page_find_least(object, start);
2100
2101 /*
2102 * Here, the variable "p" is either (1) the page with the least pindex
2103 * greater than or equal to the parameter "start" or (2) NULL.
2104 */
2105 for (; p != NULL && (p->pindex < end || end == 0); p = next) {
2106 next = TAILQ_NEXT(p, listq);
2107 vm_page_deactivate_noreuse(p);
2108 }
2109 }
2110
2111 /*
2112 * Populate the specified range of the object with valid pages. Returns
2113 * TRUE if the range is successfully populated and FALSE otherwise.
2114 *
2115 * Note: This function should be optimized to pass a larger array of
2116 * pages to vm_pager_get_pages() before it is applied to a non-
2117 * OBJT_DEVICE object.
2118 *
2119 * The object must be locked.
2120 */
boolean_t
vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
2124 vm_page_t m;
2125 vm_pindex_t pindex;
2126 int rv;
2127
2128 VM_OBJECT_ASSERT_WLOCKED(object);
2129 for (pindex = start; pindex < end; pindex++) {
2130 rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL);
2131 if (rv != VM_PAGER_OK)
2132 break;
2133
2134 /*
2135 * Keep "m" busy because a subsequent iteration may unlock
2136 * the object.
2137 */
2138 }
2139 if (pindex > start) {
2140 m = vm_page_lookup(object, start);
2141 while (m != NULL && m->pindex < pindex) {
2142 vm_page_xunbusy(m);
2143 m = TAILQ_NEXT(m, listq);
2144 }
2145 }
2146 return (pindex == end);
2147 }
2148
2149 /*
2150 * Routine: vm_object_coalesce
2151 * Function: Coalesces two objects backing up adjoining
2152 * regions of memory into a single object.
2153 *
2154 * returns TRUE if objects were combined.
2155 *
2156 * NOTE: Only works at the moment if the second object is NULL -
2157 * if it's not, which object do we lock first?
2158 *
2159 * Parameters:
2160 * prev_object First object to coalesce
2161 * prev_offset Offset into prev_object
2162 * prev_size Size of reference to prev_object
2163 * next_size Size of reference to the second object
2164 * reserved Indicator that extension region has
2165 * swap accounted for
2166 *
2167 * Conditions:
2168 * The object must *not* be locked.
2169 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
    vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
{
2174 vm_pindex_t next_pindex;
2175
2176 if (prev_object == NULL)
2177 return (TRUE);
2178 if ((prev_object->flags & OBJ_ANON) == 0)
2179 return (FALSE);
2180
2181 VM_OBJECT_WLOCK(prev_object);
2182 /*
2183 * Try to collapse the object first.
2184 */
2185 vm_object_collapse(prev_object);
2186
	/*
	 * Can't coalesce if:
	 * - more than one reference
	 * - paged out
	 * - shadows another object
	 * - has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */
2192 if (prev_object->backing_object != NULL) {
2193 VM_OBJECT_WUNLOCK(prev_object);
2194 return (FALSE);
2195 }
2196
2197 prev_size >>= PAGE_SHIFT;
2198 next_size >>= PAGE_SHIFT;
2199 next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2200
2201 if (prev_object->ref_count > 1 &&
2202 prev_object->size != next_pindex &&
2203 (prev_object->flags & OBJ_ONEMAPPING) == 0) {
2204 VM_OBJECT_WUNLOCK(prev_object);
2205 return (FALSE);
2206 }
2207
2208 /*
2209 * Account for the charge.
2210 */
2211 if (prev_object->cred != NULL) {
2212 /*
2213 * If prev_object was charged, then this mapping,
2214 * although not charged now, may become writable
2215 * later. Non-NULL cred in the object would prevent
2216 * swap reservation during enabling of the write
		 * access, so reserve swap now.  A failed reservation causes
		 * allocation of a separate object for the map entry, and
		 * swap reservation for that entry is managed at the
		 * appropriate time.
2221 */
2222 if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
2223 prev_object->cred)) {
2224 VM_OBJECT_WUNLOCK(prev_object);
2225 return (FALSE);
2226 }
2227 prev_object->charge += ptoa(next_size);
2228 }
2229
2230 /*
2231 * Remove any pages that may still be in the object from a previous
2232 * deallocation.
2233 */
2234 if (next_pindex < prev_object->size) {
2235 vm_object_page_remove(prev_object, next_pindex, next_pindex +
2236 next_size, 0);
2237 #if 0
2238 if (prev_object->cred != NULL) {
2239 KASSERT(prev_object->charge >=
2240 ptoa(prev_object->size - next_pindex),
2241 ("object %p overcharged 1 %jx %jx", prev_object,
2242 (uintmax_t)next_pindex, (uintmax_t)next_size));
2243 prev_object->charge -= ptoa(prev_object->size -
2244 next_pindex);
2245 }
2246 #endif
2247 }
2248
2249 /*
2250 * Extend the object if necessary.
2251 */
2252 if (next_pindex + next_size > prev_object->size)
2253 prev_object->size = next_pindex + next_size;
2254
2255 VM_OBJECT_WUNLOCK(prev_object);
2256 return (TRUE);
2257 }
2258
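/*
 * Bump the object's generation count so that vm_object_mightbedirty_()
 * below reports the object as possibly dirty until its clean generation
 * catches up again.
 */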
void
vm_object_set_writeable_dirty_(vm_object_t object)
{
2262 atomic_add_int(&object->generation, 1);
2263 }
2264
bool
vm_object_mightbedirty_(vm_object_t object)
{
2268 return (object->generation != object->cleangeneration);
2269 }
2270
2271 /*
2272 * vm_object_unwire:
2273 *
2274 * For each page offset within the specified range of the given object,
2275 * find the highest-level page in the shadow chain and unwire it. A page
2276 * must exist at every page offset, and the highest-level page must be
2277 * wired.
2278 */
void
vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
    uint8_t queue)
{
2283 vm_object_t tobject, t1object;
2284 vm_page_t m, tm;
2285 vm_pindex_t end_pindex, pindex, tpindex;
2286 int depth, locked_depth;
2287
2288 KASSERT((offset & PAGE_MASK) == 0,
2289 ("vm_object_unwire: offset is not page aligned"));
2290 KASSERT((length & PAGE_MASK) == 0,
2291 ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
2292 /* The wired count of a fictitious page never changes. */
2293 if ((object->flags & OBJ_FICTITIOUS) != 0)
2294 return;
2295 pindex = OFF_TO_IDX(offset);
2296 end_pindex = pindex + atop(length);
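	/*
	 * Read-lock each object along the shadow chain as it is traversed
	 * and hold the accumulated locks until the page is found;
	 * "locked_depth" counts how many objects are currently locked.
	 */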
2297 again:
2298 locked_depth = 1;
2299 VM_OBJECT_RLOCK(object);
2300 m = vm_page_find_least(object, pindex);
2301 while (pindex < end_pindex) {
2302 if (m == NULL || pindex < m->pindex) {
2303 /*
2304 * The first object in the shadow chain doesn't
2305 * contain a page at the current index. Therefore,
2306 * the page must exist in a backing object.
2307 */
2308 tobject = object;
2309 tpindex = pindex;
2310 depth = 0;
2311 do {
2312 tpindex +=
2313 OFF_TO_IDX(tobject->backing_object_offset);
2314 tobject = tobject->backing_object;
2315 KASSERT(tobject != NULL,
2316 ("vm_object_unwire: missing page"));
2317 if ((tobject->flags & OBJ_FICTITIOUS) != 0)
2318 goto next_page;
2319 depth++;
2320 if (depth == locked_depth) {
2321 locked_depth++;
2322 VM_OBJECT_RLOCK(tobject);
2323 }
2324 } while ((tm = vm_page_lookup(tobject, tpindex)) ==
2325 NULL);
2326 } else {
2327 tm = m;
2328 m = TAILQ_NEXT(m, listq);
2329 }
2330 if (vm_page_trysbusy(tm) == 0) {
2331 for (tobject = object; locked_depth >= 1;
2332 locked_depth--) {
2333 t1object = tobject->backing_object;
2334 if (tm->object != tobject)
2335 VM_OBJECT_RUNLOCK(tobject);
2336 tobject = t1object;
2337 }
2338 tobject = tm->object;
2339 if (!vm_page_busy_sleep(tm, "unwbo",
2340 VM_ALLOC_IGN_SBUSY))
2341 VM_OBJECT_RUNLOCK(tobject);
2342 goto again;
2343 }
2344 vm_page_unwire(tm, queue);
2345 vm_page_sunbusy(tm);
2346 next_page:
2347 pindex++;
2348 }
2349 /* Release the accumulated object locks. */
2350 for (tobject = object; locked_depth >= 1; locked_depth--) {
2351 t1object = tobject->backing_object;
2352 VM_OBJECT_RUNLOCK(tobject);
2353 tobject = t1object;
2354 }
2355 }
2356
2357 /*
2358 * Return the vnode for the given object, or NULL if none exists.
2359 * For tmpfs objects, the function may return NULL if there is
2360 * no vnode allocated at the time of the call.
2361 */
struct vnode *
vm_object_vnode(vm_object_t object)
{
2365 struct vnode *vp;
2366
2367 VM_OBJECT_ASSERT_LOCKED(object);
2368 vm_pager_getvp(object, &vp, NULL);
2369 return (vp);
2370 }
2371
2372 /*
2373 * Busy the vm object. This prevents new pages belonging to the object from
2374 * becoming busy. Existing pages persist as busy. Callers are responsible
2375 * for checking page state before proceeding.
2376 */
void
vm_object_busy(vm_object_t obj)
{
2380
2381 VM_OBJECT_ASSERT_LOCKED(obj);
2382
2383 blockcount_acquire(&obj->busy, 1);
2384 /* The fence is required to order loads of page busy. */
2385 atomic_thread_fence_acq_rel();
2386 }
2387
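/*
 * Release a busy reference acquired by vm_object_busy().
 */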
void
vm_object_unbusy(vm_object_t obj)
{
2391
2392 blockcount_release(&obj->busy, 1);
2393 }
2394
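/*
 * Sleep waiting for the object to be unbusied.  The caller must not hold
 * the object lock.
 */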
void
vm_object_busy_wait(vm_object_t obj, const char *wmesg)
{
2398
2399 VM_OBJECT_ASSERT_UNLOCKED(obj);
2400
2401 (void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM);
2402 }
2403
2404 /*
2405 * This function aims to determine if the object is mapped,
2406 * specifically, if it is referenced by a vm_map_entry. Because
2407 * objects occasionally acquire transient references that do not
2408 * represent a mapping, the method used here is inexact. However, it
2409 * has very low overhead and is good enough for the advisory
2410 * vm.vmtotal sysctl.
2411 */
bool
vm_object_is_active(vm_object_t obj)
{
2415
2416 return (obj->ref_count > atomic_load_int(&obj->shadow_count));
2417 }
2418
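/*
 * Emit a kinfo_vmobject record for every VM object, for the sysctls
 * defined below.  With "swap_only" set, only anonymous and swap-backed
 * objects are reported and the per-page queue accounting is skipped.
 */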
static int
vm_object_list_handler(struct sysctl_req *req, bool swap_only)
{
2422 struct kinfo_vmobject *kvo;
2423 char *fullpath, *freepath;
2424 struct vnode *vp;
2425 struct vattr va;
2426 vm_object_t obj;
2427 vm_page_t m;
2428 struct cdev *cdev;
2429 struct cdevsw *csw;
2430 u_long sp;
2431 int count, error, ref;
2432 key_t key;
2433 unsigned short seq;
2434 bool want_path;
2435
2436 if (req->oldptr == NULL) {
2437 /*
2438 * If an old buffer has not been provided, generate an
2439 * estimate of the space needed for a subsequent call.
2440 */
2441 mtx_lock(&vm_object_list_mtx);
2442 count = 0;
2443 TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2444 if (obj->type == OBJT_DEAD)
2445 continue;
2446 count++;
2447 }
2448 mtx_unlock(&vm_object_list_mtx);
2449 return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
2450 count * 11 / 10));
2451 }
2452
2453 want_path = !(swap_only || jailed(curthread->td_ucred));
2454 kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK | M_ZERO);
2455 error = 0;
2456
2457 /*
2458 * VM objects are type stable and are never removed from the
2459 * list once added. This allows us to safely read obj->object_list
2460 * after reacquiring the VM object lock.
2461 */
2462 mtx_lock(&vm_object_list_mtx);
2463 TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2464 if (obj->type == OBJT_DEAD ||
2465 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0))
2466 continue;
2467 VM_OBJECT_RLOCK(obj);
2468 if (obj->type == OBJT_DEAD ||
2469 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0)) {
2470 VM_OBJECT_RUNLOCK(obj);
2471 continue;
2472 }
2473 mtx_unlock(&vm_object_list_mtx);
2474 kvo->kvo_size = ptoa(obj->size);
2475 kvo->kvo_resident = obj->resident_page_count;
2476 kvo->kvo_ref_count = obj->ref_count;
2477 kvo->kvo_shadow_count = atomic_load_int(&obj->shadow_count);
2478 kvo->kvo_memattr = obj->memattr;
2479 kvo->kvo_active = 0;
2480 kvo->kvo_inactive = 0;
2481 kvo->kvo_flags = 0;
2482 if (!swap_only) {
2483 TAILQ_FOREACH(m, &obj->memq, listq) {
2484 /*
2485 * A page may belong to the object but be
2486 * dequeued and set to PQ_NONE while the
2487 * object lock is not held. This makes the
2488 * reads of m->queue below racy, and we do not
2489 * count pages set to PQ_NONE. However, this
2490 * sysctl is only meant to give an
2491 * approximation of the system anyway.
2492 */
2493 if (vm_page_active(m))
2494 kvo->kvo_active++;
2495 else if (vm_page_inactive(m))
2496 kvo->kvo_inactive++;
2497 else if (vm_page_in_laundry(m))
2498 kvo->kvo_laundry++;
2499 }
2500 }
2501
2502 kvo->kvo_vn_fileid = 0;
2503 kvo->kvo_vn_fsid = 0;
2504 kvo->kvo_vn_fsid_freebsd11 = 0;
2505 freepath = NULL;
2506 fullpath = "";
2507 vp = NULL;
2508 kvo->kvo_type = vm_object_kvme_type(obj, want_path ? &vp :
2509 NULL);
2510 if (vp != NULL) {
2511 vref(vp);
2512 } else if ((obj->flags & OBJ_ANON) != 0) {
2513 MPASS(kvo->kvo_type == KVME_TYPE_SWAP);
2514 kvo->kvo_me = (uintptr_t)obj;
2515 /* tmpfs objs are reported as vnodes */
2516 kvo->kvo_backing_obj = (uintptr_t)obj->backing_object;
2517 sp = swap_pager_swapped_pages(obj);
2518 kvo->kvo_swapped = sp > UINT32_MAX ? UINT32_MAX : sp;
2519 }
2520 if ((obj->type == OBJT_DEVICE || obj->type == OBJT_MGTDEVICE) &&
2521 (obj->flags & OBJ_CDEVH) != 0) {
2522 cdev = obj->un_pager.devp.handle;
2523 if (cdev != NULL) {
2524 csw = dev_refthread(cdev, &ref);
2525 if (csw != NULL) {
2526 strlcpy(kvo->kvo_path, cdev->si_name,
2527 sizeof(kvo->kvo_path));
2528 dev_relthread(cdev, ref);
2529 }
2530 }
2531 }
2532 VM_OBJECT_RUNLOCK(obj);
2533 if ((obj->flags & OBJ_SYSVSHM) != 0) {
2534 kvo->kvo_flags |= KVMO_FLAG_SYSVSHM;
2535 shmobjinfo(obj, &key, &seq);
2536 kvo->kvo_vn_fileid = key;
2537 kvo->kvo_vn_fsid_freebsd11 = seq;
2538 }
2539 if ((obj->flags & OBJ_POSIXSHM) != 0) {
2540 kvo->kvo_flags |= KVMO_FLAG_POSIXSHM;
2541 shm_get_path(obj, kvo->kvo_path,
2542 sizeof(kvo->kvo_path));
2543 }
2544 if (vp != NULL) {
2545 vn_fullpath(vp, &fullpath, &freepath);
2546 vn_lock(vp, LK_SHARED | LK_RETRY);
2547 if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
2548 kvo->kvo_vn_fileid = va.va_fileid;
2549 kvo->kvo_vn_fsid = va.va_fsid;
2550 kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
2551 /* truncate */
2552 }
2553 vput(vp);
2554 strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
2555 free(freepath, M_TEMP);
2556 }
2557
2558 /* Pack record size down */
2559 kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
2560 + strlen(kvo->kvo_path) + 1;
2561 kvo->kvo_structsize = roundup(kvo->kvo_structsize,
2562 sizeof(uint64_t));
2563 error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
2564 maybe_yield();
2565 mtx_lock(&vm_object_list_mtx);
2566 if (error)
2567 break;
2568 }
2569 mtx_unlock(&vm_object_list_mtx);
2570 free(kvo, M_TEMP);
2571 return (error);
2572 }
2573
static int
sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
{
2577 return (vm_object_list_handler(req, false));
2578 }
2579
2580 SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
2581 CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
2582 "List of VM objects");
2583
static int
sysctl_vm_object_list_swap(SYSCTL_HANDLER_ARGS)
{
2587 return (vm_object_list_handler(req, true));
2588 }
2589
/*
 * This sysctl returns a list of the anonymous or swap objects.  The
 * intent is to provide a stripped-down, optimized list useful for
 * analyzing swap use.  Since non-swap (default) objects technically
 * participate in shadow chains and are converted to the swap type as
 * needed by the swap pager, they must be reported as well.
 */
2597 SYSCTL_PROC(_vm, OID_AUTO, swap_objects,
2598 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 0,
2599 sysctl_vm_object_list_swap, "S,kinfo_vmobject",
2600 "List of swap VM objects");
2601
2602 #include "opt_ddb.h"
2603 #ifdef DDB
2604 #include <sys/kernel.h>
2605
2606 #include <sys/cons.h>
2607
2608 #include <ddb/ddb.h>
2609
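/*
 * Return non-zero if "object" is reachable from the given map entry's
 * shadow chain, or from any entry in the map (including submaps) when
 * "entry" is NULL.
 */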
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
2613 vm_map_t tmpm;
2614 vm_map_entry_t tmpe;
2615 vm_object_t obj;
2616
2617 if (map == 0)
2618 return 0;
2619
2620 if (entry == 0) {
2621 VM_MAP_ENTRY_FOREACH(tmpe, map) {
2622 if (_vm_object_in_map(map, object, tmpe)) {
2623 return 1;
2624 }
2625 }
2626 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2627 tmpm = entry->object.sub_map;
2628 VM_MAP_ENTRY_FOREACH(tmpe, tmpm) {
2629 if (_vm_object_in_map(tmpm, object, tmpe)) {
2630 return 1;
2631 }
2632 }
2633 } else if ((obj = entry->object.vm_object) != NULL) {
2634 for (; obj; obj = obj->backing_object)
2635 if (obj == object) {
2636 return 1;
2637 }
2638 }
2639 return 0;
2640 }
2641
static int
vm_object_in_map(vm_object_t object)
{
2645 struct proc *p;
2646
2647 /* sx_slock(&allproc_lock); */
2648 FOREACH_PROC_IN_SYSTEM(p) {
2649 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2650 continue;
2651 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
2652 /* sx_sunlock(&allproc_lock); */
2653 return 1;
2654 }
2655 }
2656 /* sx_sunlock(&allproc_lock); */
2657 if (_vm_object_in_map(kernel_map, object, 0))
2658 return 1;
2659 return 0;
2660 }
2661
DB_SHOW_COMMAND_FLAGS(vmochk, vm_object_check, DB_CMD_MEMSAFE)
{
2664 vm_object_t object;
2665
	/*
	 * Make sure that internal objects are in a map somewhere
	 * and that none have zero reference counts.
	 */
2670 TAILQ_FOREACH(object, &vm_object_list, object_list) {
2671 if ((object->flags & OBJ_ANON) != 0) {
2672 if (object->ref_count == 0) {
2673 db_printf("vmochk: internal obj has zero ref count: %ld\n",
2674 (long)object->size);
2675 }
2676 if (!vm_object_in_map(object)) {
2677 db_printf(
2678 "vmochk: internal obj is not in a map: "
2679 "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2680 object->ref_count, (u_long)object->size,
2681 (u_long)object->size,
2682 (void *)object->backing_object);
2683 }
2684 }
2685 if (db_pager_quit)
2686 return;
2687 }
2688 }
2689
2690 /*
2691 * vm_object_print: [ debug ]
2692 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
2695 /* XXX convert args. */
2696 vm_object_t object = (vm_object_t)addr;
2697 boolean_t full = have_addr;
2698
2699 vm_page_t p;
2700
2701 /* XXX count is an (unused) arg. Avoid shadowing it. */
2702 #define count was_count
2703
2704 int count;
2705
2706 if (object == NULL)
2707 return;
2708
2709 db_iprintf(
2710 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
2711 object, (int)object->type, (uintmax_t)object->size,
2712 object->resident_page_count, object->ref_count, object->flags,
2713 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2714 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2715 atomic_load_int(&object->shadow_count),
2716 object->backing_object ? object->backing_object->ref_count : 0,
2717 object->backing_object, (uintmax_t)object->backing_object_offset);
2718
2719 if (!full)
2720 return;
2721
2722 db_indent += 2;
2723 count = 0;
2724 TAILQ_FOREACH(p, &object->memq, listq) {
2725 if (count == 0)
2726 db_iprintf("memory:=");
2727 else if (count == 6) {
2728 db_printf("\n");
2729 db_iprintf(" ...");
2730 count = 0;
2731 } else
2732 db_printf(",");
2733 count++;
2734
2735 db_printf("(off=0x%jx,page=0x%jx)",
2736 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2737
2738 if (db_pager_quit)
2739 break;
2740 }
2741 if (count != 0)
2742 db_printf("\n");
2743 db_indent -= 2;
2744 }
2745
2746 /* XXX. */
2747 #undef count
2748
2749 /* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
2757 vm_object_print_static(addr, have_addr, count, modif);
2758 }
2759
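/*
 * Print, for each object in the system, the runs of resident pages that
 * are contiguous both in the object and in physical memory.
 */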
DB_SHOW_COMMAND_FLAGS(vmopag, vm_object_print_pages, DB_CMD_MEMSAFE)
{
2762 vm_object_t object;
2763 vm_pindex_t fidx;
2764 vm_paddr_t pa;
2765 vm_page_t m, prev_m;
2766 int rcount;
2767
2768 TAILQ_FOREACH(object, &vm_object_list, object_list) {
2769 db_printf("new object: %p\n", (void *)object);
2770 if (db_pager_quit)
2771 return;
2772
2773 rcount = 0;
2774 fidx = 0;
2775 pa = -1;
2776 TAILQ_FOREACH(m, &object->memq, listq) {
2777 if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2778 prev_m->pindex + 1 != m->pindex) {
2779 if (rcount) {
2780 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2781 (long)fidx, rcount, (long)pa);
2782 if (db_pager_quit)
2783 return;
2784 rcount = 0;
2785 }
2786 }
2787 if (rcount &&
2788 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2789 ++rcount;
2790 continue;
2791 }
2792 if (rcount) {
2793 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2794 (long)fidx, rcount, (long)pa);
2795 if (db_pager_quit)
2796 return;
2797 }
2798 fidx = m->pindex;
2799 pa = VM_PAGE_TO_PHYS(m);
2800 rcount = 1;
2801 }
2802 if (rcount) {
2803 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2804 (long)fidx, rcount, (long)pa);
2805 if (db_pager_quit)
2806 return;
2807 }
2808 }
2809 }
2810 #endif /* DDB */
2811