/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

struct buf *swbuf;

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int req)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev,
    int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{
}

static struct pagerops deadpagerops = {
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops,		/* OBJT_DEAD */
	&sgpagerops,		/* OBJT_SG */
	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
};

static const int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
struct mtx_padalign pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;
static int bswneeded;
vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	TAILQ_INIT(&bswlist);
	/*
	 * Initialize known pagers.
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_xflags = 0;
	}

	cluster_pbuf_freecnt = nswbuf / 2;
	vnode_pbuf_freecnt = nswbuf / 2 + 1;
	vnode_async_pbuf_freecnt = nswbuf / 2;
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops != NULL)
		ret = (*ops->pgo_alloc)(handle, size, prot, off, cred);
	else
		ret = NULL;
	return (ret);
}
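
/*
 * Illustrative sketch (not part of this file's interfaces): anonymous,
 * swap-backed objects are created through this routine; a caller might
 * do, e.g.:
 *
 *	object = vm_pager_allocate(OBJT_SWAP, NULL, ptoa(npages),
 *	    VM_PROT_DEFAULT, 0, cred);
 *
 * where npages and cred are the caller's; a NULL handle requests an
 * unnamed object.
 */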

/*
 *	The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	(*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	/*
	 * All pages must be busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.
	 */
	for (int i = 0; i < count; i++) {
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int r;

	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
	if (r != VM_PAGER_OK)
		return (r);

	/*
	 * If the pager has replaced the page, assert that it has
	 * updated the array and that the page is still exclusively
	 * busied.
	 */
	KASSERT(m[reqpage] == vm_page_lookup(object, m[reqpage]->pindex),
	    ("%s: mismatch page %p pindex %ju", __func__,
	    m[reqpage], (uintmax_t)m[reqpage]->pindex));
	vm_page_assert_xbusied(m[reqpage]);

	/*
	 * If the pager did not fill the entire page, zero out the
	 * partially valid data.
	 */
	if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
		vm_page_zero_invalid(m[reqpage], TRUE);

	return (VM_PAGER_OK);
}
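
/*
 * Sketch of a typical call sequence, assuming vm_page_grab() returns
 * the page exclusively busied (as elsewhere in the VM):
 *
 *	VM_OBJECT_WLOCK(object);
 *	ma[0] = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 *	if (ma[0]->valid != VM_PAGE_BITS_ALL &&
 *	    vm_pager_get_pages(object, ma, 1, 0) != VM_PAGER_OK)
 *		... the caller must dispose of the page itself ...
 *	VM_OBJECT_WUNLOCK(object);
 */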

int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
{

	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, reqpage, iodone, arg));
}

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}
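
/*
 * Sketch: the device pager, for instance, resolves a handle under its
 * list lock roughly as follows (names from vm/device_pager.c):
 *
 *	mtx_lock(&dev_pager_mtx);
 *	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
 *	mtx_unlock(&dev_pager_mtx);
 */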

/*
 * Free the non-requested pages from the given array.  To free all pages,
 * the caller should pass an out-of-range reqpage number.
 */
void
vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
    int npages, boolean_t object_locked)
{
	enum { UNLOCKED, CALLER_LOCKED, INTERNALLY_LOCKED } locked;
	int i;

	if (object_locked) {
		VM_OBJECT_ASSERT_WLOCKED(object);
		locked = CALLER_LOCKED;
	} else {
		VM_OBJECT_ASSERT_UNLOCKED(object);
		locked = UNLOCKED;
	}
	for (i = 0; i < npages; ++i) {
		if (i != reqpage) {
			if (locked == UNLOCKED) {
				VM_OBJECT_WLOCK(object);
				locked = INTERNALLY_LOCKED;
			}
			vm_page_lock(ma[i]);
			vm_page_free(ma[i]);
			vm_page_unlock(ma[i]);
		}
	}
	if (locked == INTERNALLY_LOCKED)
		VM_OBJECT_WUNLOCK(object);
}
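
/*
 * Sketch: a pgo_getpages implementation whose I/O fails might clean up
 * with, e.g.:
 *
 *	vm_pager_free_nonreq(object, ma, reqpage, count, TRUE);
 *
 * passing TRUE because it still holds the object's write lock.
 */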

/*
 * initialize a physical buffer
 */

/*
 * XXX This probably belongs in vfs_bio.c
 */
static void
initpbuf(struct buf *bp)
{

	KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
	KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	bp->b_kvabase = (caddr_t)(MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_data = bp->b_kvabase;
	bp->b_kvasize = MAXPHYS;
	bp->b_flags = 0;
	bp->b_xflags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
}

/*
 * allocate a physical buffer
 *
 *	There are a limited number (nswbuf) of physical buffers.  We need
 *	to make sure that no single subsystem is able to hog all of them,
 *	so each subsystem implements a counter which is typically initialized
 *	to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 *	blocks if the counter hits zero; relpbuf() increments it on release.
 *	A subsystem may initialize the counter to -1 to disable the feature,
 *	but it must still be sure to match up all uses of getpbuf() with
 *	relpbuf() using the same variable.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);

	for (;;) {
		if (pfreecnt != NULL) {
			while (*pfreecnt == 0)
				msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt != NULL)
		--*pfreecnt;
	mtx_unlock(&pbuf_mtx);

	initpbuf(bp);
	return (bp);
}
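
/*
 * Sketch: a subsystem pairs getpbuf() and relpbuf() on the same counter,
 * e.g. with cluster_pbuf_freecnt declared above:
 *
 *	struct buf *bp;
 *
 *	bp = getpbuf(&cluster_pbuf_freecnt);
 *	... fill in bp, issue the I/O and wait for completion ...
 *	relpbuf(bp, &cluster_pbuf_freecnt);
 */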

/*
 * allocate a physical buffer, if one is available.
 *
 *	Note that there is no NULL hack here - all subsystems using this
 *	call understand how to use pfreecnt.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		mtx_unlock(&pbuf_mtx);
		return (NULL);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;
	mtx_unlock(&pbuf_mtx);

	initpbuf(bp);
	return (bp);
}

/*
 * release a physical buffer
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
	KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));

	BUF_UNLOCK(bp);

	mtx_lock(&pbuf_mtx);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt != NULL) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	mtx_unlock(&pbuf_mtx);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets the B_PAGING flag to indicate that the vnode is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * vnode or reference-counted.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}
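
/*
 * Sketch: pbgetvp()/pbrelvp() bracket a pbuf's use for vnode I/O, with
 * the pbuf itself coming from getpbuf(), e.g.:
 *
 *	bp = getpbuf(&vnode_pbuf_freecnt);
 *	pbgetvp(vp, bp);
 *	... perform the I/O ...
 *	pbrelvp(bp);
 *	relpbuf(bp, &vnode_pbuf_freecnt);
 */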

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets the B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * bufobj or reference-counted.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}