/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int req)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev,
    int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{
}

static struct pagerops deadpagerops = {
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops,		/* OBJT_DEAD */
	&sgpagerops,		/* OBJT_SG */
	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
};

static const int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
struct mtx_padalign pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;
static int bswneeded;
vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	TAILQ_INIT(&bswlist);
	/*
	 * Initialize known pagers.
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_xflags = 0;
	}

	cluster_pbuf_freecnt = nswbuf / 2;
	vnode_pbuf_freecnt = nswbuf / 2 + 1;
	vnode_async_pbuf_freecnt = nswbuf / 2;
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops != NULL)
		ret = (*ops->pgo_alloc)(handle, size, prot, off, cred);
	else
		ret = NULL;
	return (ret);
}
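
/*
 * Example usage (a sketch; "dev", "objsize", "foff" and "cred" are
 * hypothetical): a device-backed mapping is typically instantiated
 * through this interface, with the cdev acting as the handle:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_DEVICE, dev, objsize, VM_PROT_ALL,
 *	    foff, cred);
 *	if (obj == NULL)
 *		return (EINVAL);
 */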

/*
 *	The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	(*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	/*
	 * All pages must be busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.
	 */
	for (int i = 0; i < count; i++) {
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int r;

	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
	if (r != VM_PAGER_OK)
		return (r);

	/*
	 * If the pager has replaced the page, assert that it has also
	 * updated the array, and that the page is still exclusive busied.
	 */
	KASSERT(m[reqpage] == vm_page_lookup(object, m[reqpage]->pindex),
	    ("%s: mismatch page %p pindex %ju", __func__,
	    m[reqpage], (uintmax_t)m[reqpage]->pindex));
	vm_page_assert_xbusied(m[reqpage]);

	/*
	 * If the pager didn't fill the entire page, zero out the
	 * invalid portions.
	 */
	if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
		vm_page_zero_invalid(m[reqpage], TRUE);

	return (VM_PAGER_OK);
}
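
/*
 * Example caller protocol (a sketch; assumes the page was not already
 * valid, and elides error handling).  The requested page is grabbed
 * exclusive busied and invalid, paged in, and is fully valid afterwards
 * on VM_PAGER_OK:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m[0] = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 *	if (vm_pager_get_pages(object, m, 1, 0) == VM_PAGER_OK) {
 *		... m[0] is valid and still exclusive busied ...
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 */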

int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
{

	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, reqpage, iodone, arg));
}
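
/*
 * Example completion callback (a sketch; "my_getpages_done" and its
 * argument are hypothetical).  The iodone callback runs once the pager
 * finishes the read, possibly from another thread:
 *
 *	static void
 *	my_getpages_done(void *arg, vm_page_t *ma, int count, int error)
 *	{
 *		... inspect error, unbusy or consume the pages ...
 *	}
 *
 *	vm_pager_get_pages_async(object, ma, count, reqpage,
 *	    my_getpages_done, arg);
 */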

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}
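
/*
 * Example usage (a sketch; "mypager_mtx" and "mypager_list" are
 * hypothetical).  A pager that names its objects by handle reuses an
 * existing object when the same handle is mapped again:
 *
 *	mtx_lock(&mypager_mtx);
 *	object = vm_pager_object_lookup(&mypager_list, handle);
 *	mtx_unlock(&mypager_mtx);
 *	if (object == NULL)
 *		... allocate and insert a fresh object ...
 */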

/*
 * Free the non-requested pages from the given array.  To free all pages,
 * the caller should pass an out-of-range reqpage number.
 */
void
vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
    int npages, boolean_t object_locked)
{
	enum { UNLOCKED, CALLER_LOCKED, INTERNALLY_LOCKED } locked;
	int i;

	if (object_locked) {
		VM_OBJECT_ASSERT_WLOCKED(object);
		locked = CALLER_LOCKED;
	} else {
		VM_OBJECT_ASSERT_UNLOCKED(object);
		locked = UNLOCKED;
	}
	for (i = 0; i < npages; ++i) {
		if (i != reqpage) {
			if (locked == UNLOCKED) {
				VM_OBJECT_WLOCK(object);
				locked = INTERNALLY_LOCKED;
			}
			vm_page_lock(ma[i]);
			vm_page_free(ma[i]);
			vm_page_unlock(ma[i]);
		}
	}
	if (locked == INTERNALLY_LOCKED)
		VM_OBJECT_WUNLOCK(object);
}
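
/*
 * Example (a sketch, assuming the object is still write-locked at this
 * point): a pager's getpages error path can use this to release
 * everything except the requested page, which the caller still expects
 * to find busied:
 *
 *	if (error != 0) {
 *		vm_pager_free_nonreq(object, ma, reqpage, count, TRUE);
 *		return (VM_PAGER_ERROR);
 *	}
 */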

/*
 * Initialize a physical buffer.
 *
 * XXX This probably belongs in vfs_bio.c
 */
static void
initpbuf(struct buf *bp)
{
	KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
	KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	bp->b_kvabase = (caddr_t)(MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_data = bp->b_kvabase;
	bp->b_kvasize = MAXPHYS;
	bp->b_flags = 0;
	bp->b_xflags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
}

/*
 * Allocate a physical buffer.
 *
 *	There are a limited number (nswbuf) of physical buffers.  We need
 *	to make sure that no single subsystem is able to hog all of them,
 *	so each subsystem implements a counter which is typically initialized
 *	to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 *	increments it on release, and blocks if the counter hits zero.  A
 *	subsystem may initialize the counter to -1 to disable the feature,
 *	but it must still be sure to match up all uses of getpbuf() with
 *	relpbuf() using the same variable.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);

	for (;;) {
		if (pfreecnt != NULL) {
			while (*pfreecnt == 0)
				msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt != NULL)
		--*pfreecnt;
	mtx_unlock(&pbuf_mtx);

	initpbuf(bp);
	return (bp);
}

/*
 * Allocate a physical buffer, if one is available.
 *
 *	Note that there is no NULL hack here - all subsystems using this
 *	call understand how to use pfreecnt.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		mtx_unlock(&pbuf_mtx);
		return (NULL);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;
	mtx_unlock(&pbuf_mtx);

	initpbuf(bp);
	return (bp);
}

/*
 * Release a physical buffer.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
	KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));

	BUF_UNLOCK(bp);

	mtx_lock(&pbuf_mtx);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt != NULL) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	mtx_unlock(&pbuf_mtx);
}
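
/*
 * Example pbuf lifecycle (a sketch; "mypager_pbuf_freecnt" is a
 * hypothetical per-subsystem counter, initialized to nswbuf / 2 in
 * code like vm_pager_bufferinit() above).  Every getpbuf() must be
 * matched with a relpbuf() on the same counter:
 *
 *	struct buf *bp;
 *
 *	bp = getpbuf(&mypager_pbuf_freecnt);
 *	bp->b_iocmd = BIO_READ;
 *	... fill in b_data and b_bcount, issue and await the I/O ...
 *	relpbuf(bp, &mypager_pbuf_freecnt);
 */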

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets the B_PAGING flag to indicate that the vnode is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * vnode or ref-counted.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets the B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * bufobj or ref-counted.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}
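
/*
 * Example association bracket (a sketch; "mypgio" is a hypothetical
 * wait channel name).  A pager doing vnode-backed I/O with a pbuf pairs
 * pbgetbo()/pbrelbo() (or pbgetvp()/pbrelvp()) around the transfer:
 *
 *	pbgetbo(&vp->v_bufobj, bp);
 *	bp->b_iocmd = BIO_WRITE;
 *	bstrategy(bp);
 *	bwait(bp, PVM, "mypgio");
 *	pbrelbo(bp);
 */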
594