/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
	vm_ooffset_t);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int req)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev,
    int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{
}

static struct pagerops deadpagerops = {
	NULL,			/* pgo_init */
	dead_pager_alloc,	/* pgo_alloc */
	dead_pager_dealloc,	/* pgo_dealloc */
	dead_pager_getpages,	/* pgo_getpages */
	dead_pager_putpages,	/* pgo_putpages */
	dead_pager_haspage,	/* pgo_haspage */
	NULL			/* pgo_pageunswapped */
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX: for maximum efficiency this needs to be large enough to hold the
 * worst-case number of pending async cleaning requests (NPENDINGIO == 64)
 * times the maximum swap cluster size (MAXPHYS == 64k).
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

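/*
 * For reference, the worst case sketched above works out to
 * NPENDINGIO * MAXPHYS = 64 * 64KB = 4MB of KVA, so the 8MB map
 * leaves headroom for roughly twice that many in-flight requests.
 */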
int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
static int bswneeded;
static vm_offset_t swapbkva;		/* swap buffers kva */
struct mtx pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	TAILQ_INIT(&bswlist);
	/*
	 * Initialize known pagers.
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_xflags = 0;
	}

	cluster_pbuf_freecnt = nswbuf / 2;

	/*
	 * Reserve KVA for the swap buffers: one MAXPHYS-sized slice per
	 * pbuf (see initpbuf() below).
	 */
	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (swapbkva == 0)
		panic("Not enough pager_map VM space for physical buffers");
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops != NULL)
		ret = (*ops->pgo_alloc)(handle, size, prot, off);
	else
		ret = NULL;
	return (ret);
}

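/*
 * Example (an illustrative sketch, not code compiled here): a vnode-backed
 * object for a file of "size" bytes would be obtained roughly as follows,
 * where "vp" and "size" are hypothetical locals of the caller:
 *
 *	vm_object_t object;
 *
 *	object = vm_pager_allocate(OBJT_VNODE, vp, size, VM_PROT_ALL, 0);
 *	if (object == NULL)
 *		return (ENOMEM);
 */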
/*
 *	The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	(*pagertab[object->type]->pgo_dealloc)(object);
}

/*
 *	vm_pager_strategy:
 *
 *	May be called at any spl.  Hands the bio directly to the pager's
 *	strategy routine; if the pager has none, the request is failed
 *	with ENXIO.
 */
void
vm_pager_strategy(vm_object_t object, struct bio *bp)
{

	if (pagertab[object->type]->pgo_strategy != NULL) {
		(*pagertab[object->type]->pgo_strategy)(object, bp);
	} else {
		bp->bio_flags |= BIO_ERROR;
		bp->bio_error = ENXIO;
		biodone(bp);
	}
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */

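/*
 * For orientation, those inlines are thin dispatchers through pagertab,
 * much like vm_pager_deallocate() above.  vm_pager_get_pages(), for
 * example, reduces to approximately the following (paraphrased, not
 * copied verbatim from vm/vm_pager.h):
 *
 *	static __inline int
 *	vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count,
 *	    int reqpage)
 *	{
 *		return ((*pagertab[object->type]->pgo_getpages)(object, m,
 *		    count, reqpage));
 *	}
 */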
vm_offset_t
vm_pager_map_page(vm_page_t m)
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
	pmap_qenter(kva, &m, 1);
	return (kva);
}

void
vm_pager_unmap_page(vm_offset_t kva)
{

	pmap_qremove(kva, 1);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}

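/*
 * Typical usage (a sketch; "m" is a vm_page_t the caller already holds,
 * and bzero() stands in for whatever byte-level access is needed):
 *
 *	vm_offset_t kva;
 *
 *	kva = vm_pager_map_page(m);
 *	bzero((void *)kva, PAGE_SIZE);
 *	vm_pager_unmap_page(kva);
 *
 * Note that vm_pager_map_page() can sleep waiting for pager_map space,
 * so it must not be used in contexts that cannot block.
 */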
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list)
		if (object->handle == handle)
			return (object);
	return (NULL);
}

/*
 * Initialize a physical buffer.
 *
 * XXX This probably belongs in vfs_bio.c
 */
static void
initpbuf(struct buf *bp)
{

	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	/* Each pbuf owns a fixed MAXPHYS-sized slice of swapbkva. */
	bp->b_saveaddr = (caddr_t)(MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_data = bp->b_saveaddr;
	bp->b_kvabase = bp->b_saveaddr;
	bp->b_kvasize = MAXPHYS;
	bp->b_xflags = 0;
	bp->b_flags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	bp->b_magic = B_MAGIC_BIO;
	bp->b_op = &buf_ops_bio;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
}

/*
 * Allocate a physical buffer.
 *
 *	There are a limited number (nswbuf) of physical buffers.  We need
 *	to make sure that no single subsystem is able to hog all of them,
 *	so each subsystem implements a counter which is typically initialized
 *	to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 *	relpbuf() increments it on release; getpbuf() blocks while the counter
 *	is zero.  A subsystem may initialize the counter to -1 to disable the
 *	limit, but it must still match every getpbuf() with a relpbuf() on the
 *	same counter.  (See the example after the function body.)
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(int *pfreecnt)
{
	int s;
	struct buf *bp;

	s = splvm();
	mtx_lock(&pbuf_mtx);

	for (;;) {
		if (pfreecnt != NULL) {
			while (*pfreecnt == 0)
				msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt != NULL)
		--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	splx(s);

	initpbuf(bp);
	return (bp);
}

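/*
 * Example of the counter protocol described above (a sketch; the
 * "mysubsys" names are hypothetical):
 *
 *	static int mysubsys_pbuf_freecnt = -1;
 *
 *	(once, at subsystem initialization)
 *	mysubsys_pbuf_freecnt = nswbuf / 2;
 *
 *	(per I/O)
 *	bp = getpbuf(&mysubsys_pbuf_freecnt);
 *	... set up the buffer and perform the I/O ...
 *	relpbuf(bp, &mysubsys_pbuf_freecnt);
 *
 * Every getpbuf()/trypbuf() success must be matched by a relpbuf() on
 * the same counter, or the counter will drift and eventually wedge
 * callers in "wswbuf0".
 */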
/*
 * Allocate a physical buffer, if one is available.
 *
 *	Note that there is no NULL hack here - all subsystems using this
 *	call understand how to use pfreecnt.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	int s;
	struct buf *bp;

	s = splvm();
	mtx_lock(&pbuf_mtx);
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		mtx_unlock(&pbuf_mtx);
		splx(s);
		return (NULL);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);

	--*pfreecnt;

	mtx_unlock(&pbuf_mtx);
	splx(s);

	initpbuf(bp);

	return (bp);
}

/*
 * Release a physical buffer.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int s;

	s = splvm();

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	if (bp->b_vp)
		pbrelvp(bp);

	BUF_UNLOCK(bp);

	mtx_lock(&pbuf_mtx);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt != NULL) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	mtx_unlock(&pbuf_mtx);
	splx(s);
}
477