xref: /freebsd/sys/vm/vm_pager.c (revision 0de89efe5c443f213c7ea28773ef2dc6cf3af2ed)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pager.c,v 1.28 1997/08/25 22:15:28 bde Exp $
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/ucred.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;

static struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
};
static int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
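
/*
 * The table index is the object type, so each stub below dispatches
 * through pagertab[object->type].  For example (sketch), a page-in on a
 * vnode-backed object resolves to vnodepagerops.pgo_getpages:
 *
 *	(*pagertab[OBJT_VNODE]->pgo_getpages) (object, m, count, reqpage);
 */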

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)
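
/*
 * For scale: 64 pending requests * 64KB clusters = 4MB of KVA, so the
 * 8MB default above covers that worst case; the same map also backs the
 * swap buffer KVA carved out in vm_pager_bufferinit() below.
 */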

int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
static int bswneeded;
static vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	/*
	 * Initialize known pagers.
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init) ();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.  The first
	 * nswbuf - 1 headers go onto the free list; the final header is
	 * initialized but not placed on the list.
	 */
	for (i = 0; i < nswbuf - 1; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
	}
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_vnbufs.le_next = NOLIST;

	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_size_t size, vm_prot_t prot,
		  vm_ooffset_t off)
{
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		return ((*ops->pgo_alloc) (handle, size, prot, off));
	return (NULL);
}
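
/*
 * Example (sketch): the vnode pager is typically instantiated as
 *
 *	object = vm_pager_allocate(OBJT_VNODE, (void *) vp, size, prot, 0);
 *
 * which dispatches to vnodepagerops.pgo_alloc via the table above.
 */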

void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	return ((*pagertab[object->type]->pgo_getpages) (object, m, count,
	    reqpage));
}

int
vm_pager_put_pages(vm_object_t object, vm_page_t *m, int count,
		   boolean_t sync, int *rtvals)
{
	return ((*pagertab[object->type]->pgo_putpages) (object, m, count,
	    sync, rtvals));
}

boolean_t
vm_pager_has_page(vm_object_t object, vm_pindex_t offset, int *before,
		  int *after)
{
	return ((*pagertab[object->type]->pgo_haspage) (object, offset,
	    before, after));
}

/*
 * Called by the pageout daemon before going back to sleep.
 * Gives pagers a chance to clean up any completed async paging operations.
 */
void
vm_pager_sync(void)
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if ((*pgops)->pgo_sync != NULL)
			(*(*pgops)->pgo_sync) ();
}

vm_offset_t
vm_pager_map_page(vm_page_t m)
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
	return (kva);
}

void
vm_pager_unmap_page(vm_offset_t kva)
{
	pmap_kremove(kva);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
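
/*
 * Typical pairing (sketch): map a page into pager_map long enough to
 * touch its contents from kernel context, then release the KVA:
 *
 *	kva = vm_pager_map_page(m);
 *	... access the page through kva ...
 *	vm_pager_unmap_page(kva);
 */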

vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	for (object = TAILQ_FIRST(pg_list); object != NULL;
	    object = TAILQ_NEXT(object, pager_object_list))
		if (object->handle == handle)
			return (object);
	return (NULL);
}

/*
 * This routine consumes a reference to the object; the caller must
 * therefore hold a reference before calling it.
 */
int
pager_cache(vm_object_t object, boolean_t should_cache)
{
	if (object == NULL)
		return (KERN_INVALID_ARGUMENT);

	if (should_cache)
		object->flags |= OBJ_CANPERSIST;
	else
		object->flags &= ~OBJ_CANPERSIST;

	vm_object_deallocate(object);

	return (KERN_SUCCESS);
}

/*
 * Initialize a physical buffer.  Each pbuf owns a fixed MAXPHYS-sized
 * slot of KVA at swapbkva, selected by its index in the swbuf array.
 */
static void
initpbuf(struct buf *bp)
{
	bzero(bp, sizeof *bp);
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = QUEUE_NONE;
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_kvabase = bp->b_data;
	bp->b_kvasize = MAXPHYS;
	bp->b_vnbufs.le_next = NOLIST;
}

/*
 * Allocate a physical buffer, sleeping until one is available.
 */
struct buf *
getpbuf(void)
{
	int s;
	struct buf *bp;

	s = splbio();
	/* get a bp from the swap buffer header pool */
	while ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
		bswneeded = 1;
		tsleep(&bswneeded, PVM, "wswbuf", 0);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	splx(s);

	initpbuf(bp);
	return (bp);
}

/*
 * Allocate a physical buffer if one is available; returns NULL instead
 * of sleeping when the pool is empty.
 */
struct buf *
trypbuf(void)
{
	int s;
	struct buf *bp;

	s = splbio();
	if ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
		splx(s);
		return (NULL);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	splx(s);

	initpbuf(bp);

	return (bp);
}

/*
 * Release a physical buffer: drop any credentials and vnode association,
 * return the header to the free list, and wake any waiter in getpbuf().
 */
void
relpbuf(struct buf *bp)
{
	int s;

	s = splbio();

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}
	if (bp->b_vp)
		pbrelvp(bp);

	if (bp->b_flags & B_WANTED)
		wakeup(bp);

	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	splx(s);
}
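
/*
 * Typical pbuf life cycle (sketch):
 *
 *	bp = getpbuf();			sleeps until a header is free
 *	... build and issue the I/O through bp->b_data ...
 *	relpbuf(bp);			returns the header, wakes any waiter
 */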