xref: /freebsd/sys/vm/vm_pager.c (revision 953a3198a35204535cc9d450f04da982a4fea59b)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $Id: vm_pager.c,v 1.16 1995/07/13 08:48:42 davidg Exp $
65  */
66 
67 /*
68  *	Paging space routine stubs.  Emulates a matchmaker-like interface
69  *	for builtin pagers.
70  */
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/proc.h>
75 #include <sys/malloc.h>
76 #include <sys/buf.h>
77 #include <sys/ucred.h>
78 
79 #include <vm/vm.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_pager.h>
83 
/* Ops vectors for the built-in pagers, each defined in its own file. */
extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;

/*
 * Pager dispatch table, indexed by vm_object type (objtype_t).
 * Entry order must match the OBJT_* constants named in the comments.
 */
struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
};
/* Number of entries in pagertab. */
int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
96 
97 /*
98  * Kernel address space for mapping pages.
99  * Used by pagers where KVAs are needed for IO.
100  *
101  * XXX needs to be large enough to support the number of pending async
102  * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
103  * (MAXPHYS == 64k) if you want to get the most efficiency.
104  */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)	/* KVA reserved for pager I/O */

int pager_map_size = PAGER_MAP_SIZE;	/* initialized from PAGER_MAP_SIZE */
vm_map_t pager_map;			/* submap supplying pager KVA; created elsewhere */
boolean_t pager_map_wanted;		/* presumably set when waiting for pager_map
					 * space -- not referenced in this file */
int bswneeded;				/* a thread sleeps in getpbuf() for a header */
vm_offset_t swapbkva;		/* swap buffers kva */
112 
113 void
114 vm_pager_init()
115 {
116 	struct pagerops **pgops;
117 
118 	/*
119 	 * Initialize known pagers
120 	 */
121 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
122 		if (pgops && ((*pgops)->pgo_init != NULL))
123 			(*(*pgops)->pgo_init) ();
124 }
125 
/*
 * vm_pager_bufferinit:
 *
 *	Initialize the swap buffer headers (swbuf[0..nswbuf-1]) and
 *	reserve pageable kernel virtual address space for pager I/O.
 */
void
vm_pager_bufferinit()
{
	struct buf *bp;
	int i;

	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 *
	 * NOTE(review): the loop stops at nswbuf - 1, so the last header
	 * is initialized below but never inserted into bswlist, meaning
	 * getpbuf()/trypbuf() will never hand it out.  Possibly a
	 * deliberate reserve, but it is undocumented -- confirm.
	 */
	for (i = 0; i < nswbuf - 1; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
	}
	/* Initialize the final (unlisted) buffer header as well. */
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_vnbufs.le_next = NOLIST;
	bp->b_actf = NULL;

	/*
	 * One MAXPHYS-sized KVA window per swap buffer; getpbuf() and
	 * trypbuf() compute each buffer's b_data from this base.
	 */
	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}
149 
150 /*
151  * Allocate an instance of a pager of the given type.
152  * Size, protection and offset parameters are passed in for pagers that
153  * need to perform page-level validation (e.g. the device pager).
154  */
155 vm_object_t
156 vm_pager_allocate(type, handle, size, prot, off)
157 	objtype_t type;
158 	void *handle;
159 	vm_size_t size;
160 	vm_prot_t prot;
161 	vm_offset_t off;
162 {
163 	struct pagerops *ops;
164 
165 	ops = pagertab[type];
166 	if (ops)
167 		return ((*ops->pgo_alloc) (handle, size, prot, off));
168 	return (NULL);
169 }
170 
171 void
172 vm_pager_deallocate(object)
173 	vm_object_t object;
174 {
175 	(*pagertab[object->type]->pgo_dealloc) (object);
176 }
177 
178 
179 int
180 vm_pager_get_pages(object, m, count, reqpage)
181 	vm_object_t object;
182 	vm_page_t *m;
183 	int count;
184 	int reqpage;
185 {
186 	return ((*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage));
187 }
188 
189 int
190 vm_pager_put_pages(object, m, count, sync, rtvals)
191 	vm_object_t object;
192 	vm_page_t *m;
193 	int count;
194 	boolean_t sync;
195 	int *rtvals;
196 {
197 	return ((*pagertab[object->type]->pgo_putpages)(object, m, count, sync, rtvals));
198 }
199 
200 boolean_t
201 vm_pager_has_page(object, offset, before, after)
202 	vm_object_t object;
203 	vm_offset_t offset;
204 	int *before;
205 	int *after;
206 {
207 	return ((*pagertab[object->type]->pgo_haspage) (object, offset, before, after));
208 }
209 
210 /*
211  * Called by pageout daemon before going back to sleep.
212  * Gives pagers a chance to clean up any completed async pageing operations.
213  */
214 void
215 vm_pager_sync()
216 {
217 	struct pagerops **pgops;
218 
219 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
220 		if (pgops && ((*pgops)->pgo_sync != NULL))
221 			(*(*pgops)->pgo_sync) ();
222 }
223 
224 vm_offset_t
225 vm_pager_map_page(m)
226 	vm_page_t m;
227 {
228 	vm_offset_t kva;
229 
230 	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
231 	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
232 	return (kva);
233 }
234 
/*
 * Release a KVA mapping obtained from vm_pager_map_page(): remove
 * the pmap entry first, then return the page of KVA to pager_map,
 * waking any thread blocked in kmem_alloc_wait().
 */
void
vm_pager_unmap_page(kva)
	vm_offset_t kva;
{
	pmap_kremove(kva);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
242 
243 vm_page_t
244 vm_pager_atop(kva)
245 	vm_offset_t kva;
246 {
247 	vm_offset_t pa;
248 
249 	pa = pmap_kextract(kva);
250 	if (pa == 0)
251 		panic("vm_pager_atop");
252 	return (PHYS_TO_VM_PAGE(pa));
253 }
254 
255 vm_object_t
256 vm_pager_object_lookup(pg_list, handle)
257 	register struct pagerlst *pg_list;
258 	void *handle;
259 {
260 	register vm_object_t object;
261 
262 	for (object = pg_list->tqh_first; object != NULL; object = object->pager_object_list.tqe_next)
263 		if (object->handle == handle)
264 			return (object);
265 	return (NULL);
266 }
267 
/*
 * This routine loses a reference to the object -
 * thus a reference must be gained before calling.
 *
 * Sets or clears OBJ_CANPERSIST on the object according to
 * should_cache, then drops the caller's reference (so the object may
 * be freed as a side effect).  Returns KERN_INVALID_ARGUMENT for a
 * NULL object, otherwise KERN_SUCCESS.
 */
int
pager_cache(object, should_cache)
	vm_object_t object;
	boolean_t should_cache;
{
	if (object == NULL)
		return (KERN_INVALID_ARGUMENT);

	if (should_cache)
		object->flags |= OBJ_CANPERSIST;
	else
		object->flags &= ~OBJ_CANPERSIST;

	/* Drops the caller-supplied reference. */
	vm_object_deallocate(object);

	return (KERN_SUCCESS);
}
289 
/*
 * allocate a physical buffer
 *
 * Removes a swap buffer header from the bswlist free list, sleeping
 * at splbio() until one becomes available.  The header is zeroed and
 * its b_data is pointed at this header's private MAXPHYS-sized KVA
 * window within swapbkva (set up in vm_pager_bufferinit()).
 */
struct buf *
getpbuf()
{
	int s;
	struct buf *bp;

	s = splbio();
	/* get a bp from the swap buffer header pool */
	while ((bp = bswlist.tqh_first) == NULL) {
		/* Flag relpbuf() to wake us when a header is released. */
		bswneeded = 1;
		tsleep(&bswneeded, PVM, "wswbuf", 0);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	splx(s);

	/* Reset all fields, then restore the invariant ones. */
	bzero(bp, sizeof *bp);
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_vnbufs.le_next = NOLIST;
	return bp;
}
315 
316 /*
317  * allocate a physical buffer, if one is available
318  */
319 struct buf *
320 trypbuf()
321 {
322 	int s;
323 	struct buf *bp;
324 
325 	s = splbio();
326 	if ((bp = bswlist.tqh_first) == NULL) {
327 		splx(s);
328 		return NULL;
329 	}
330 	TAILQ_REMOVE(&bswlist, bp, b_freelist);
331 	splx(s);
332 
333 	bzero(bp, sizeof *bp);
334 	bp->b_rcred = NOCRED;
335 	bp->b_wcred = NOCRED;
336 	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
337 	bp->b_vnbufs.le_next = NOLIST;
338 	return bp;
339 }
340 
/*
 * release a physical buffer
 *
 * Returns bp to the swap buffer header free list, dropping any
 * credential references and vnode association it acquired, and wakes
 * up any thread sleeping for a header (see getpbuf()).
 */
void
relpbuf(bp)
	struct buf *bp;
{
	int s;

	s = splbio();

	/* Release read/write credential references, if any were taken. */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}
	/* Detach from its vnode, if one is associated. */
	if (bp->b_vp)
		pbrelvp(bp);

	/* Wake anyone sleeping on this specific buffer... */
	if (bp->b_flags & B_WANTED)
		wakeup(bp);

	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	/* ...and anyone sleeping in getpbuf() for any free header. */
	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	splx(s);
}
374