xref: /freebsd/sys/vm/vm_pager.c (revision df8bae1de4b67ccf57f4afebd4e2bf258c38910d)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vm_pager.c	8.6 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  */
64 
65 /*
66  *	Paging space routine stubs.  Emulates a matchmaker-like interface
67  *	for builtin pagers.
68  */
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/malloc.h>
73 
74 #include <vm/vm.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_kern.h>
77 
/*
 * Operations vectors supplied by the individual pager implementations;
 * each is present only when the corresponding pager is configured in.
 */
78 #ifdef SWAPPAGER
79 extern struct pagerops swappagerops;
80 #endif
81 
82 #ifdef VNODEPAGER
83 extern struct pagerops vnodepagerops;
84 #endif
85 
86 #ifdef DEVPAGER
87 extern struct pagerops devicepagerops;
88 #endif
89 
/*
 * Table of pager operations, indexed by pager type (PG_SWAP, PG_VNODE,
 * PG_DEV).  A slot is NULL when that pager was not compiled into the
 * kernel, so all users of this table must check entries before use.
 */
90 struct pagerops *pagertab[] = {
91 #ifdef SWAPPAGER
92 	&swappagerops,		/* PG_SWAP */
93 #else
94 	NULL,
95 #endif
96 #ifdef VNODEPAGER
97 	&vnodepagerops,		/* PG_VNODE */
98 #else
99 	NULL,
100 #endif
101 #ifdef DEVPAGER
102 	&devicepagerops,	/* PG_DEV */
103 #else
104 	NULL,
105 #endif
106 };
/* Number of entries in pagertab (including NULL placeholders). */
107 int npagers = sizeof (pagertab) / sizeof (pagertab[0]);
108 
/* Default pager ops; presumably set by a pager's pgo_init — TODO confirm. */
109 struct pagerops *dfltpagerops = NULL;	/* default pager */
110 
111 /*
112  * Kernel address space for mapping pages.
113  * Used by pagers where KVAs are needed for IO.
114  *
115  * XXX needs to be large enough to support the number of pending async
116  * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
117  * (MAXPHYS == 64k) if you want to get the most efficiency.
118  */
119 #define PAGER_MAP_SIZE	(4 * 1024 * 1024)
120 
121 vm_map_t pager_map;		/* submap of kernel_map for pager KVA */
122 boolean_t pager_map_wanted;	/* set by sleepers in vm_pager_map_pages */
123 vm_offset_t pager_sva, pager_eva;	/* bounds of pager_map */
124 
125 void
126 vm_pager_init()
127 {
128 	struct pagerops **pgops;
129 
130 	/*
131 	 * Allocate a kernel submap for tracking get/put page mappings
132 	 */
133 	pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
134 				  PAGER_MAP_SIZE, FALSE);
135 	/*
136 	 * Initialize known pagers
137 	 */
138 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
139 		if (pgops)
140 			(*(*pgops)->pgo_init)();
141 	if (dfltpagerops == NULL)
142 		panic("no default pager");
143 }
144 
145 /*
146  * Allocate an instance of a pager of the given type.
147  * Size, protection and offset parameters are passed in for pagers that
148  * need to perform page-level validation (e.g. the device pager).
149  */
150 vm_pager_t
151 vm_pager_allocate(type, handle, size, prot, off)
152 	int type;
153 	caddr_t handle;
154 	vm_size_t size;
155 	vm_prot_t prot;
156 	vm_offset_t off;
157 {
158 	struct pagerops *ops;
159 
160 	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
161 	if (ops)
162 		return ((*ops->pgo_alloc)(handle, size, prot, off));
163 	return (NULL);
164 }
165 
166 void
167 vm_pager_deallocate(pager)
168 	vm_pager_t	pager;
169 {
170 	if (pager == NULL)
171 		panic("vm_pager_deallocate: null pager");
172 
173 	(*pager->pg_ops->pgo_dealloc)(pager);
174 }
175 
176 int
177 vm_pager_get_pages(pager, mlist, npages, sync)
178 	vm_pager_t	pager;
179 	vm_page_t	*mlist;
180 	int		npages;
181 	boolean_t	sync;
182 {
183 	int rv;
184 
185 	if (pager == NULL) {
186 		rv = VM_PAGER_OK;
187 		while (npages--)
188 			if (!vm_page_zero_fill(*mlist)) {
189 				rv = VM_PAGER_FAIL;
190 				break;
191 			} else
192 				mlist++;
193 		return (rv);
194 	}
195 	return ((*pager->pg_ops->pgo_getpages)(pager, mlist, npages, sync));
196 }
197 
198 int
199 vm_pager_put_pages(pager, mlist, npages, sync)
200 	vm_pager_t	pager;
201 	vm_page_t	*mlist;
202 	int		npages;
203 	boolean_t	sync;
204 {
205 	if (pager == NULL)
206 		panic("vm_pager_put_pages: null pager");
207 	return ((*pager->pg_ops->pgo_putpages)(pager, mlist, npages, sync));
208 }
209 
210 boolean_t
211 vm_pager_has_page(pager, offset)
212 	vm_pager_t	pager;
213 	vm_offset_t	offset;
214 {
215 	if (pager == NULL)
216 		panic("vm_pager_has_page: null pager");
217 	return ((*pager->pg_ops->pgo_haspage)(pager, offset));
218 }
219 
220 /*
221  * Called by pageout daemon before going back to sleep.
222  * Gives pagers a chance to clean up any completed async pageing operations.
223  */
224 void
225 vm_pager_sync()
226 {
227 	struct pagerops **pgops;
228 
229 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
230 		if (pgops)
231 			(*(*pgops)->pgo_putpages)(NULL, NULL, 0, FALSE);
232 }
233 
234 void
235 vm_pager_cluster(pager, offset, loff, hoff)
236 	vm_pager_t	pager;
237 	vm_offset_t	offset;
238 	vm_offset_t	*loff;
239 	vm_offset_t	*hoff;
240 {
241 	if (pager == NULL)
242 		panic("vm_pager_cluster: null pager");
243 	return ((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff));
244 }
245 
246 void
247 vm_pager_clusternull(pager, offset, loff, hoff)
248 	vm_pager_t	pager;
249 	vm_offset_t	offset;
250 	vm_offset_t	*loff;
251 	vm_offset_t	*hoff;
252 {
253 	panic("vm_pager_nullcluster called");
254 }
255 
256 vm_offset_t
257 vm_pager_map_pages(mlist, npages, canwait)
258 	vm_page_t	*mlist;
259 	int		npages;
260 	boolean_t	canwait;
261 {
262 	vm_offset_t kva, va;
263 	vm_size_t size;
264 	vm_page_t m;
265 
266 	/*
267 	 * Allocate space in the pager map, if none available return 0.
268 	 * This is basically an expansion of kmem_alloc_wait with optional
269 	 * blocking on no space.
270 	 */
271 	size = npages * PAGE_SIZE;
272 	vm_map_lock(pager_map);
273 	while (vm_map_findspace(pager_map, 0, size, &kva)) {
274 		if (!canwait) {
275 			vm_map_unlock(pager_map);
276 			return (0);
277 		}
278 		pager_map_wanted = TRUE;
279 		vm_map_unlock(pager_map);
280 		(void) tsleep(pager_map, PVM, "pager_map", 0);
281 		vm_map_lock(pager_map);
282 	}
283 	vm_map_insert(pager_map, NULL, 0, kva, kva + size);
284 	vm_map_unlock(pager_map);
285 
286 	for (va = kva; npages--; va += PAGE_SIZE) {
287 		m = *mlist++;
288 #ifdef DEBUG
289 		if ((m->flags & PG_BUSY) == 0)
290 			panic("vm_pager_map_pages: page not busy");
291 		if (m->flags & PG_PAGEROWNED)
292 			panic("vm_pager_map_pages: page already in pager");
293 #endif
294 #ifdef DEBUG
295 		m->flags |= PG_PAGEROWNED;
296 #endif
297 		pmap_enter(vm_map_pmap(pager_map), va, VM_PAGE_TO_PHYS(m),
298 			   VM_PROT_DEFAULT, TRUE);
299 	}
300 	return (kva);
301 }
302 
303 void
304 vm_pager_unmap_pages(kva, npages)
305 	vm_offset_t	kva;
306 	int		npages;
307 {
308 	vm_size_t size = npages * PAGE_SIZE;
309 
310 #ifdef DEBUG
311 	vm_offset_t va;
312 	vm_page_t m;
313 	int np = npages;
314 
315 	for (va = kva; np--; va += PAGE_SIZE) {
316 		m = vm_pager_atop(va);
317 		if (m->flags & PG_PAGEROWNED)
318 			m->flags &= ~PG_PAGEROWNED;
319 		else
320 			printf("vm_pager_unmap_pages: %x(%x/%x) not owned\n",
321 			       m, va, VM_PAGE_TO_PHYS(m));
322 	}
323 #endif
324 	pmap_remove(vm_map_pmap(pager_map), kva, kva + size);
325 	vm_map_lock(pager_map);
326 	(void) vm_map_delete(pager_map, kva, kva + size);
327 	if (pager_map_wanted)
328 		wakeup(pager_map);
329 	vm_map_unlock(pager_map);
330 }
331 
332 vm_page_t
333 vm_pager_atop(kva)
334 	vm_offset_t	kva;
335 {
336 	vm_offset_t pa;
337 
338 	pa = pmap_extract(vm_map_pmap(pager_map), kva);
339 	if (pa == 0)
340 		panic("vm_pager_atop");
341 	return (PHYS_TO_VM_PAGE(pa));
342 }
343 
344 vm_pager_t
345 vm_pager_lookup(pglist, handle)
346 	register struct pagerlst *pglist;
347 	caddr_t handle;
348 {
349 	register vm_pager_t pager;
350 
351 	for (pager = pglist->tqh_first; pager; pager = pager->pg_list.tqe_next)
352 		if (pager->pg_handle == handle)
353 			return (pager);
354 	return (NULL);
355 }
356 
357 /*
358  * This routine gains a reference to the object.
359  * Explicit deallocation is necessary.
360  */
361 int
362 pager_cache(object, should_cache)
363 	vm_object_t	object;
364 	boolean_t	should_cache;
365 {
366 	if (object == NULL)
367 		return (KERN_INVALID_ARGUMENT);
368 
369 	vm_object_cache_lock();
370 	vm_object_lock(object);
371 	if (should_cache)
372 		object->flags |= OBJ_CANPERSIST;
373 	else
374 		object->flags &= ~OBJ_CANPERSIST;
375 	vm_object_unlock(object);
376 	vm_object_cache_unlock();
377 
378 	vm_object_deallocate(object);
379 
380 	return (KERN_SUCCESS);
381 }
382