xref: /freebsd/sys/vm/vm_pager.c (revision 8169788f40ced7202f6e584ceb67b47e49dff79c)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vm_pager.c	8.7 (Berkeley) 7/7/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  */
64 
65 /*
66  *	Paging space routine stubs.  Emulates a matchmaker-like interface
67  *	for builtin pagers.
68  */
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/malloc.h>
73 
74 #include <vm/vm.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_kern.h>
77 
78 #ifdef SWAPPAGER
79 extern struct pagerops swappagerops;
80 #endif
81 
82 #ifdef VNODEPAGER
83 extern struct pagerops vnodepagerops;
84 #endif
85 
86 #ifdef DEVPAGER
87 extern struct pagerops devicepagerops;
88 #endif
89 
/*
 * Table of the pager operation vectors, indexed by pager type
 * (PG_SWAP, PG_VNODE, PG_DEV).  A slot is NULL when the corresponding
 * pager was not configured into this kernel, so every consumer must
 * check the slot before calling through it.
 */
struct pagerops *pagertab[] = {
#ifdef SWAPPAGER
	&swappagerops,		/* PG_SWAP */
#else
	NULL,
#endif
#ifdef VNODEPAGER
	&vnodepagerops,		/* PG_VNODE */
#else
	NULL,
#endif
#ifdef DEVPAGER
	&devicepagerops,	/* PG_DEV */
#else
	NULL,
#endif
};
/* Number of entries in pagertab, including NULL placeholders. */
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

/* Set by a pager's init routine; vm_pager_init panics if none claims it. */
struct pagerops *dfltpagerops = NULL;	/* default pager */
110 
111 /*
112  * Kernel address space for mapping pages.
113  * Used by pagers where KVAs are needed for IO.
114  *
115  * XXX needs to be large enough to support the number of pending async
116  * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
117  * (MAXPHYS == 64k) if you want to get the most efficiency.
118  */
119 #define PAGER_MAP_SIZE	(4 * 1024 * 1024)
120 
vm_map_t pager_map;		/* submap of kernel_map used for pager KVA */
boolean_t pager_map_wanted;	/* a thread is sleeping for pager_map space */
vm_offset_t pager_sva, pager_eva;	/* start/end VA of the submap */
124 
125 void
126 vm_pager_init()
127 {
128 	struct pagerops **pgops;
129 
130 	/*
131 	 * Allocate a kernel submap for tracking get/put page mappings
132 	 */
133 	pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
134 				  PAGER_MAP_SIZE, FALSE);
135 	/*
136 	 * Initialize known pagers
137 	 */
138 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
139 		if (pgops)
140 			(*(*pgops)->pgo_init)();
141 	if (dfltpagerops == NULL)
142 		panic("no default pager");
143 }
144 
145 /*
146  * Allocate an instance of a pager of the given type.
147  * Size, protection and offset parameters are passed in for pagers that
148  * need to perform page-level validation (e.g. the device pager).
149  */
150 vm_pager_t
151 vm_pager_allocate(type, handle, size, prot, off)
152 	int type;
153 	caddr_t handle;
154 	vm_size_t size;
155 	vm_prot_t prot;
156 	vm_offset_t off;
157 {
158 	struct pagerops *ops;
159 
160 	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
161 	if (ops)
162 		return ((*ops->pgo_alloc)(handle, size, prot, off));
163 	return (NULL);
164 }
165 
166 void
167 vm_pager_deallocate(pager)
168 	vm_pager_t	pager;
169 {
170 	if (pager == NULL)
171 		panic("vm_pager_deallocate: null pager");
172 
173 	(*pager->pg_ops->pgo_dealloc)(pager);
174 }
175 
176 int
177 vm_pager_get_pages(pager, mlist, npages, sync)
178 	vm_pager_t	pager;
179 	vm_page_t	*mlist;
180 	int		npages;
181 	boolean_t	sync;
182 {
183 	int rv;
184 
185 	if (pager == NULL) {
186 		rv = VM_PAGER_OK;
187 		while (npages--)
188 			if (!vm_page_zero_fill(*mlist)) {
189 				rv = VM_PAGER_FAIL;
190 				break;
191 			} else
192 				mlist++;
193 		return (rv);
194 	}
195 	return ((*pager->pg_ops->pgo_getpages)(pager, mlist, npages, sync));
196 }
197 
198 int
199 vm_pager_put_pages(pager, mlist, npages, sync)
200 	vm_pager_t	pager;
201 	vm_page_t	*mlist;
202 	int		npages;
203 	boolean_t	sync;
204 {
205 	if (pager == NULL)
206 		panic("vm_pager_put_pages: null pager");
207 	return ((*pager->pg_ops->pgo_putpages)(pager, mlist, npages, sync));
208 }
209 
210 /* XXX compatibility*/
211 int
212 vm_pager_get(pager, m, sync)
213 	vm_pager_t	pager;
214 	vm_page_t	m;
215 	boolean_t	sync;
216 {
217 	return vm_pager_get_pages(pager, &m, 1, sync);
218 }
219 
220 /* XXX compatibility*/
221 int
222 vm_pager_put(pager, m, sync)
223 	vm_pager_t	pager;
224 	vm_page_t	m;
225 	boolean_t	sync;
226 {
227 	return vm_pager_put_pages(pager, &m, 1, sync);
228 }
229 
230 boolean_t
231 vm_pager_has_page(pager, offset)
232 	vm_pager_t	pager;
233 	vm_offset_t	offset;
234 {
235 	if (pager == NULL)
236 		panic("vm_pager_has_page: null pager");
237 	return ((*pager->pg_ops->pgo_haspage)(pager, offset));
238 }
239 
240 /*
241  * Called by pageout daemon before going back to sleep.
242  * Gives pagers a chance to clean up any completed async pageing operations.
243  */
244 void
245 vm_pager_sync()
246 {
247 	struct pagerops **pgops;
248 
249 	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
250 		if (pgops)
251 			(*(*pgops)->pgo_putpages)(NULL, NULL, 0, FALSE);
252 }
253 
254 void
255 vm_pager_cluster(pager, offset, loff, hoff)
256 	vm_pager_t	pager;
257 	vm_offset_t	offset;
258 	vm_offset_t	*loff;
259 	vm_offset_t	*hoff;
260 {
261 	if (pager == NULL)
262 		panic("vm_pager_cluster: null pager");
263 	((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff));
264 }
265 
266 void
267 vm_pager_clusternull(pager, offset, loff, hoff)
268 	vm_pager_t	pager;
269 	vm_offset_t	offset;
270 	vm_offset_t	*loff;
271 	vm_offset_t	*hoff;
272 {
273 	panic("vm_pager_nullcluster called");
274 }
275 
/*
 * vm_pager_map_pages:
 *
 *	Map npages physical pages from mlist into contiguous kernel
 *	virtual address space carved out of pager_map, for pager I/O.
 *	Returns the base KVA of the mapping, or 0 when no space is
 *	available and canwait is FALSE.  May sleep when canwait is TRUE.
 */
vm_offset_t
vm_pager_map_pages(mlist, npages, canwait)
	vm_page_t	*mlist;
	int		npages;
	boolean_t	canwait;
{
	vm_offset_t kva, va;
	vm_size_t size;
	vm_page_t m;

	/*
	 * Allocate space in the pager map, if none available return 0.
	 * This is basically an expansion of kmem_alloc_wait with optional
	 * blocking on no space.
	 */
	size = npages * PAGE_SIZE;
	vm_map_lock(pager_map);
	while (vm_map_findspace(pager_map, 0, size, &kva)) {
		if (!canwait) {
			vm_map_unlock(pager_map);
			return (0);
		}
		/*
		 * Record the demand and sleep until vm_pager_unmap_pages
		 * frees space.  The map lock must be dropped across the
		 * tsleep and re-taken before retrying the search.
		 */
		pager_map_wanted = TRUE;
		vm_map_unlock(pager_map);
		(void) tsleep(pager_map, PVM, "pager_map", 0);
		vm_map_lock(pager_map);
	}
	/* Claim [kva, kva+size) with no backing object (NULL). */
	vm_map_insert(pager_map, NULL, 0, kva, kva + size);
	vm_map_unlock(pager_map);

	/* Enter each page, marking it owned by the pager under DEBUG. */
	for (va = kva; npages--; va += PAGE_SIZE) {
		m = *mlist++;
#ifdef DEBUG
		if ((m->flags & PG_BUSY) == 0)
			panic("vm_pager_map_pages: page not busy");
		if (m->flags & PG_PAGEROWNED)
			panic("vm_pager_map_pages: page already in pager");
#endif
#ifdef DEBUG
		m->flags |= PG_PAGEROWNED;
#endif
		/* Wired entry (last arg TRUE): mapping must stay resident
		 * for the duration of the I/O. */
		pmap_enter(vm_map_pmap(pager_map), va, VM_PAGE_TO_PHYS(m),
			   VM_PROT_DEFAULT, TRUE);
	}
	return (kva);
}
322 
/*
 * vm_pager_unmap_pages:
 *
 *	Tear down a mapping previously created by vm_pager_map_pages:
 *	remove the pmap entries, release the KVA range back to
 *	pager_map, and wake any thread sleeping for pager_map space.
 */
void
vm_pager_unmap_pages(kva, npages)
	vm_offset_t	kva;
	int		npages;
{
	vm_size_t size = npages * PAGE_SIZE;

#ifdef DEBUG
	vm_offset_t va;
	vm_page_t m;
	int np = npages;

	/* Clear the pager-ownership flag set in vm_pager_map_pages and
	 * complain about pages that were never marked as owned. */
	for (va = kva; np--; va += PAGE_SIZE) {
		m = vm_pager_atop(va);
		if (m->flags & PG_PAGEROWNED)
			m->flags &= ~PG_PAGEROWNED;
		else
			printf("vm_pager_unmap_pages: %x(%x/%x) not owned\n",
			       m, va, VM_PAGE_TO_PHYS(m));
	}
#endif
	/* Drop the translations before giving the VA range back. */
	pmap_remove(vm_map_pmap(pager_map), kva, kva + size);
	vm_map_lock(pager_map);
	(void) vm_map_delete(pager_map, kva, kva + size);
	if (pager_map_wanted)
		wakeup(pager_map);
	vm_map_unlock(pager_map);
}
351 
352 vm_page_t
353 vm_pager_atop(kva)
354 	vm_offset_t	kva;
355 {
356 	vm_offset_t pa;
357 
358 	pa = pmap_extract(vm_map_pmap(pager_map), kva);
359 	if (pa == 0)
360 		panic("vm_pager_atop");
361 	return (PHYS_TO_VM_PAGE(pa));
362 }
363 
364 vm_pager_t
365 vm_pager_lookup(pglist, handle)
366 	register struct pagerlst *pglist;
367 	caddr_t handle;
368 {
369 	register vm_pager_t pager;
370 
371 	for (pager = pglist->tqh_first; pager; pager = pager->pg_list.tqe_next)
372 		if (pager->pg_handle == handle)
373 			return (pager);
374 	return (NULL);
375 }
376 
377 /*
378  * This routine gains a reference to the object.
379  * Explicit deallocation is necessary.
380  */
381 int
382 pager_cache(object, should_cache)
383 	vm_object_t	object;
384 	boolean_t	should_cache;
385 {
386 	if (object == NULL)
387 		return (KERN_INVALID_ARGUMENT);
388 
389 	vm_object_cache_lock();
390 	vm_object_lock(object);
391 	if (should_cache)
392 		object->flags |= OBJ_CANPERSIST;
393 	else
394 		object->flags &= ~OBJ_CANPERSIST;
395 	vm_object_unlock(object);
396 	vm_object_cache_unlock();
397 
398 	vm_object_deallocate(object);
399 
400 	return (KERN_SUCCESS);
401 }
402