/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static vm_object_t dead_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
	vm_ooffset_t));
static void dead_pager_putpages __P((vm_object_t, vm_page_t *, int, int, int *));
static boolean_t dead_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));
static void dead_pager_dealloc __P((vm_object_t));

static int
dead_pager_getpages(obj, ma, count, req)
	vm_object_t obj;
	vm_page_t *ma;
	int count;
	int req;
{
	return VM_PAGER_FAIL;
}

static vm_object_t
dead_pager_alloc(handle, size, prot, off)
	void *handle;
	vm_ooffset_t size;
	vm_prot_t prot;
	vm_ooffset_t off;
{
	return NULL;
}

static void
dead_pager_putpages(object, m, count, flags, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int flags;
	int *rtvals;
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

static boolean_t
dead_pager_haspage(object, pindex, prev, next)
	vm_object_t object;
	vm_pindex_t pindex;
	int *prev;
	int *next;
{
	if (prev)
		*prev = 0;
	if (next)
		*next = 0;
	return FALSE;
}

static void
dead_pager_dealloc(object)
	vm_object_t object;
{
	return;
}

static struct pagerops deadpagerops = {
	NULL,			/* pgo_init */
	dead_pager_alloc,	/* pgo_alloc */
	dead_pager_dealloc,	/* pgo_dealloc */
	dead_pager_getpages,	/* pgo_getpages */
	dead_pager_putpages,	/* pgo_putpages */
	dead_pager_haspage,	/* pgo_haspage */
	NULL			/* pgo_pageunswapped */
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
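
/*
 * Illustrative sketch (compiled out): object->type is used directly as
 * an index into pagertab[], so wiring in a new pager means adding its
 * ops here in the same position as a new OBJT_* constant in
 * <vm/vm_object.h>.  "mynewpagerops" and "OBJT_MYNEW" are hypothetical
 * names, not part of the tree.
 */
#if 0
extern struct pagerops mynewpagerops;

struct pagerops *pagertab_example[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&deadpagerops,		/* OBJT_DEAD */
	&mynewpagerops		/* OBJT_MYNEW -- must match the enum order */
};
#endif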

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)
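
/*
 * Arithmetic note: with the values cited above, that floor works out to
 * NPENDINGIO * MAXPHYS = 64 * 64KB = 4MB, so the 8MB default leaves
 * roughly 2x headroom.
 */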

int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
static int bswneeded;
static vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_init != NULL))
			(*(*pgops)->pgo_init) ();
}

void
vm_pager_bufferinit()
{
	struct buf *bp;
	int i;

	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_xflags = 0;
	}

	cluster_pbuf_freecnt = nswbuf / 2;

	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size, vm_prot_t prot,
		  vm_ooffset_t off)
{
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		return ((*ops->pgo_alloc) (handle, size, prot, off));
	return (NULL);
}
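
/*
 * Usage sketch (illustrative only): a vnode-backed object would be
 * obtained with something like
 *
 *	object = vm_pager_allocate(OBJT_VNODE, vp, size, VM_PROT_ALL, 0);
 *
 * where the vnode serves as the pager handle.
 */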

void
vm_pager_deallocate(object)
	vm_object_t object;
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}
256 
257 /*
258  *      vm_pager_strategy:
259  *
260  *      called with no specific spl
261  *      Execute strategy routine directly to pager.
262  */
263 
264 void
265 vm_pager_strategy(vm_object_t object, struct buf *bp)
266 {
267 	if (pagertab[object->type]->pgo_strategy) {
268 	    (*pagertab[object->type]->pgo_strategy)(object, bp);
269 	} else {
270 		bp->b_ioflags |= BIO_ERROR;
271 		bp->b_error = ENXIO;
272 		bufdone(bp);
273 	}
274 }

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */
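
/*
 * Usage sketch (illustrative only): those wrappers dispatch through
 * pagertab[object->type]; e.g. paging in a single page looks roughly
 * like
 *
 *	if (vm_pager_get_pages(object, &m, 1, 0) != VM_PAGER_OK)
 *		...handle the error...
 */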

#if 0
/*
 *	vm_pager_sync:
 *
 *	Called by the pageout daemon before going back to sleep.
 *	Gives pagers a chance to clean up any completed async paging
 *	operations.
 */
void
vm_pager_sync()
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

#endif

vm_offset_t
vm_pager_map_page(m)
	vm_page_t m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
	return (kva);
}

void
vm_pager_unmap_page(kva)
	vm_offset_t kva;
{
	pmap_kremove(kva);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
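
/*
 * Usage sketch (illustrative only): a pager needing a transient kernel
 * mapping of a page brackets the access:
 *
 *	kva = vm_pager_map_page(m);
 *	bzero((caddr_t)kva, PAGE_SIZE);
 *	vm_pager_unmap_page(kva);
 */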

vm_object_t
vm_pager_object_lookup(pg_list, handle)
	register struct pagerlst *pg_list;
	void *handle;
{
	register vm_object_t object;

	for (object = TAILQ_FIRST(pg_list); object != NULL;
	    object = TAILQ_NEXT(object, pager_object_list))
		if (object->handle == handle)
			return (object);
	return (NULL);
}
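
/*
 * Usage sketch (illustrative only): a pager keeping its objects on a
 * pagerlst can check for an existing object before creating one;
 * "some_pager_object_list" is a placeholder name:
 *
 *	object = vm_pager_object_lookup(&some_pager_object_list, handle);
 *	if (object == NULL)
 *		...create a new object for the handle...
 */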

/*
 * initialize a physical buffer
 */

static void
initpbuf(struct buf *bp)
{
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = QUEUE_NONE;
	/* each pbuf owns a fixed MAXPHYS-sized slice of the swapbkva KVA */
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_kvabase = bp->b_data;
	bp->b_kvasize = MAXPHYS;
	bp->b_xflags = 0;
	bp->b_flags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE);
}

/*
 * allocate a physical buffer
 *
 *	There are a limited number (nswbuf) of physical buffers.  We need
 *	to make sure that no single subsystem is able to hog all of them,
 *	so each subsystem implements a counter which is typically initialized
 *	to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 *	increments it on release, and blocks if the counter hits zero.  A
 *	subsystem may initialize the counter to -1 to disable the feature,
 *	but it must still be sure to match up all uses of getpbuf() with
 *	relpbuf() using the same variable.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(pfreecnt)
	int *pfreecnt;
{
	int s;
	struct buf *bp;

	s = splvm();

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0) {
				tsleep(pfreecnt, PVM, "wswbuf0", 0);
			}
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		tsleep(&bswneeded, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt)
		--*pfreecnt;
	splx(s);

	initpbuf(bp);
	return bp;
}
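
/*
 * Usage sketch (illustrative only): a subsystem limits itself to half
 * of the pbuf pool by pairing every getpbuf() with a relpbuf() on the
 * same counter; "my_pbuf_freecnt" is a made-up name:
 *
 *	static int my_pbuf_freecnt = -1;  (set to nswbuf / 2 at init time)
 *
 *	bp = getpbuf(&my_pbuf_freecnt);   (may sleep until one is free)
 *	...perform the I/O with bp...
 *	relpbuf(bp, &my_pbuf_freecnt);    (wakes any waiter on the count)
 */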

/*
 * allocate a physical buffer, if one is available.
 *
 *	Note that there is no NULL hack here - all subsystems using this
 *	call understand how to use pfreecnt.
 */
struct buf *
trypbuf(pfreecnt)
	int *pfreecnt;
{
	int s;
	struct buf *bp;

	s = splvm();
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		splx(s);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);

	--*pfreecnt;

	splx(s);

	initpbuf(bp);

	return bp;
}
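
/*
 * Usage sketch (illustrative only): non-blocking callers fall back when
 * no pbuf is available rather than sleeping:
 *
 *	if ((bp = trypbuf(&my_pbuf_freecnt)) == NULL)
 *		return;		(retry later)
 */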

/*
 * release a physical buffer
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(bp, pfreecnt)
	struct buf *bp;
	int *pfreecnt;
{
	int s;

	s = splvm();

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	if (bp->b_vp)
		pbrelvp(bp);

	BUF_UNLOCK(bp);

	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	splx(s);
}

/********************************************************
 *		CHAINING FUNCTIONS			*
 ********************************************************
 *
 *	These functions support recursion of I/O operations
 *	on bp's, typically by chaining one or more 'child' bp's
 *	to the parent.  Synchronous, asynchronous, and semi-synchronous
 *	chaining is possible.
 */

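/*
 * Usage sketch (illustrative only): a pager splitting one logical
 * request into several child I/Os might drive the chain as
 *
 *	while (more_work) {
 *		nbp = getchainbuf(bp, vp, B_ASYNC);
 *		...set up nbp...
 *		flushchainbuf(nbp);
 *	}
 *	autochaindone(bp);	(or waitchainbuf(bp, 0, 1) to block)
 *
 * "more_work" is a placeholder for the caller's own loop condition.
 */
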
/*
 *	vm_pager_chain_iodone:
 *
 *	I/O completion routine for a child bp.  Currently we fudge a bit
 *	on dealing with b_resid.  Since users of these routines may issue
 *	multiple children simultaneously, the ordering of errors can be lost.
 */

static void
vm_pager_chain_iodone(struct buf *nbp)
{
	struct buf *bp;

	if ((bp = nbp->b_chain.parent) != NULL) {
		/* propagate any error to the parent */
		if (nbp->b_ioflags & BIO_ERROR) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = nbp->b_error;
		} else if (nbp->b_resid != 0) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = EINVAL;
		} else {
			bp->b_resid -= nbp->b_bcount;
		}
		nbp->b_chain.parent = NULL;
		--bp->b_chain.count;
		/* wake up anyone sleeping in waitchainbuf() */
		if (bp->b_flags & B_WANT) {
			bp->b_flags &= ~B_WANT;
			wakeup(bp);
		}
		/* last child finishes the parent if autochaindone() armed it */
		if (!bp->b_chain.count && (bp->b_flags & B_AUTOCHAINDONE)) {
			bp->b_flags &= ~B_AUTOCHAINDONE;
			if (bp->b_resid != 0 && !(bp->b_ioflags & BIO_ERROR)) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = EINVAL;
			}
			bufdone(bp);
		}
	}
	nbp->b_flags |= B_DONE;
	nbp->b_flags &= ~B_ASYNC;
	relpbuf(nbp, NULL);
}

/*
 *	getchainbuf:
 *
 *	Obtain a physical buffer and chain it to its parent buffer.  When
 *	I/O completes, any sleeper on the parent is woken up (B_WANT), and
 *	errors are automatically propagated to the parent.
 */

struct buf *
getchainbuf(struct buf *bp, struct vnode *vp, int flags)
{
	struct buf *nbp = getpbuf(NULL);

	nbp->b_chain.parent = bp;
	++bp->b_chain.count;

	/* throttle: allow at most 4 outstanding children per parent */
	if (bp->b_chain.count > 4)
		waitchainbuf(bp, 4, 0);

	nbp->b_ioflags = bp->b_ioflags & BIO_ORDERED;
	nbp->b_flags = flags;
	nbp->b_rcred = nbp->b_wcred = proc0.p_ucred;
	nbp->b_iodone = vm_pager_chain_iodone;

	crhold(nbp->b_rcred);
	crhold(nbp->b_wcred);

	if (vp)
		pbgetvp(vp, nbp);
	return (nbp);
}

void
flushchainbuf(struct buf *nbp)
{
	if (nbp->b_bcount) {
		nbp->b_bufsize = nbp->b_bcount;
		if (nbp->b_iocmd == BIO_WRITE)
			nbp->b_dirtyend = nbp->b_bcount;
		BUF_KERNPROC(nbp);
		BUF_STRATEGY(nbp);
	} else {
		bufdone(nbp);
	}
}

void
waitchainbuf(struct buf *bp, int count, int done)
{
	int s;

	s = splbio();
	while (bp->b_chain.count > count) {
		bp->b_flags |= B_WANT;
		tsleep(bp, PRIBIO + 4, "bpchain", 0);
	}
	if (done) {
		if (bp->b_resid != 0 && !(bp->b_ioflags & BIO_ERROR)) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = EINVAL;
		}
		bufdone(bp);
	}
	splx(s);
}

void
autochaindone(struct buf *bp)
{
	int s;

	s = splbio();
	if (bp->b_chain.count == 0)
		bufdone(bp);
	else
		bp->b_flags |= B_AUTOCHAINDONE;
	splx(s);
}