xref: /freebsd/sys/vm/phys_pager.c (revision efe7553ed7aa65e6b241d4159ec560c22ac94ff1)
160727d8bSWarner Losh /*-
224964514SPeter Wemm  * Copyright (c) 2000 Peter Wemm
324964514SPeter Wemm  *
424964514SPeter Wemm  * Redistribution and use in source and binary forms, with or without
524964514SPeter Wemm  * modification, are permitted provided that the following conditions
624964514SPeter Wemm  * are met:
724964514SPeter Wemm  * 1. Redistributions of source code must retain the above copyright
824964514SPeter Wemm  *    notice, this list of conditions and the following disclaimer.
924964514SPeter Wemm  * 2. Redistributions in binary form must reproduce the above copyright
1024964514SPeter Wemm  *    notice, this list of conditions and the following disclaimer in the
1124964514SPeter Wemm  *    documentation and/or other materials provided with the distribution.
1224964514SPeter Wemm  *
1324964514SPeter Wemm  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
1424964514SPeter Wemm  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1524964514SPeter Wemm  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1624964514SPeter Wemm  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
1724964514SPeter Wemm  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1824964514SPeter Wemm  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
1924964514SPeter Wemm  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2024964514SPeter Wemm  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2124964514SPeter Wemm  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2224964514SPeter Wemm  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2324964514SPeter Wemm  * SUCH DAMAGE.
2424964514SPeter Wemm  */
2524964514SPeter Wemm 
26874651b1SDavid E. O'Brien #include <sys/cdefs.h>
27874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
28874651b1SDavid E. O'Brien 
2924964514SPeter Wemm #include <sys/param.h>
3024964514SPeter Wemm #include <sys/systm.h>
3124964514SPeter Wemm #include <sys/linker_set.h>
3224964514SPeter Wemm #include <sys/conf.h>
33a9fa2c05SAlfred Perlstein #include <sys/kernel.h>
34fb919e4dSMark Murray #include <sys/lock.h>
350cddd8f0SMatthew Dillon #include <sys/proc.h>
36fb919e4dSMark Murray #include <sys/mutex.h>
3724964514SPeter Wemm #include <sys/mman.h>
3824964514SPeter Wemm #include <sys/sysctl.h>
3924964514SPeter Wemm 
4024964514SPeter Wemm #include <vm/vm.h>
4124964514SPeter Wemm #include <vm/vm_object.h>
4224964514SPeter Wemm #include <vm/vm_page.h>
4324964514SPeter Wemm #include <vm/vm_pager.h>
4424964514SPeter Wemm 
4524964514SPeter Wemm /* list of device pager objects */
4624964514SPeter Wemm static struct pagerlst phys_pager_object_list;
47a9fa2c05SAlfred Perlstein /* protect access to phys_pager_object_list */
48a9fa2c05SAlfred Perlstein static struct mtx phys_pager_mtx;
4924964514SPeter Wemm 
5024964514SPeter Wemm static void
51bb663856SPeter Wemm phys_pager_init(void)
5224964514SPeter Wemm {
53bb663856SPeter Wemm 
5424964514SPeter Wemm 	TAILQ_INIT(&phys_pager_object_list);
556008862bSJohn Baldwin 	mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
5624964514SPeter Wemm }
5724964514SPeter Wemm 
/*
 * MPSAFE
 */
/*
 * Return a phys pager object covering [foff, foff + size).  A non-NULL
 * handle names a shared object: it is looked up on, and if absent
 * inserted into, phys_pager_object_list.  A NULL handle yields a fresh
 * anonymous object.  Returns NULL if foff is not page aligned.
 */
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t foff)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	/* Page index one past the end of the region, rounded up. */
	pindex = OFF_TO_IDX(foff + PAGE_MASK + size);

	if (handle != NULL) {
		mtx_lock(&phys_pager_mtx);
		/*
		 * Look up pager, creating as necessary.
		 */
		object1 = NULL;
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 * vm_object_allocate() may sleep, so the list mutex
			 * must be dropped around the call; afterwards the
			 * lookup is redone to detect a concurrent creator.
			 */
			mtx_unlock(&phys_pager_mtx);
			object1 = vm_object_allocate(OBJT_PHYS, pindex);
			mtx_lock(&phys_pager_mtx);
			object = vm_pager_object_lookup(&phys_pager_object_list,
			    handle);
			if (object != NULL) {
				/*
				 * We raced with other thread while
				 * allocating object.  Keep the winner's
				 * object (growing it if needed); our spare
				 * in object1 is released below.
				 */
				if (pindex > object->size)
					object->size = pindex;
			} else {
				/* We won the race: publish our object. */
				object = object1;
				object1 = NULL;
				object->handle = handle;
				TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
				    pager_object_list);
			}
		} else {
			/* Existing object: extend it to cover the request. */
			if (pindex > object->size)
				object->size = pindex;
		}
		mtx_unlock(&phys_pager_mtx);
		/* Drop the spare object if we lost the race (NULL-safe). */
		vm_object_deallocate(object1);
	} else {
		/* Anonymous (unnamed) object: no list bookkeeping needed. */
		object = vm_object_allocate(OBJT_PHYS, pindex);
	}

	return (object);
}
11824964514SPeter Wemm 
/*
 * MPSAFE
 */
/*
 * Pager-side teardown: unlink a named object from the global list.
 * The object lock is held on entry; it must be dropped before taking
 * phys_pager_mtx to respect the lock ordering, then reacquired for the
 * caller.
 */
static void
phys_pager_dealloc(vm_object_t object)
{

	if (object->handle != NULL) {
		VM_OBJECT_UNLOCK(object);
		mtx_lock(&phys_pager_mtx);
		TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
		mtx_unlock(&phys_pager_mtx);
		VM_OBJECT_LOCK(object);
	}
}
13424964514SPeter Wemm 
13524964514SPeter Wemm /*
13624964514SPeter Wemm  * Fill as many pages as vm_fault has allocated for us.
13724964514SPeter Wemm  */
138e265f054SAlan Cox static int
139e265f054SAlan Cox phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
140e265f054SAlan Cox {
141e265f054SAlan Cox 	int i;
142e265f054SAlan Cox 
143e265f054SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1448a3ef857SAlan Cox 	for (i = 0; i < count; i++) {
1458a3ef857SAlan Cox 		if (m[i]->valid == 0) {
1468a3ef857SAlan Cox 			if ((m[i]->flags & PG_ZERO) == 0)
1478a3ef857SAlan Cox 				pmap_zero_page(m[i]);
1488a3ef857SAlan Cox 			m[i]->valid = VM_PAGE_BITS_ALL;
1498a3ef857SAlan Cox 		}
1508a3ef857SAlan Cox 		KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
1518a3ef857SAlan Cox 		    ("phys_pager_getpages: partially valid page %p", m[i]));
15224964514SPeter Wemm 		m[i]->dirty = 0;
15324964514SPeter Wemm 		/* The requested page must remain busy, the others not. */
15424964514SPeter Wemm 		if (reqpage != i) {
1559af80719SAlan Cox 			m[i]->oflags &= ~VPO_BUSY;
15624964514SPeter Wemm 			m[i]->busy = 0;
15724964514SPeter Wemm 		}
15824964514SPeter Wemm 	}
15924964514SPeter Wemm 	return (VM_PAGER_OK);
16024964514SPeter Wemm }
16124964514SPeter Wemm 
/*
 * Phys pager pages are never written back; reaching this entry point
 * indicates a bug elsewhere in the VM system.
 */
static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
		    int *rtvals)
{

	panic("phys_pager_putpage called");
}
16924964514SPeter Wemm 
17024964514SPeter Wemm /*
17124964514SPeter Wemm  * Implement a pretty aggressive clustered getpages strategy.  Hint that
17224964514SPeter Wemm  * everything in an entire 4MB window should be prefaulted at once.
17324964514SPeter Wemm  *
17424964514SPeter Wemm  * XXX 4MB (1024 slots per page table page) is convenient for x86,
17524964514SPeter Wemm  * but may not be for other arches.
17624964514SPeter Wemm  */
17724964514SPeter Wemm #ifndef PHYSCLUSTER
17824964514SPeter Wemm #define PHYSCLUSTER 1024
17924964514SPeter Wemm #endif
18024964514SPeter Wemm static boolean_t
181bb663856SPeter Wemm phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
182bb663856SPeter Wemm 		   int *after)
18324964514SPeter Wemm {
18424964514SPeter Wemm 	vm_pindex_t base, end;
18524964514SPeter Wemm 
18624964514SPeter Wemm 	base = pindex & (~(PHYSCLUSTER - 1));
18724964514SPeter Wemm 	end = base + (PHYSCLUSTER - 1);
18824964514SPeter Wemm 	if (before != NULL)
18924964514SPeter Wemm 		*before = pindex - base;
19024964514SPeter Wemm 	if (after != NULL)
19124964514SPeter Wemm 		*after = end - pindex;
19224964514SPeter Wemm 	return (TRUE);
19324964514SPeter Wemm }
194bb663856SPeter Wemm 
/* Pager operations vector registered for OBJT_PHYS objects. */
struct pagerops physpagerops = {
	.pgo_init =	phys_pager_init,
	.pgo_alloc =	phys_pager_alloc,
	.pgo_dealloc = 	phys_pager_dealloc,
	.pgo_getpages =	phys_pager_getpages,
	.pgo_putpages =	phys_pager_putpages,
	.pgo_haspage =	phys_pager_haspage,
};
203