xref: /freebsd/sys/vm/phys_pager.c (revision 2f513db72b034fd5ef7f080b11be5c711c15186a)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

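/*
 * One-time pager initialization, invoked through the pgo_init hook when
 * the pager subsystem starts: set up the (empty) object list and the
 * mutex that protects it.
 */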
static void
phys_pager_init(void)
{

	TAILQ_INIT(&phys_pager_object_list);
	mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

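/*
 * Look up the object associated with "handle", or create a new
 * OBJT_PHYS object if none exists.  A NULL handle always creates a
 * fresh anonymous object.  Named objects may be created concurrently,
 * so the code below re-checks the list after allocating.
 */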
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	pindex = OFF_TO_IDX(foff + PAGE_MASK + size);

	if (handle != NULL) {
		mtx_lock(&phys_pager_mtx);
		/*
		 * Look up pager, creating as necessary.
		 */
		object1 = NULL;
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 */
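			/*
			 * Drop the list mutex across the allocation;
			 * creating the object may sleep, which is not
			 * permitted while holding a default mutex.
			 */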
			mtx_unlock(&phys_pager_mtx);
			object1 = vm_object_allocate(OBJT_PHYS, pindex);
			mtx_lock(&phys_pager_mtx);
			object = vm_pager_object_lookup(&phys_pager_object_list,
			    handle);
			if (object != NULL) {
				/*
				 * We raced with another thread while
				 * allocating the object.
				 */
				if (pindex > object->size)
					object->size = pindex;
			} else {
				object = object1;
				object1 = NULL;
				object->handle = handle;
				vm_object_set_flag(object, OBJ_POPULATE);
				TAILQ_INSERT_TAIL(&phys_pager_object_list,
				    object, pager_object_list);
			}
		} else {
			if (pindex > object->size)
				object->size = pindex;
		}
		mtx_unlock(&phys_pager_mtx);
		vm_object_deallocate(object1);
	} else {
		object = vm_object_allocate(OBJT_PHYS, pindex);
		vm_object_set_flag(object, OBJ_POPULATE);
	}

	return (object);
}
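
/*
 * Example (sketch): a consumer such as the shared-page code can obtain
 * an anonymous, single-page, phys-backed object with:
 *
 *	obj = vm_pager_allocate(OBJT_PHYS, NULL, PAGE_SIZE,
 *	    VM_PROT_DEFAULT, 0, NULL);
 *
 * With a NULL handle the object is private to the caller and is never
 * entered on phys_pager_object_list.
 */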
121 
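/*
 * Tear down a phys pager object: a named object is first removed from
 * the global list, then the object is marked dead.
 */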
static void
phys_pager_dealloc(vm_object_t object)
{

	if (object->handle != NULL) {
		VM_OBJECT_WUNLOCK(object);
		mtx_lock(&phys_pager_mtx);
		TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
		mtx_unlock(&phys_pager_mtx);
		VM_OBJECT_WLOCK(object);
	}
	object->handle = NULL;
	object->type = OBJT_DEAD;
}

/*
 * Fill as many pages as vm_fault has allocated for us.  There is no
 * backing store; a page that has never been valid is simply zero-filled
 * in place before being marked valid.
 */
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	int i;

	for (i = 0; i < count; i++) {
		if (vm_page_none_valid(m[i])) {
			if ((m[i]->flags & PG_ZERO) == 0)
				pmap_zero_page(m[i]);
			vm_page_valid(m[i]);
		}
		KASSERT(vm_page_all_valid(m[i]),
		    ("phys_pager_getpages: partially valid page %p", m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("phys_pager_getpages: dirty page %p", m[i]));
	}
	if (rbehind)
		*rbehind = 0;
	if (rahead)
		*rahead = 0;
	return (VM_PAGER_OK);
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
static int phys_pager_cluster = PHYSCLUSTER;
SYSCTL_INT(_vm, OID_AUTO, phys_pager_cluster, CTLFLAG_RWTUN,
    &phys_pager_cluster, 0,
    "prefault window size for phys pager");
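
/*
 * CTLFLAG_RWTUN makes the window both a loader tunable and a run-time
 * sysctl, e.g.:
 *
 *	sysctl vm.phys_pager_cluster=2048
 */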

/*
 * Max hint to vm_page_alloc() about the further allocation needs
 * inside the phys_pager_populate() loop.  The number of bits used to
 * implement VM_ALLOC_COUNT() determines the hard limit on this value.
 * That limit is currently 65535.
 */
#define	PHYSALLOC	16

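/*
 * The pgo_populate method, called from vm_fault() for objects with
 * OBJ_POPULATE set: allocate and validate a whole window of pages
 * around the faulting index in one call.  The range actually populated
 * is returned through *first and *last; the grabbed pages are left
 * busied for the fault handler.
 */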
static int
phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type __unused, vm_prot_t max_prot __unused, vm_pindex_t *first,
    vm_pindex_t *last)
{
	vm_page_t m;
	vm_pindex_t base, end, i;
	int ahead;

	base = rounddown(pidx, phys_pager_cluster);
	end = base + phys_pager_cluster - 1;
	if (end >= object->size)
		end = object->size - 1;
	if (*first > base)
		base = *first;
	if (end > *last)
		end = *last;
	*first = base;
	*last = end;

	for (i = base; i <= end; i++) {
		ahead = MIN(end - i, PHYSALLOC);
		m = vm_page_grab(object, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead));
		if (!vm_page_all_valid(m))
			vm_page_zero_invalid(m, TRUE);
		KASSERT(m->dirty == 0,
		    ("phys_pager_populate: dirty page %p", m));
	}
	return (VM_PAGER_OK);
}

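/*
 * Pages of a phys object are never written back (there is no backing
 * store), so being asked to page out is a bug.
 */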
static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

	panic("phys_pager_putpages called");
}

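/*
 * Any offset within the object can be materialized on demand by
 * zero-fill, so always report the page present; *before and *after
 * describe how much of the surrounding prefault window is likewise
 * reported present.
 */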
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	vm_pindex_t base, end;

	base = rounddown(pindex, phys_pager_cluster);
	end = base + phys_pager_cluster - 1;
	if (before != NULL)
		*before = pindex - base;
	if (after != NULL)
		*after = end - pindex;
	return (TRUE);
}

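/*
 * Method table for OBJT_PHYS objects; it is wired into pagertab[] in
 * vm/vm_pager.c, through which vm_pager_allocate() and the other pager
 * entry points dispatch.
 */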
struct pagerops physpagerops = {
	.pgo_init =	phys_pager_init,
	.pgo_alloc =	phys_pager_alloc,
	.pgo_dealloc =	phys_pager_dealloc,
	.pgo_getpages =	phys_pager_getpages,
	.pgo_putpages =	phys_pager_putpages,
	.pgo_haspage =	phys_pager_haspage,
	.pgo_populate =	phys_pager_populate,
};