xref: /freebsd/sys/vm/phys_pager.c (revision 1b6c76a2fe091c74f08427e6c870851025a9cf67)
/*
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

/* prevent concurrent creation races */
static int phys_pager_alloc_lock;
/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static void
phys_pager_init(void)
{

	TAILQ_INIT(&phys_pager_object_list);
	mtx_init(&phys_pager_mtx, "phys_pager list", MTX_DEF);
}

static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t foff)
{
	vm_object_t object;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page(size);

	if (handle != NULL) {
		/*
		 * Lock to prevent object creation race condition.
		 */
		while (phys_pager_alloc_lock) {
			phys_pager_alloc_lock = -1;
			msleep(&phys_pager_alloc_lock, &vm_mtx, PVM, "swpalc", 0);
		}
		phys_pager_alloc_lock = 1;

		/*
		 * Look up pager, creating as necessary.
		 */
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 */
			object = vm_object_allocate(OBJT_PHYS,
				OFF_TO_IDX(foff + size));
			object->handle = handle;
			mtx_lock(&phys_pager_mtx);
			TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
			    pager_object_list);
			mtx_unlock(&phys_pager_mtx);
		} else {
			/*
			 * Gain a reference to the object.
			 */
			vm_object_reference(object);
			if (OFF_TO_IDX(foff + size) > object->size)
				object->size = OFF_TO_IDX(foff + size);
		}
		if (phys_pager_alloc_lock == -1)
			wakeup(&phys_pager_alloc_lock);
		phys_pager_alloc_lock = 0;

	} else {
		object = vm_object_allocate(OBJT_PHYS,
			OFF_TO_IDX(foff + size));
	}

	return (object);
}
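
/*
 * Illustrative note: with the common 4KB PAGE_SIZE, OFF_TO_IDX() is just a
 * right shift by PAGE_SHIFT, so a request with foff = 0x1000 and
 * size = 0x3000 produces an object of OFF_TO_IDX(0x4000) = 4 pages.
 * Callers presumably reach this routine through vm_pager_allocate() with
 * OBJT_PHYS; when a handle is given it keys the shared object on
 * phys_pager_object_list so later allocations reuse the same object.
 */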

static void
phys_pager_dealloc(vm_object_t object)
{

	if (object->handle != NULL) {
		mtx_lock(&phys_pager_mtx);
		TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
		mtx_unlock(&phys_pager_mtx);
	}
}

static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int i, s;

	s = splvm();
	/*
	 * Fill as many pages as vm_fault has allocated for us.
	 */
	for (i = 0; i < count; i++) {
		if ((m[i]->flags & PG_ZERO) == 0)
			vm_page_zero_fill(m[i]);
		vm_page_flag_set(m[i], PG_ZERO);
		/* Switch off pv_entries */
		vm_page_unmanage(m[i]);
		m[i]->valid = VM_PAGE_BITS_ALL;
		m[i]->dirty = 0;
		/* The requested page must remain busy, the others not. */
		if (reqpage != i) {
			vm_page_flag_clear(m[i], PG_BUSY);
			m[i]->busy = 0;
		}
	}
	splx(s);

	return (VM_PAGER_OK);
}
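
/*
 * Note: because each page above is passed to vm_page_unmanage(), it carries
 * no pv entries and is ignored by the pageout daemon, so memory faulted in
 * through this pager effectively stays resident for the life of the object.
 */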

static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
		    int *rtvals)
{

	panic("phys_pager_putpage called");
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * XXX 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
		   int *after)
{
	vm_pindex_t base, end;

	base = pindex & (~(PHYSCLUSTER - 1));
	end = base + (PHYSCLUSTER - 1);
	if (before != NULL)
		*before = pindex - base;
	if (after != NULL)
		*after = end - pindex;
	return (TRUE);
}
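
/*
 * Worked example: with the default PHYSCLUSTER of 1024, a fault at
 * pindex 1500 computes base = 1500 & ~1023 = 1024 and
 * end = 1024 + 1023 = 2047, so *before = 476 and *after = 547; the caller
 * is told that the whole 1024-page (4MB) window around the faulting page
 * may be populated at once.
 */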

struct pagerops physpagerops = {
	phys_pager_init,
	phys_pager_alloc,
	phys_pager_dealloc,
	phys_pager_getpages,
	phys_pager_putpages,
	phys_pager_haspage,
	NULL
};
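
/*
 * The positional initializers above presumably fill pgo_init, pgo_alloc,
 * pgo_dealloc, pgo_getpages, pgo_putpages and pgo_haspage in order, with
 * the trailing NULL leaving the remaining handler (pgo_pageunswapped in
 * this era's struct pagerops) unset; the table is expected to be
 * registered under OBJT_PHYS in vm_pager.c's pagertab[].
 */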