/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static void
phys_pager_init(void)
{

        TAILQ_INIT(&phys_pager_object_list);
        mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}
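
/*
 * Allocate or look up a phys pager VM object.  With a NULL handle a
 * fresh anonymous object is returned; with a non-NULL handle the
 * object is shared through phys_pager_object_list, growing an
 * existing object if a larger size is requested.  Since
 * vm_object_allocate() may sleep, phys_pager_mtx is dropped around
 * the allocation and the lookup is repeated afterwards; the loser of
 * that race simply discards its extra object via
 * vm_object_deallocate().
 */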
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
        vm_object_t object, object1;
        vm_pindex_t pindex;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        pindex = OFF_TO_IDX(foff + PAGE_MASK + size);

        if (handle != NULL) {
                mtx_lock(&phys_pager_mtx);
                /*
                 * Look up pager, creating as necessary.
                 */
                object1 = NULL;
                object = vm_pager_object_lookup(&phys_pager_object_list, handle);
                if (object == NULL) {
                        /*
                         * Allocate object and associate it with the pager.
                         */
                        mtx_unlock(&phys_pager_mtx);
                        object1 = vm_object_allocate(OBJT_PHYS, pindex);
                        mtx_lock(&phys_pager_mtx);
                        object = vm_pager_object_lookup(&phys_pager_object_list,
                            handle);
                        if (object != NULL) {
                                /*
                                 * We raced with another thread while
                                 * allocating the object.
                                 */
                                if (pindex > object->size)
                                        object->size = pindex;
                        } else {
                                object = object1;
                                object1 = NULL;
                                object->handle = handle;
                                vm_object_set_flag(object, OBJ_POPULATE);
                                TAILQ_INSERT_TAIL(&phys_pager_object_list,
                                    object, pager_object_list);
                        }
                } else {
                        if (pindex > object->size)
                                object->size = pindex;
                }
                mtx_unlock(&phys_pager_mtx);
                vm_object_deallocate(object1);
        } else {
                object = vm_object_allocate(OBJT_PHYS, pindex);
                vm_object_set_flag(object, OBJ_POPULATE);
        }

        return (object);
}

static void
phys_pager_dealloc(vm_object_t object)
{

        if (object->handle != NULL) {
                VM_OBJECT_WUNLOCK(object);
                mtx_lock(&phys_pager_mtx);
                TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
                mtx_unlock(&phys_pager_mtx);
                VM_OBJECT_WLOCK(object);
        }
        object->handle = NULL;
        object->type = OBJT_DEAD;
}

/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
        int i;

        VM_OBJECT_ASSERT_WLOCKED(object);
        for (i = 0; i < count; i++) {
                if (m[i]->valid == 0) {
                        if ((m[i]->flags & PG_ZERO) == 0)
                                pmap_zero_page(m[i]);
                        m[i]->valid = VM_PAGE_BITS_ALL;
                }
                KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
                    ("phys_pager_getpages: partially valid page %p", m[i]));
                KASSERT(m[i]->dirty == 0,
                    ("phys_pager_getpages: dirty page %p", m[i]));
        }
        if (rbehind)
                *rbehind = 0;
        if (rahead)
                *rahead = 0;
        return (VM_PAGER_OK);
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
static int phys_pager_cluster = PHYSCLUSTER;
SYSCTL_INT(_vm, OID_AUTO, phys_pager_cluster, CTLFLAG_RWTUN,
    &phys_pager_cluster, 0,
    "prefault window size for phys pager");

/*
 * Max hint to vm_page_alloc() about the further allocation needs
 * inside the phys_pager_populate() loop.  The number of bits used to
 * implement VM_ALLOC_COUNT() determines the hard limit on this value.
 * That limit is currently 65535.
 */
#define PHYSALLOC 16
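
/*
 * vm_fault's populate hook: fault in an entire phys_pager_cluster-
 * aligned window of pages around the faulting index at once.  The
 * window is clipped to the object size and to the [*first, *last]
 * bounds supplied by the caller, and every page in it is left
 * zero-filled, fully valid, and exclusive-busied.  A page found
 * exclusive-busied by another thread is slept on and retried.
 */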
static int
phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type __unused, vm_prot_t max_prot __unused, vm_pindex_t *first,
    vm_pindex_t *last)
{
        vm_page_t m;
        vm_pindex_t base, end, i;
        int ahead;

        base = rounddown(pidx, phys_pager_cluster);
        end = base + phys_pager_cluster - 1;
        if (end >= object->size)
                end = object->size - 1;
        if (*first > base)
                base = *first;
        if (end > *last)
                end = *last;
        *first = base;
        *last = end;

        for (i = base; i <= end; i++) {
retry:
                m = vm_page_lookup(object, i);
                if (m == NULL) {
                        ahead = MIN(end - i, PHYSALLOC);
                        m = vm_page_alloc(object, i, VM_ALLOC_NORMAL |
                            VM_ALLOC_ZERO | VM_ALLOC_WAITFAIL |
                            VM_ALLOC_COUNT(ahead));
                        if (m == NULL)
                                goto retry;
                        if ((m->flags & PG_ZERO) == 0)
                                pmap_zero_page(m);
                        m->valid = VM_PAGE_BITS_ALL;
                } else if (vm_page_xbusied(m)) {
                        vm_page_lock(m);
                        VM_OBJECT_WUNLOCK(object);
                        vm_page_busy_sleep(m, "physb", true);
                        VM_OBJECT_WLOCK(object);
                        goto retry;
                } else {
                        vm_page_xbusy(m);
                        if (m->valid != VM_PAGE_BITS_ALL)
                                vm_page_zero_invalid(m, TRUE);
                }

                KASSERT(m->valid == VM_PAGE_BITS_ALL,
                    ("phys_pager_populate: partially valid page %p", m));
                KASSERT(m->dirty == 0,
                    ("phys_pager_populate: dirty page %p", m));
        }
        return (VM_PAGER_OK);
}

static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

        panic("phys_pager_putpage called");
}

static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        vm_pindex_t base, end;

        base = rounddown(pindex, phys_pager_cluster);
        end = base + phys_pager_cluster - 1;
        if (before != NULL)
                *before = pindex - base;
        if (after != NULL)
                *after = end - pindex;
        return (TRUE);
}

struct pagerops physpagerops = {
        .pgo_init =     phys_pager_init,
        .pgo_alloc =    phys_pager_alloc,
        .pgo_dealloc =  phys_pager_dealloc,
        .pgo_getpages = phys_pager_getpages,
        .pgo_putpages = phys_pager_putpages,
        .pgo_haspage =  phys_pager_haspage,
        .pgo_populate = phys_pager_populate,
};
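
/*
 * Usage sketch (illustrative, not part of this file): kernel code
 * reaches this pager through the generic pager interface rather than
 * by calling the pgo_* methods directly.  For example, an unnamed
 * physical-memory-backed object could be created and released with:
 *
 *      vm_object_t obj;
 *
 *      obj = vm_pager_allocate(OBJT_PHYS, NULL, size, VM_PROT_ALL,
 *          0, NULL);
 *      ...
 *      vm_object_deallocate(obj);
 *
 * The credential argument is unused by this pager, so NULL suffices
 * here; named (handle != NULL) allocations instead share one object
 * per handle, as implemented in phys_pager_alloc() above.
 */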