/*-
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static void
phys_pager_init(void)
{

	TAILQ_INIT(&phys_pager_object_list);
	mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

/*
 * MPSAFE
 */
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	pindex = OFF_TO_IDX(foff + PAGE_MASK + size);

	if (handle != NULL) {
		mtx_lock(&phys_pager_mtx);
		/*
		 * Look up pager, creating as necessary.
		 */
		object1 = NULL;
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 */
			mtx_unlock(&phys_pager_mtx);
			object1 = vm_object_allocate(OBJT_PHYS, pindex);
			mtx_lock(&phys_pager_mtx);
			object = vm_pager_object_lookup(&phys_pager_object_list,
			    handle);
			if (object != NULL) {
				/*
				 * We raced with another thread while
				 * allocating the object.
				 */
				if (pindex > object->size)
					object->size = pindex;
			} else {
				object = object1;
				object1 = NULL;
				object->handle = handle;
				TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
				    pager_object_list);
			}
		} else {
			if (pindex > object->size)
				object->size = pindex;
		}
		mtx_unlock(&phys_pager_mtx);
		vm_object_deallocate(object1);
	} else {
		object = vm_object_allocate(OBJT_PHYS, pindex);
	}

	return (object);
}

/*
 * MPSAFE
 */
static void
phys_pager_dealloc(vm_object_t object)
{

	if (object->handle != NULL) {
		VM_OBJECT_WUNLOCK(object);
		mtx_lock(&phys_pager_mtx);
		TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
		mtx_unlock(&phys_pager_mtx);
		VM_OBJECT_WLOCK(object);
	}
	object->handle = NULL;
	object->type = OBJT_DEAD;
}

/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	for (i = 0; i < count; i++) {
		if (m[i]->valid == 0) {
			if ((m[i]->flags & PG_ZERO) == 0)
				pmap_zero_page(m[i]);
			m[i]->valid = VM_PAGE_BITS_ALL;
		}
		KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
		    ("phys_pager_getpages: partially valid page %p", m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("phys_pager_getpages: dirty page %p", m[i]));
		/* The requested page must remain busy, the others not. */
		if (i == reqpage) {
			vm_page_lock(m[i]);
			vm_page_flash(m[i]);
			vm_page_unlock(m[i]);
		} else
			vm_page_xunbusy(m[i]);
	}
	return (VM_PAGER_OK);
}

static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

	panic("phys_pager_putpages called");
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * XXX 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER	1024
#endif
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	vm_pindex_t base, end;

	base = pindex & (~(PHYSCLUSTER - 1));
	end = base + (PHYSCLUSTER - 1);
	if (before != NULL)
		*before = pindex - base;
	if (after != NULL)
		*after = end - pindex;
	return (TRUE);
}

struct pagerops physpagerops = {
	.pgo_init =	phys_pager_init,
	.pgo_alloc =	phys_pager_alloc,
	.pgo_dealloc =	phys_pager_dealloc,
	.pgo_getpages =	phys_pager_getpages,
	.pgo_putpages =	phys_pager_putpages,
	.pgo_haspage =	phys_pager_haspage,
};
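
/*
 * Usage sketch: the phys pager is not called directly; it is reached
 * through the generic pager interface.  vm_pager_init() runs pgo_init
 * for every pagerops entry in pagertab[] (vm_pager.c), and a consumer
 * such as System V shared memory (with shm_use_phys set) creates an
 * unpageable, phys-backed object roughly like:
 *
 *	object = vm_pager_allocate(OBJT_PHYS, NULL, size, VM_PROT_DEFAULT,
 *	    0, cred);
 *
 * Pages of such an object are zero-filled on first fault by
 * phys_pager_getpages() and are asserted never to be dirty, so
 * phys_pager_putpages() should be unreachable and panics if it is
 * ever called.
 */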