/*
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/* prevent concurrent object creation races */
static int phys_pager_alloc_lock;
/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static void
phys_pager_init(void)
{

	TAILQ_INIT(&phys_pager_object_list);
	mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

/*
 * MPSAFE
 */
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff)
{
	vm_object_t object;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page(size);

	if (handle != NULL) {
		mtx_lock(&Giant);
		/*
		 * Lock to prevent object creation race condition.
		 */
		while (phys_pager_alloc_lock) {
			phys_pager_alloc_lock = -1;
			tsleep(&phys_pager_alloc_lock, PVM, "swpalc", 0);
		}
		phys_pager_alloc_lock = 1;

		/*
		 * Look up pager, creating as necessary.
		 */
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 */
			object = vm_object_allocate(OBJT_PHYS,
			    OFF_TO_IDX(foff + size));
			object->handle = handle;
			mtx_lock(&phys_pager_mtx);
			TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
			    pager_object_list);
			mtx_unlock(&phys_pager_mtx);
		} else {
			/*
			 * Gain a reference to the object.
			 */
			vm_object_reference(object);
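			/*
			 * Grow the existing object if this mapping
			 * extends beyond its current size.
			 */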
			if (OFF_TO_IDX(foff + size) > object->size)
				object->size = OFF_TO_IDX(foff + size);
		}
		if (phys_pager_alloc_lock == -1)
			wakeup(&phys_pager_alloc_lock);
		phys_pager_alloc_lock = 0;
		mtx_unlock(&Giant);
	} else {
		object = vm_object_allocate(OBJT_PHYS,
		    OFF_TO_IDX(foff + size));
	}

	return (object);
}

/*
 * MPSAFE
 */
static void
phys_pager_dealloc(vm_object_t object)
{

	if (object->handle != NULL) {
		mtx_lock(&phys_pager_mtx);
		TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
		mtx_unlock(&phys_pager_mtx);
	}
}

static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int i, s;

	s = splvm();
	vm_page_lock_queues();
	/*
	 * Fill as many pages as vm_fault has allocated for us.
	 */
	for (i = 0; i < count; i++) {
		if ((m[i]->flags & PG_ZERO) == 0) {
			vm_page_unlock_queues();
			pmap_zero_page(m[i]);
			vm_page_lock_queues();
		}
		vm_page_flag_set(m[i], PG_ZERO);
		/* Switch off pv_entries */
		vm_page_unmanage(m[i]);
		m[i]->valid = VM_PAGE_BITS_ALL;
		m[i]->dirty = 0;
		/* The requested page must remain busy, the others not. */
		if (reqpage != i) {
			vm_page_flag_clear(m[i], PG_BUSY);
			m[i]->busy = 0;
		}
	}
	vm_page_unlock_queues();
	splx(s);

	return (VM_PAGER_OK);
}

static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

	panic("phys_pager_putpages called");
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * XXX 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define	PHYSCLUSTER	1024
#endif
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	vm_pindex_t base, end;

	base = pindex & (~(PHYSCLUSTER - 1));
	end = base + (PHYSCLUSTER - 1);
	if (before != NULL)
		*before = pindex - base;
	if (after != NULL)
		*after = end - pindex;
	return (TRUE);
}

struct pagerops physpagerops = {
	.pgo_init =	phys_pager_init,
	.pgo_alloc =	phys_pager_alloc,
	.pgo_dealloc =	phys_pager_dealloc,
	.pgo_getpages =	phys_pager_getpages,
	.pgo_putpages =	phys_pager_putpages,
	.pgo_haspage =	phys_pager_haspage,
};
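
/*
 * Usage sketch (illustrative only, not part of this file): consumers do
 * not call phys_pager_alloc() directly; they reach it through the
 * generic pager interface, e.g.:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_PHYS, handle, size, VM_PROT_ALL, 0);
 *	if (obj == NULL)
 *		return (EINVAL);	(hypothetical error handling)
 *
 * With handle != NULL the object is entered into
 * phys_pager_object_list and shared: later calls with the same handle
 * return an additional reference to the same object.  With
 * handle == NULL a fresh, unlisted OBJT_PHYS object is returned.
 */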