/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>

void *
linux_page_address(struct page *page)
{

	if (page->object != kmem_object && page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

vm_page_t
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	vm_page_t page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = (flags & M_ZERO) ? (VM_ALLOC_ZERO | VM_ALLOC_NOOBJ |
		    VM_ALLOC_NORMAL) : (VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL);

		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc(NULL, 0, req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
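			/*
			 * Multi-page and DMA32-constrained requests must
			 * be physically contiguous; if the caller may
			 * sleep, the allocation is retried once after
			 * an attempt to reclaim contiguous memory.
			 */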
retry:
			page = vm_page_alloc_contig(NULL, 0, req,
			    npages, 0, pmax, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

			if (page == NULL) {
				if (flags & M_WAITOK) {
					if (!vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0)) {
						vm_wait(NULL);
					}
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
		if (flags & M_ZERO) {
			unsigned long x;

			for (x = 0; x != npages; x++) {
				vm_page_t pgo = page + x;

				if ((pgo->flags & PG_ZERO) == 0)
					pmap_zero_page(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

void
linux_free_pages(vm_page_t page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			vm_page_lock(pgo);
			vm_page_free(pgo);
			vm_page_unlock(pgo);
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		linux_free_kmem(vaddr, order);
	}
}

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	vm_offset_t addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(kmem_arena, size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(kmem_arena, size,
		    flags & GFP_NATIVE_MASK, 0, BUS_SPACE_MAXADDR_32BIT,
		    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return (addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free(kmem_arena, addr, size);
}

static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;
	int i;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ((size_t)nr_pages) << PAGE_SHIFT;
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	if (count == -1)
		return (-EFAULT);

	for (i = 0; i != nr_pages; i++) {
		struct page *pg = pages[i];

		vm_page_lock(pg);
		vm_page_wire(pg);
		vm_page_unhold(pg);
		vm_page_unlock(pg);
	}
	return (nr_pages);
}

int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	va = start;
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + (((size_t)nr_pages) << PAGE_SHIFT);
	if (start < vm_map_min(map) || end > vm_map_max(map))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
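	/*
	 * Translate each virtual address through the pmap directly,
	 * without faulting; pmap_extract_and_hold() returns NULL for an
	 * address that is not already mapped with the required
	 * protection, which terminates the walk early.
	 */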
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		vm_page_lock(*mp);
		vm_page_wire(*mp);
		vm_page_unhold(*mp);
		vm_page_unlock(*mp);

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page. Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
get_user_pages(unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

struct page *
linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp)
{
	vm_page_t page;
	int rv;

	if ((gfp & GFP_NOWAIT) != 0)
		panic("GFP_NOWAIT is unimplemented");

	VM_OBJECT_WLOCK(obj);
	page = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED);
	if (page->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(page);
		if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &page, 1, NULL, NULL);
			if (rv != VM_PAGER_OK) {
				vm_page_lock(page);
				vm_page_unwire(page, PQ_NONE);
				vm_page_free(page);
				vm_page_unlock(page);
				VM_OBJECT_WUNLOCK(obj);
				return (ERR_PTR(-EINVAL));
			}
			MPASS(page->valid == VM_PAGE_BITS_ALL);
		} else {
			pmap_zero_page(page);
			page->valid = VM_PAGE_BITS_ALL;
			page->dirty = 0;
		}
		vm_page_xunbusy(page);
	}
	VM_OBJECT_WUNLOCK(obj);
	return (page);
}

struct linux_file *
linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	struct fileobj {
		struct linux_file file __aligned(sizeof(void *));
		struct vnode vnode __aligned(sizeof(void *));
	};
	struct fileobj *fileobj;
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	fileobj = kzalloc(sizeof(*fileobj), GFP_KERNEL);
	if (fileobj == NULL) {
		error = -ENOMEM;
		goto err_0;
	}
	filp = &fileobj->file;
	vp = &fileobj->vnode;

	filp->f_count = 1;
	filp->f_vnode = vp;
	filp->f_shmem = vm_pager_allocate(OBJT_DEFAULT, NULL, size,
	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
	if (filp->f_shmem == NULL) {
		error = -ENOMEM;
		goto err_1;
	}
	return (filp);
err_1:
	kfree(filp);
err_0:
	return (ERR_PTR(error));
}
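/*
 * Common helper for invalidate_mapping_pages() and shmem_truncate_range():
 * removes the pages in the pindex range [start, end) from the object and
 * returns how many resident pages were dropped, derived from the object's
 * resident page count before and after the removal.
 */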
static vm_ooffset_t
linux_invalidate_mapping_pages_sub(vm_object_t obj, vm_pindex_t start,
    vm_pindex_t end, int flags)
{
	int start_count, end_count;

	VM_OBJECT_WLOCK(obj);
	start_count = obj->resident_page_count;
	vm_object_page_remove(obj, start, end, flags);
	end_count = obj->resident_page_count;
	VM_OBJECT_WUNLOCK(obj);
	return (start_count - end_count);
}

unsigned long
linux_invalidate_mapping_pages(vm_object_t obj, pgoff_t start, pgoff_t end)
{

	return (linux_invalidate_mapping_pages_sub(obj, start, end,
	    OBJPR_CLEANONLY));
}

void
linux_shmem_truncate_range(vm_object_t obj, loff_t lstart, loff_t lend)
{
	vm_pindex_t start = OFF_TO_IDX(lstart + PAGE_SIZE - 1);
	vm_pindex_t end = OFF_TO_IDX(lend + 1);

	(void) linux_invalidate_mapping_pages_sub(obj, start, end, 0);
}
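/*
 * Example for the index rounding in linux_shmem_truncate_range() above
 * (illustrative numbers, assuming 4 KB pages): truncating the inclusive
 * byte range [8192, 20479] yields start = OFF_TO_IDX(8192 + 4095) = 2 and
 * end = OFF_TO_IDX(20480) = 5, so exactly the fully covered pages 2, 3
 * and 4 are purged, while a partially covered first or last page is left
 * resident.
 */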