/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>

/*
 * Fill out the fields of the Linux "struct sysinfo" that have direct
 * FreeBSD equivalents: the total physical page count, expressed in
 * units of PAGE_SIZE, with no separate high-memory region.
 */
void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->totalhigh = 0;
	si->mem_unit = PAGE_SIZE;
}

/*
 * Return the kernel virtual address of a page.  Pages belonging to the
 * kernel object have a fixed mapping derived from their pindex; all
 * other pages are reachable only through the direct map, when the
 * platform provides one.
 */
void *
linux_page_address(struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}
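/*
 * Allocate 2^order wired pages.  With a direct map available, single
 * pages without a GFP_DMA32 restriction come from vm_page_alloc();
 * multi-page or DMA32-constrained requests use vm_page_alloc_contig(),
 * reclaiming contiguous memory and retrying once when M_WAITOK is set.
 * Without a direct map, the pages are instead given a kernel virtual
 * mapping through linux_alloc_kmem().
 */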
vm_page_t
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	vm_page_t page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_NORMAL;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;
		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc(NULL, 0, req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
retry:
			page = vm_page_alloc_contig(NULL, 0, req,
			    npages, 0, pmax, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

			if (page == NULL) {
				if (flags & M_WAITOK) {
					if (!vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0)) {
						vm_wait(NULL);
					}
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
		if (flags & M_ZERO) {
			unsigned long x;

			for (x = 0; x != npages; x++) {
				vm_page_t pgo = page + x;

				/* Zero only pages not already PG_ZERO. */
				if ((pgo->flags & PG_ZERO) == 0)
					pmap_zero_page(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

/*
 * Free 2^order pages obtained from linux_alloc_pages(), either by
 * unwiring them on direct-map platforms or by tearing down the kernel
 * virtual mapping otherwise.
 */
void
linux_free_pages(vm_page_t page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			if (vm_page_unwire_noq(pgo))
				vm_page_free(pgo);
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		linux_free_kmem(vaddr, order);
	}
}

/*
 * Allocate a kernel virtual mapping of 2^order pages; the backing
 * memory is physically contiguous and below 4GB when GFP_DMA32 is
 * requested.
 */
vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	vm_offset_t addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return (addr);
}

/*
 * Free a mapping obtained from linux_alloc_kmem().
 */
void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free(addr, size);
}

/*
 * Fault in and hold the user pages backing the given range; the Linux
 * interface reports either complete success or -EFAULT.
 */
static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ((size_t)nr_pages) << PAGE_SHIFT;
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages,
	    nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

/*
 * Lockless variant of get_user_pages(): hold whatever pages are
 * already resident with the required protection, stopping at the first
 * one that is not.  Returns the number of pages held; unusable from
 * interrupt context.
 */
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	va = start;
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + (((size_t)nr_pages) << PAGE_SHIFT);
	if (start < vm_map_min(map) || end > vm_map_max(map))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}
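/*
 * Hold user pages on behalf of another task.  The "mm" and "vmas"
 * arguments exist only for Linux API compatibility; the target vm_map
 * is taken from the task's thread.
 */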
long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

/*
 * Same as get_user_pages_remote(), but against the current process.
 */
long
get_user_pages(unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

/*
 * Approximate Linux's vmalloc-range check: an address is treated as
 * vmalloc memory when the enclosing page is tracked by a UMA slab.
 */
int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}