xref: /freebsd/sys/compat/linuxkpi/common/src/linux_page.c (revision ca987d4641cdcd7f27e153db17c5bf064934faf5)
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matt Macy (mmacy@nextbsd.org)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>

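/*
 * Architectures with a direct map (DMAP) can translate a physical page
 * into a kernel virtual address without creating a mapping, which lets
 * the page allocation paths below bypass the kernel map entirely.
 */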
#if defined(__amd64__) || defined(__aarch64__) || defined(__riscv)
#define	LINUXKPI_HAVE_DMAP
#else
#undef	LINUXKPI_HAVE_DMAP
#endif

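/*
 * Return the kernel virtual address of a page, mirroring Linux's
 * page_address().  Pages belonging to the kmem or kernel objects are
 * mapped at a fixed offset within the kernel map, so their address
 * follows from the page index.  All other pages are resolved through
 * the direct map when one exists; without a direct map they have no
 * usable mapping and NULL is returned.
 */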
void *
linux_page_address(struct page *page)
{

	if (page->object != kmem_object && page->object != kernel_object) {
#ifdef LINUXKPI_HAVE_DMAP
		return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page)));
#else
		return (NULL);
#endif
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

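/*
 * Allocate 2^order physically contiguous pages, honoring the M_ZERO
 * and GFP_DMA32 bits in "flags".  With a direct map the pages need no
 * kernel mapping: a single page comes from vm_page_alloc(), while
 * larger or DMA32-bounded requests come from vm_page_alloc_contig(),
 * retrying once after contiguity reclaim for M_WAITOK callers.
 * Without a direct map the pages are instead backed by a kernel
 * mapping obtained from linux_alloc_kmem().
 *
 * A hypothetical LinuxKPI consumer would pair the calls like this,
 * assuming the usual gfp.h mapping of GFP_KERNEL and __GFP_ZERO onto
 * M_WAITOK and M_ZERO:
 *
 *	vm_page_t p = linux_alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 *	if (p != NULL) {
 *		void *va = page_address(p);
 *		...
 *		linux_free_pages(p, 0);
 *	}
 */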
vm_page_t
linux_alloc_pages(gfp_t flags, unsigned int order)
{
#ifdef LINUXKPI_HAVE_DMAP
	unsigned long npages = 1UL << order;
	int req = (flags & M_ZERO) ? (VM_ALLOC_ZERO | VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL) : (VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL);
	vm_page_t page;

	if (order == 0 && (flags & GFP_DMA32) == 0) {
		page = vm_page_alloc(NULL, 0, req);
		if (page == NULL)
			return (NULL);
	} else {
		vm_paddr_t pmax = (flags & GFP_DMA32) ?
		    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
retry:
		page = vm_page_alloc_contig(NULL, 0, req,
		    npages, 0, pmax, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

		if (page == NULL) {
			if (flags & M_WAITOK) {
				if (!vm_page_reclaim_contig(req,
				    npages, 0, pmax, PAGE_SIZE, 0)) {
					VM_WAIT;
				}
				flags &= ~M_WAITOK;
				goto retry;
			}
			return (NULL);
		}
	}
	if (flags & M_ZERO) {
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			if ((pgo->flags & PG_ZERO) == 0)
				pmap_zero_page(pgo);
		}
	}
#else
	vm_offset_t vaddr;
	vm_page_t page;

	vaddr = linux_alloc_kmem(flags, order);
	if (vaddr == 0)
		return (NULL);

	page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));

	KASSERT(vaddr == (vm_offset_t)page_address(page),
	    ("Page address mismatch"));
#endif
	return (page);
}

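/*
 * Free pages obtained from linux_alloc_pages().  On direct-map systems
 * each of the 2^order pages is freed individually under its page lock;
 * otherwise the backing kernel mapping is released instead.
 */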
void
linux_free_pages(vm_page_t page, unsigned int order)
{
#ifdef LINUXKPI_HAVE_DMAP
	unsigned long npages = 1UL << order;
	unsigned long x;

	for (x = 0; x != npages; x++) {
		vm_page_t pgo = page + x;

		vm_page_lock(pgo);
		vm_page_free(pgo);
		vm_page_unlock(pgo);
	}
#else
	vm_offset_t vaddr;

	vaddr = (vm_offset_t)page_address(page);

	linux_free_kmem(vaddr, order);
#endif
}

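/*
 * Allocate a kernel virtual mapping of PAGE_SIZE << order bytes.
 * GFP_DMA32 requests are satisfied by a physically contiguous
 * allocation below 4GB; everything else comes straight from the
 * kernel arena.
 */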
vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	vm_offset_t addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(kmem_arena, size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(kmem_arena, size,
		    flags & GFP_NATIVE_MASK, 0, BUS_SPACE_MAXADDR_32BIT,
		    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return (addr);
}

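/*
 * Release a mapping obtained from linux_alloc_kmem(); the order must
 * match the one used at allocation time.
 */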
void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free(kmem_arena, addr, size);
}

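/*
 * Common body for the get_user_pages*() variants below: fault in and
 * hold "nr_pages" user pages starting at "start", requesting write
 * access when "write" is set, then convert each transient hold into a
 * wiring so the pages stay resident after this function returns.
 * Returns nr_pages on success or -EFAULT if any page could not be
 * faulted in.
 */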
static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;
	int i;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ((size_t)nr_pages) << PAGE_SHIFT;
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	if (count == -1)
		return (-EFAULT);

	for (i = 0; i != nr_pages; i++) {
		struct page *pg = pages[i];

		vm_page_lock(pg);
		vm_page_wire(pg);
		vm_page_unhold(pg);
		vm_page_unlock(pg);
	}
	return (nr_pages);
}

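/*
 * Non-sleeping counterpart of get_user_pages(): only pages that are
 * already resident and mapped with the required protection are wired,
 * so the function may stop short of nr_pages.  Returns the number of
 * pages gathered, 0 when called from interrupt context, or -EINVAL
 * for a range outside the process map.
 */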
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	va = start;
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + (((size_t)nr_pages) << PAGE_SHIFT);
	if (start < vm_map_min(map) || end > vm_map_max(map))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		vm_page_lock(*mp);
		vm_page_wire(*mp);
		vm_page_unhold(*mp);
		vm_page_unlock(*mp);

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

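/*
 * Wire user pages in another task's address space.  The mm and vmas
 * arguments are accepted for Linux API compatibility but unused here.
 */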
long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

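/*
 * Same as above, but for the calling process's own address space.
 */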
long
get_user_pages(unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

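/*
 * Linux distinguishes vmalloc() addresses from others.  Here the test
 * is whether the address is backed by a UMA slab, which holds for
 * kernel heap allocations.
 */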
int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

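/*
 * Look up the page at "pindex" in a shmem object, paging it in from
 * the pager when a copy exists there and zero-filling it otherwise.
 * The page is returned wired and unbusied, or as ERR_PTR(-EINVAL)
 * when the pager fails.  Non-sleeping (GFP_NOWAIT) lookups are not
 * implemented.
 */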
struct page *
linux_shmem_read_mapping_page_gfp(vm_object_t obj, int pindex, gfp_t gfp)
{
	vm_page_t page;
	int rv;

	if ((gfp & GFP_NOWAIT) != 0)
		panic("GFP_NOWAIT is unimplemented");

	VM_OBJECT_WLOCK(obj);
	page = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED);
	if (page->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(page);
		if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &page, 1, NULL, NULL);
			if (rv != VM_PAGER_OK) {
				vm_page_lock(page);
				vm_page_unwire(page, PQ_NONE);
				vm_page_free(page);
				vm_page_unlock(page);
				VM_OBJECT_WUNLOCK(obj);
				return (ERR_PTR(-EINVAL));
			}
			MPASS(page->valid == VM_PAGE_BITS_ALL);
		} else {
			pmap_zero_page(page);
			page->valid = VM_PAGE_BITS_ALL;
			page->dirty = 0;
		}
		vm_page_xunbusy(page);
	}
	VM_OBJECT_WUNLOCK(obj);
	return (page);
}

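/*
 * Create an anonymous, shmem-backed file.  A single allocation holds
 * both the linux_file and a placeholder vnode; the data itself lives
 * in a default (swap-backed) VM object of the requested size.  The
 * name and flags arguments are currently ignored.
 */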
struct linux_file *
linux_shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	struct fileobj {
		struct linux_file file __aligned(sizeof(void *));
		struct vnode vnode __aligned(sizeof(void *));
	};
	struct fileobj *fileobj;
	struct linux_file *filp;
	struct vnode *vp;
	int error;

	fileobj = kzalloc(sizeof(*fileobj), GFP_KERNEL);
	if (fileobj == NULL) {
		error = -ENOMEM;
		goto err_0;
	}
	filp = &fileobj->file;
	vp = &fileobj->vnode;

	filp->f_count = 1;
	filp->f_vnode = vp;
	filp->f_shmem = vm_pager_allocate(OBJT_DEFAULT, NULL, size,
	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
	if (filp->f_shmem == NULL) {
		error = -ENOMEM;
		goto err_1;
	}
	return (filp);
err_1:
	kfree(filp);
err_0:
	return (ERR_PTR(error));
}

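/*
 * Remove the pages in the index range [start, end) from an object and
 * report how many resident pages that removed.
 */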
static vm_ooffset_t
linux_invalidate_mapping_pages_sub(vm_object_t obj, vm_pindex_t start,
    vm_pindex_t end, int flags)
{
	int start_count, end_count;

	VM_OBJECT_WLOCK(obj);
	start_count = obj->resident_page_count;
	vm_object_page_remove(obj, start, end, flags);
	end_count = obj->resident_page_count;
	VM_OBJECT_WUNLOCK(obj);
	return (start_count - end_count);
}

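/*
 * Drop only clean pages; dirty pages survive, matching the semantics
 * of Linux's invalidate_mapping_pages().
 */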
unsigned long
linux_invalidate_mapping_pages(vm_object_t obj, pgoff_t start, pgoff_t end)
{

	return (linux_invalidate_mapping_pages_sub(obj, start, end, OBJPR_CLEANONLY));
}

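/*
 * Discard all pages, dirty or not, that lie entirely inside the
 * inclusive byte range [lstart, lend]: the start offset is rounded up
 * and the end offset down to page boundaries before the removal.
 */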
void
linux_shmem_truncate_range(vm_object_t obj, loff_t lstart, loff_t lend)
{
	vm_pindex_t start = OFF_TO_IDX(lstart + PAGE_SIZE - 1);
	vm_pindex_t end = OFF_TO_IDX(lend + 1);

	(void) linux_invalidate_mapping_pages_sub(obj, start, end, 0);
}