/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

void *
linux_page_address(struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;
		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
		retry:
			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			if (page == NULL) {
				if (flags & M_WAITOK) {
					if (!vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0)) {
						vm_wait(NULL);
					}
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			if (vm_page_unwire_noq(pgo))
				vm_page_free(pgo);
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		linux_free_kmem(vaddr, order);
	}
}

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return ((vm_offset_t)addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NOCREAT);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
			goto retry;
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_insert(page, vm_obj, pindex)) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}

int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range() has semantics and
 * parameter types compatible with the Linux version, the values passed in
 * are different:
 * @obj should match the vm_private_data field of the vm_area_struct
 *      returned by the mmap file operation handler; see
 *      linux_file_mmap_single().
 * @holelen should match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen, int even_cows __unused)
{
	vm_object_t devobj;
	vm_page_t page;
	int i, page_count;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		page_count = OFF_TO_IDX(holelen);

		VM_OBJECT_WLOCK(devobj);
retry:
		for (i = 0; i < page_count; i++) {
			page = vm_page_lookup(devobj, i);
			if (page == NULL)
				continue;
			if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
				goto retry;
			cdev_pager_free_page(devobj, page);
		}
		VM_OBJECT_WUNLOCK(devobj);
		vm_object_deallocate(devobj);
	}
}

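/*
 * Illustrative sketch only (assuming the usual LinuxKPI macro mapping of
 * unmap_mapping_range() onto this function): a DRM-style driver that
 * handed out mappings of a device object via linux_file_mmap_single()
 * would drop all user mappings of that object with something like
 *
 *	unmap_mapping_range(vma->vm_private_data, 0, obj_size, 1);
 *
 * where "obj_size" is a hypothetical name for the size of the mapped
 * area; holebegin and even_cows are ignored here.
 */
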
int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		if (error == 0) {
			/* "id" is only valid when idr_get_new() succeeds. */
			MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		} else {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}

/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support fragment sizes of up to one page and we always
 * return a full page.  This may be wasteful for small objects, but
 * the only known consumer (mt76) asks for either a half page or a
 * full page.  If this ever becomes a problem, we can implement a
 * more elaborate version.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain(). Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}
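
/*
 * Usage sketch (illustrative only, assuming the usual LinuxKPI macro
 * mapping of page_frag_alloc()/page_frag_free() onto the linuxkpi_*
 * functions in this file): a consumer such as mt76 would do
 *
 *	struct page_frag_cache pfc;
 *	void *buf;
 *
 *	buf = page_frag_alloc(&pfc, 2048, GFP_KERNEL);
 *	if (buf != NULL)
 *		page_frag_free(buf);
 *
 * With the one-page granularity described above, the 2048-byte request
 * still consumes a full page.
 */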

void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}
539