/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

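/*
 * Populate a Linux struct sysinfo from the FreeBSD VM counters.  FreeBSD
 * has no highmem split, so the high-memory fields are always zero.
 */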
void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

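/*
 * Return the kernel virtual address of a page.  Pages belonging to the
 * kernel_object (typically allocated through linux_alloc_kmem()) live in
 * the kernel map at an offset given by their pindex; all other pages are
 * translated through the direct map, or have no usable mapping when the
 * architecture lacks a DMAP.
 */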
void *
linux_page_address(const struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

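/*
 * Allocate 1 << order physically contiguous pages.  With a direct map the
 * pages come straight from the page allocator, retrying through
 * vm_page_reclaim_contig() for sleepable requests; without one we fall
 * back to linux_alloc_kmem() so the pages are also mapped into the
 * kernel map.
 */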
struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;
		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
		retry:
			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			if (page == NULL) {
				if ((flags & (M_WAITOK | __GFP_NORETRY)) ==
				    M_WAITOK) {
					int err = vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0);
					if (err == ENOMEM)
						vm_wait(NULL);
					else if (err != 0)
						return (NULL);
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

static void
_linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			/*
			 * The "free page" function is used in several
			 * contexts.
			 *
			 * Some pages are allocated by `linux_alloc_pages()`
			 * above, but not all of them are.  For instance, in
			 * the DRM drivers some pages come from
			 * `shmem_read_mapping_page_gfp()`.
			 *
			 * That is why we need to check whether the page is
			 * managed or not here.
			 */
			if ((pgo->oflags & VPO_UNMANAGED) == 0) {
				vm_page_unwire(pgo, PQ_ACTIVE);
			} else {
				if (vm_page_unwire_noq(pgo))
					vm_page_free(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		_linux_free_kmem(vaddr, order);
	}
}

void
linux_release_pages(struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		__free_page(pages[i]);
}

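/*
 * Allocate a physically contiguous, wired and mapped run of 1 << order
 * pages and return its kernel virtual address.  GFP_DMA32 restricts the
 * allocation to the first 4GB of physical memory.
 */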
vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
	    ((flags & GFP_DMA32) == 0) ? -1UL : BUS_SPACE_MAXADDR_32BIT,
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

	return ((vm_offset_t)addr);
}

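/*
 * Free memory obtained from linux_alloc_kmem().  The address may be
 * either a kernel-map address or, on DMAP systems, a direct-map address
 * produced by linux_alloc_pages(); the two cases are told apart by
 * address range.
 */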
void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	KASSERT((addr & PAGE_MASK) == 0,
	    ("%s: addr %p is not page aligned", __func__, (void *)addr));

	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
		_linux_free_kmem(addr, order);
	} else {
		vm_page_t page;

		page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
		linux_free_pages(page, order);
	}
}

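/*
 * Fault in and hold the user pages backing [start, start + nr_pages) in
 * the given map.  Returns nr_pages on success or -EFAULT if any page
 * could not be held.
 */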
static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

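/*
 * Best-effort, non-faulting variant: translate and hold what is already
 * mapped using pmap_extract_and_hold() and stop at the first page that
 * is not resident.  Returns the number of pages held, which may be fewer
 * than nr_pages.
 */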
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

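/*
 * Hold user pages from another process's address space, identified by
 * the given task; FOLL_WRITE requests write access.
 */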
long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

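/*
 * Linux checks whether an address lies in the vmalloc range; the closest
 * LinuxKPI equivalent is to ask whether the address is backed by a UMA
 * slab.
 */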
int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

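/*
 * Insert the page backing the given pfn into the VMA's VM object at the
 * object offset corresponding to addr.  If the page currently belongs to
 * another object (e.g. a shmem object), it is busied, removed from that
 * object and re-inserted here, retrying from scratch whenever a lock or
 * busy state has to be dropped.
 */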
vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	struct pctrie_iter pages;
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	vm_page_iter_init(&pages, vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
			pctrie_iter_reset(&pages);
			goto retry;
		}
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			pctrie_iter_reset(&pages);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

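/*
 * Map a contiguous range of page frames into the VMA, one page at a
 * time.  An OOM result triggers a wait-and-retry; any other fault error
 * aborts, undoes the partial mapping with zap_vma_ptes() and returns
 * -EFAULT.
 */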
int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}

int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range() has semantics and
 * parameter types compatible with the Linux version, the values passed in
 * differ:
 * @obj must match the vm_private_data field of the vm_area_struct returned
 *      by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen must match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen __unused, int even_cows __unused)
{
	vm_object_t devobj;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		cdev_mgtdev_pager_free_pages(devobj);
		vm_object_deallocate(devobj);
	}
}

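/*
 * Request write-combining for a physical address range.  On i386 without
 * working PAT this falls back to programming an MTRR through the
 * mem_range interface; the identifier returned (biased by
 * __MTRR_ID_BASE) can later be passed to lkpi_arch_phys_wc_del().  On
 * all other platforms this is a no-op.
 */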
int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}

/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support at most a single page as the fragment size and we will
 * always return a full page.  This may be wasteful on small objects, but
 * the only known consumer (mt76) either asks for a half page or a full
 * page.  If this were to become a problem we can implement a more
 * elaborate version.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain(). Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}

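/*
 * Free a buffer obtained from linuxkpi_page_frag_alloc().  A minimal
 * round-trip sketch follows (hypothetical caller; it assumes the usual
 * Linux-named wrappers page_frag_alloc()/page_frag_free() resolve to
 * these functions):
 *
 *	struct page_frag_cache pfc;
 *	void *buf;
 *
 *	buf = page_frag_alloc(&pfc, 2048, GFP_ATOMIC);
 *	if (buf != NULL) {
 *		... fill the buffer and hand it off ...
 *		page_frag_free(buf);
 *	}
 */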
void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}