/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

void *
linux_page_address(const struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

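/*
 * Backing implementation for Linux's alloc_pages().  With a direct map the
 * pages are allocated wired and unmanaged, contiguously when order > 0 or
 * when GFP_DMA32 restricts the physical address range; a failed contiguous
 * allocation is retried after reclaiming or waiting for memory unless the
 * caller asked for __GFP_NORETRY or a non-sleeping allocation.  Without a
 * direct map the memory is taken from the kernel map via linux_alloc_kmem()
 * so that page_address() keeps working.
 *
 * Illustrative LinuxKPI-style use (a sketch, not from this file):
 *
 *	struct page *p = alloc_pages(GFP_KERNEL | __GFP_ZERO, 1);
 *	if (p != NULL)
 *		__free_pages(p, 1);
 */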
struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;
		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
		retry:
			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			if (page == NULL) {
				if ((flags & (M_WAITOK | __GFP_NORETRY)) ==
				    M_WAITOK) {
					int err = vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0);
					if (err == ENOMEM)
						vm_wait(NULL);
					else if (err != 0)
						return (NULL);
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

static void
_linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

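/*
 * Counterpart of linux_alloc_pages() and the __free_page()/__free_pages()
 * macros.  With a direct map, each constituent page is unwired; unmanaged
 * pages whose last wiring is dropped are freed outright, while managed pages
 * (e.g. shmem pages obtained elsewhere) are only unwired back to the page
 * queues.  Without a direct map the memory came from the kernel map and is
 * released through _linux_free_kmem().
 */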
void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			/*
			 * The "free page" function is used in several
			 * contexts.
			 *
			 * Some pages are allocated by `linux_alloc_pages()`
			 * above, but not all of them are. For instance in the
			 * DRM drivers, some pages come from
			 * `shmem_read_mapping_page_gfp()`.
			 *
			 * That's why we need to check if the page is managed
			 * or not here.
			 */
			if ((pgo->oflags & VPO_UNMANAGED) == 0) {
				vm_page_unwire(pgo, PQ_ACTIVE);
			} else {
				if (vm_page_unwire_noq(pgo))
					vm_page_free(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		_linux_free_kmem(vaddr, order);
	}
}

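/*
 * Back-end for release_pages().  The argument may carry either an array of
 * page pointers or an array of folio pointers; the CTASSERT below records
 * the assumption that makes treating them interchangeably safe, namely that
 * struct folio starts with its embedded page.
 */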
void
linux_release_pages(release_pages_arg arg, int nr)
{
	int i;

	CTASSERT(offsetof(struct folio, page) == 0);

	for (i = 0; i < nr; i++)
		__free_page(arg.pages[i]);
}

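/*
 * Allocate 2^order pages of physically contiguous, wired kernel memory and
 * return the kernel virtual address.  GFP_DMA32 restricts the backing pages
 * to the low 4GB so that 32-bit DMA engines can reach them.
 */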
vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
	    ((flags & GFP_DMA32) == 0) ? -1UL : BUS_SPACE_MAXADDR_32BIT,
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

	return ((vm_offset_t)addr);
}

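/*
 * Release memory obtained from linux_alloc_kmem().  Addresses inside the
 * kernel map are returned to kmem directly; any other address is taken to be
 * a direct-map address and is translated back to its vm_page and handed to
 * linux_free_pages().
 */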
void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	KASSERT((addr & ~PAGE_MASK) == 0,
	    ("%s: addr %p is not page aligned", __func__, (void *)addr));

	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
		_linux_free_kmem(addr, order);
	} else {
		vm_page_t page;

		page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
		linux_free_pages(page, order);
	}
}

static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

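/*
 * Non-sleeping variant of get_user_pages().  Only pages that are already
 * resident and mapped with the required permissions are picked up; nothing
 * is faulted in, and the number of pages successfully held is returned.
 * Returning 0 from interrupt context mirrors the Linux behaviour of refusing
 * to do any work there.
 */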
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

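/*
 * The two get_user_pages() entry points below differ only in the address
 * space they operate on: get_user_pages_remote() wires pages of the process
 * owning the given task, while lkpi_get_user_pages() works on the current
 * process.  Both may fault pages in via vm_fault_quick_hold_pages().
 */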
long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

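/*
 * Insert the vm_page backing `pfn` into the VM object of the given VMA at
 * the index corresponding to `addr`, with the object lock held by the
 * caller.  If the page currently belongs to another object (typically a
 * shmem object), it is busied, unswapped and removed from that object
 * first; the operation is abandoned with VM_FAULT_NOPAGE if the page is
 * still mapped elsewhere.  On success the page's memory attribute is set
 * from `prot` and the VMA's inserted-PFN bookkeeping is updated.
 */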
vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	struct pctrie_iter pages;
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	vm_page_iter_init(&pages, vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
			pctrie_iter_reset(&pages);
			goto retry;
		}
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			pctrie_iter_reset(&pages);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

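/*
 * Map a physically contiguous range of page frames into a VMA one page at a
 * time using lkpi_vmf_insert_pfn_prot_locked().  VM_FAULT_OOM results are
 * retried after waiting for free memory; any other fault error unwinds the
 * pages inserted so far with zap_vma_ptes() and is reported as -EFAULT.
 */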
int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}

int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range() has parameter
 * semantics and types compatible with the Linux version, the values passed
 * in differ:
 * @obj must match the vm_private_data field of the vm_area_struct returned
 *      by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen must match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen __unused, int even_cows __unused)
{
	vm_object_t devobj;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		cdev_mgtdev_pager_free_pages(devobj);
		vm_object_deallocate(devobj);
	}
}

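/*
 * Backing for Linux's arch_phys_wc_add()/arch_phys_wc_del().  On i386
 * without a working PAT, write-combining is emulated with an MTRR entry
 * tracked in mtrr_idr so it can be torn down again; everywhere else this is
 * a no-op.  Handles are offset by __MTRR_ID_BASE, so values below that (0 or
 * a negative errno) mean nothing was registered and are ignored by
 * lkpi_arch_phys_wc_del().
 *
 * Typical pairing (illustrative only):
 *
 *	int handle = arch_phys_wc_add(base, size);
 *	...
 *	arch_phys_wc_del(handle);
 */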
int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}

/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support up to a single page as the fragment size and we will
 * always return a full page.  This may be wasteful on small objects
 * but the only known consumer (mt76) is either asking for a half-page
 * or a full page.  If this were to become a problem we could implement
 * a more elaborate version.
 */
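/*
 * Illustrative use through the usual Linux-facing wrappers (a sketch only;
 * the page_frag_alloc()/page_frag_free() names are assumed to map onto the
 * functions below, as in the LinuxKPI headers):
 *
 *	struct page_frag_cache pfc = {};
 *	void *frag = page_frag_alloc(&pfc, 2048, GFP_ATOMIC);
 *	...
 *	page_frag_free(frag);
 */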
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain(). Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}

void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}
571