/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->totalhigh = 0;
	si->mem_unit = PAGE_SIZE;
}
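
/*
 * Illustrative sketch (not part of the original file): si_meminfo()
 * reports memory in units of si->mem_unit, so a caller computes the
 * byte total as below.  The local variable names are hypothetical.
 *
 *	struct sysinfo si;
 *	uint64_t total_bytes;
 *
 *	si_meminfo(&si);
 *	total_bytes = (uint64_t)si.totalram * si.mem_unit;
 */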

void *
linux_page_address(struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}
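
/*
 * Illustrative sketch (not part of the original file): the LinuxKPI
 * page_address() macro resolves to linux_page_address() above, which
 * can return NULL on platforms without a direct map when the page is
 * not backed by kernel_object, so callers should check the result.
 *
 *	void *va = page_address(page);
 *
 *	if (va != NULL)
 *		memset(va, 0, PAGE_SIZE);
 */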

vm_page_t
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	vm_page_t page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;
		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
		retry:
			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			if (page == NULL) {
				if (flags & M_WAITOK) {
					if (!vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0)) {
						vm_wait(NULL);
					}
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = PHYS_TO_VM_PAGE(vtophys((void *)vaddr));

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

void
linux_free_pages(vm_page_t page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			if (vm_page_unwire_noq(pgo))
				vm_page_free(pgo);
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		linux_free_kmem(vaddr, order);
	}
}
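
/*
 * Usage sketch (illustrative only, assuming the LinuxKPI alloc_pages()
 * and __free_pages() wrappers resolve to the two functions above):
 * orders pair up, so four pages allocated at order 2 are freed at
 * order 2.
 *
 *	vm_page_t page;
 *
 *	page = linux_alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	if (page != NULL) {
 *		... use the four pages via page_address(page) ...
 *		linux_free_pages(page, 2);
 *	}
 */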

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	vm_offset_t addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return (addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free(addr, size);
}
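
/*
 * Usage sketch (illustrative only): GFP_DMA32 takes the physically
 * contiguous path bounded below 4GB; the plain path only guarantees a
 * contiguous kernel virtual mapping.
 *
 *	vm_offset_t va;
 *
 *	va = linux_alloc_kmem(GFP_KERNEL | GFP_DMA32, 1);
 *	if (va != 0) {
 *		... use the two pages mapped at va ...
 *		linux_free_kmem(va, 1);
 *	}
 */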

static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}
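
/*
 * Usage sketch (illustrative; put_page() is assumed here as the
 * LinuxKPI release path): pin a few user pages without sleeping, then
 * drop the holds when done.  It returns the number of pages actually
 * pinned, which may be fewer than requested.
 *
 *	struct page *pages[4];
 *	int i, n;
 *
 *	n = __get_user_pages_fast(uaddr, 4, 1, pages);
 *	for (i = 0; i < n; i++) {
 *		... access pages[i] ...
 *		put_page(pages[i]);
 *	}
 */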

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
get_user_pages(unsigned long start, unsigned long nr_pages, int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}
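
/*
 * Illustrative note: the two wrappers above differ only in whose
 * address space they operate on.  Both return nr_pages on success or
 * -EFAULT on failure, so a hypothetical caller checks the sign:
 *
 *	long got;
 *
 *	got = get_user_pages(uaddr, npages, FOLL_WRITE, pages, NULL);
 *	if (got < 0)
 *		return (got);
 */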

int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NOCREAT);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
			goto retry;
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_insert(page, vm_obj, pindex)) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}
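
/*
 * Usage sketch (illustrative, hypothetical names): this helper expects
 * to run with the object write-locked, typically from a cdev pager
 * fault handler, once per faulted pfn.
 *
 *	VM_OBJECT_ASSERT_WLOCKED(vma->vm_obj);
 *	ret = lkpi_vmf_insert_pfn_prot_locked(vma, fault_addr, pfn,
 *	    vma->vm_page_prot);
 *	if (ret != VM_FAULT_NOPAGE)
 *		return (ret);
 */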

/*
 * Although the FreeBSD version of unmap_mapping_range() has semantics and
 * parameter types compatible with the Linux version, the values passed in
 * are different:
 * @obj must match the vm_private_data field of the vm_area_struct returned
 *      by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen must match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen, int even_cows __unused)
{
	vm_object_t devobj;
	vm_page_t page;
	int i, page_count;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		page_count = OFF_TO_IDX(holelen);

		VM_OBJECT_WLOCK(devobj);
retry:
		for (i = 0; i < page_count; i++) {
			page = vm_page_lookup(devobj, i);
			if (page == NULL)
				continue;
			if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
				goto retry;
			cdev_pager_free_page(devobj, page);
		}
		VM_OBJECT_WUNLOCK(devobj);
		vm_object_deallocate(devobj);
	}
}
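
/*
 * Usage sketch (illustrative): a driver revoking user mappings of its
 * device object passes the same cookie it stored in vm_private_data at
 * mmap time, plus the size of the mapped area.
 *
 *	lkpi_unmap_mapping_range(vma->vm_private_data, 0, size, 1);
 */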

int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}
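
/*
 * Illustrative pairing (sketch only, using the Linux-side
 * arch_phys_wc_add()/arch_phys_wc_del() names that these functions back
 * in the LinuxKPI headers): request write-combining for a framebuffer
 * aperture and release it on detach.  arch_phys_wc_del() itself ignores
 * cookies below __MTRR_ID_BASE, so a failed add needs no special-casing.
 *
 *	int mtrr;
 *
 *	mtrr = arch_phys_wc_add(bar_base, bar_size);
 *	...
 *	arch_phys_wc_del(mtrr);
 */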