/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

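/*
 * Fill out the Linux sysinfo structure from the FreeBSD memory
 * counters.  Highmem does not exist on the supported platforms, so
 * the corresponding fields are left at zero.
 */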
void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

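/*
 * Return the kernel virtual address of a page.  Pages backed by the
 * kernel object live at a fixed offset inside the kernel map;
 * everything else is translated through the direct map, when one
 * exists.
 */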
void *
linux_page_address(const struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

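/*
 * Allocate 2^order wired pages, contiguous in physical address space
 * and optionally zeroed (M_ZERO) or bounded below 4GB (GFP_DMA32).
 * Without a direct map, the pages are instead backed by contiguous
 * kernel virtual memory so that page_address() keeps working.
 *
 * Hypothetical usage sketch; callers normally reach this through the
 * Linux-style alloc_pages()/__free_pages() wrappers:
 *
 *	struct page *p = alloc_pages(GFP_KERNEL | __GFP_ZERO, 1);
 *	if (p != NULL)
 *		__free_pages(p, 1);
 */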
struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;
		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
retry:
			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			if (page == NULL) {
				if ((flags & (M_WAITOK | __GFP_NORETRY)) ==
				    M_WAITOK) {
					int err = vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0);
					if (err == ENOMEM)
						vm_wait(NULL);
					else if (err != 0)
						return (NULL);
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

static void
_linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

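/*
 * Release 2^order pages.  Not every page freed here was allocated by
 * linux_alloc_pages(); see the comment in the loop below for how
 * managed pages (e.g. shmem pages) are told apart from unmanaged
 * ones.
 */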
void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			/*
			 * The "free page" function is used in several
			 * contexts.
			 *
			 * Some pages are allocated by `linux_alloc_pages()`
			 * above, but not all of them are.  For instance in
			 * the DRM drivers, some pages come from
			 * `shmem_read_mapping_page_gfp()`.
			 *
			 * That's why we need to check whether the page is
			 * managed or not here.
			 */
			if ((pgo->oflags & VPO_UNMANAGED) == 0) {
				vm_page_unwire(pgo, PQ_ACTIVE);
			} else {
				if (vm_page_unwire_noq(pgo))
					vm_page_free(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		_linux_free_kmem(vaddr, order);
	}
}

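/*
 * Drop one reference on each page in the array.  The CTASSERT below
 * ensures that a folio pointer can be treated as a page pointer, so
 * both flavors of release_pages_arg are handled alike.
 */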
void
linux_release_pages(release_pages_arg arg, int nr)
{
	int i;

	CTASSERT(offsetof(struct folio, page) == 0);

	for (i = 0; i < nr; i++)
		__free_page(arg.pages[i]);
}

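/*
 * Allocate a physically contiguous kernel virtual mapping of 2^order
 * pages, restricted to the low 4GB when GFP_DMA32 is set.
 */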
vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
	    ((flags & GFP_DMA32) == 0) ? -1UL : BUS_SPACE_MAXADDR_32BIT,
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

	return ((vm_offset_t)addr);
}

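/*
 * Free memory returned by linux_alloc_kmem().  An address inside the
 * kernel map came from kmem_alloc_contig() and is handed back to
 * kmem; any other address must be a direct-map address, so the
 * underlying pages are freed instead.
 */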
void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	KASSERT((addr & PAGE_MASK) == 0,
	    ("%s: addr %p is not page aligned", __func__, (void *)addr));

	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
		_linux_free_kmem(addr, order);
	} else {
		vm_page_t page;

		page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
		linux_free_pages(page, order);
	}
}

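/*
 * Common backend of the get_user_pages*() functions: fault in and
 * hold nr_pages pages starting at the given user virtual address.
 */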
static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages,
	    nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

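/*
 * Best-effort variant of get_user_pages() that must not sleep: only
 * pages that are already resident with sufficient protection are
 * held, so fewer than nr_pages entries may be returned.  Pages held
 * for writing are dirtied up front; see the comment in the loop.
 */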
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

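/*
 * Hold user pages of the process identified by task.  The vmas
 * output parameter is ignored by this implementation.
 */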
long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

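/*
 * There is no vmalloc arena on FreeBSD; treat an address as
 * "vmalloc'ed" when its backing memory belongs to a UMA slab.
 */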
int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

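/*
 * Insert the page identified by pfn into the VM object backing vma
 * at the index corresponding to addr, stealing the page from its
 * current object (expected to be a shmem object) if necessary.  The
 * object must be write locked by the caller; a Linux-style
 * vm_fault_t is returned.
 */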
vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	struct pctrie_iter pages;
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	vm_page_iter_init(&pages, vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
			pctrie_iter_reset(&pages);
			goto retry;
		}
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			pctrie_iter_reset(&pages);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

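/*
 * Establish mappings for a contiguous physical range in a VMA, one
 * page at a time, waiting for memory and retrying on transient
 * failures.  Partially established mappings are removed again with
 * zap_vma_ptes() on error.
 *
 * Hypothetical usage sketch from a driver mmap handler (drivers
 * normally call this through the remap_pfn_range() wrapper):
 *
 *	return (remap_pfn_range(vma, vma->vm_start, pfn,
 *	    vma->vm_end - vma->vm_start, vma->vm_page_prot));
 */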
int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}

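/*
 * Like remap_pfn_range(), but the page protection is derived from
 * the cache attribute of the io_mapping.
 */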
int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range has semantics and
 * parameter types compatible with the Linux version, the values passed in
 * are different:
 * @obj must match the vm_private_data field of the vm_area_struct returned
 * by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen must match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen __unused, int even_cows __unused)
{
	vm_object_t devobj;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		cdev_mgtdev_pager_free_pages(devobj);
		vm_object_deallocate(devobj);
	}
}

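/*
 * On i386 machines without a functional PAT, emulate
 * arch_phys_wc_add() by programming a write-combining MTRR for the
 * range and remembering it in an IDR for later removal.  Everywhere
 * else this is a no-op.
 */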
int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing. */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

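/*
 * Undo lkpi_arch_phys_wc_add(): look up the memory range descriptor
 * by handle, remove the MTRR, and free the descriptor.
 */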
void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}

/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We support only a fragment size of up to one page and always return
 * a full page.  This may be wasteful on small objects, but the only
 * known consumer (mt76) asks for either a half page or a full page.
 * Should this become a problem, a more elaborate version can be
 * implemented.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain().  Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}

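/*
 * Free a fragment from linuxkpi_page_frag_alloc().  Fragments are
 * always backed by a whole page, so this amounts to a page free.
 */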
void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}