/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

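/*
 * Translate a struct page to a kernel virtual address.  Pages that do
 * not belong to kernel_object are resolved through the direct map when
 * the architecture provides one; pages owned by kernel_object typically
 * came from linux_alloc_kmem(), so their KVA can be recomputed from the
 * page index relative to VM_MIN_KERNEL_ADDRESS.
 */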
void *
linux_page_address(const struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    (void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page)) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

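/*
 * Allocate 1 << order wired pages.  With a direct map available, order-0
 * allocations without GFP_DMA32 come from vm_page_alloc_noobj(); larger
 * orders and GFP_DMA32 requests use a physically contiguous allocation,
 * optionally retried through vm_page_reclaim_contig() when the caller
 * allows sleeping.  Without a direct map, fall back to linux_alloc_kmem()
 * so that the pages always have a kernel mapping.
 *
 * A minimal (hypothetical) caller sketch:
 *
 *	struct page *p;
 *
 *	p = linux_alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 *	if (p != NULL) {
 *		void *va = linux_page_address(p);
 *		...
 *		linux_free_pages(p, 0);
 *	}
 */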
struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;

		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;

			if ((flags & __GFP_NORETRY) != 0)
				req |= VM_ALLOC_NORECLAIM;

	retry:
			if ((flags & __GFP_THISNODE) != 0) {
				int curdomain = PCPU_GET(domain);
				page = vm_page_alloc_noobj_contig_domain(
				    curdomain, req, npages, 0, pmax,
				    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			} else {
				page = vm_page_alloc_noobj_contig(
				    req, npages, 0, pmax,
				    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			}

			if (page == NULL) {
				if ((flags & (M_WAITOK | __GFP_NORETRY |
				    __GFP_THISNODE)) == M_WAITOK) {
					int err = vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0);
					if (err == ENOMEM)
						vm_wait(NULL);
					else if (err != 0)
						return (NULL);
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

static void
_linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			/*
			 * The "free page" function is used in several
			 * contexts.
			 *
			 * Some pages are allocated by `linux_alloc_pages()`
			 * above, but not all of them are.  For instance in the
			 * DRM drivers, some pages come from
			 * `shmem_read_mapping_page_gfp()`.
			 *
			 * That's why we need to check if the page is managed
			 * or not here.
			 */
			if ((pgo->oflags & VPO_UNMANAGED) == 0) {
				vm_page_unwire(pgo, PQ_ACTIVE);
			} else {
				if (vm_page_unwire_noq(pgo))
					vm_page_free(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		_linux_free_kmem(vaddr, order);
	}
}

void
linux_release_pages(release_pages_arg arg, int nr)
{
	int i;

	CTASSERT(offsetof(struct folio, page) == 0);

	for (i = 0; i < nr; i++)
		__free_page(arg.pages[i]);
}

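/*
 * Allocate a physically contiguous, page-aligned, kernel-mapped buffer of
 * 1 << order pages.  GFP_DMA32 restricts the allocation to the first 4GB
 * of physical memory; the native malloc flags embedded in "flags" (for
 * example M_WAITOK or M_ZERO) are passed through to kmem_alloc_contig().
 */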
vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
	    ((flags & GFP_DMA32) == 0) ? -1UL : BUS_SPACE_MAXADDR_32BIT,
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

	return ((vm_offset_t)addr);
}

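/*
 * Free memory obtained either from linux_alloc_kmem() or from
 * linux_alloc_pages().  Addresses inside the kernel map were allocated by
 * kmem_alloc_contig() and are returned through kmem_free(); anything else
 * is assumed to be a direct-map address and is released page by page.
 */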
void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	KASSERT((addr & ~PAGE_MASK) == 0,
	    ("%s: addr %p is not page aligned", __func__, (void *)addr));

	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
		_linux_free_kmem(addr, order);
	} else {
		vm_page_t page;

		page = DMAP_TO_VM_PAGE(addr);
		linux_free_pages(page, order);
	}
}

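/*
 * Common helper for the get_user_pages*() entry points below: wire the
 * user pages backing [start, start + nr_pages * PAGE_SIZE) via
 * vm_fault_quick_hold_pages(), faulting them in if necessary.  Returns
 * nr_pages on success and -EFAULT if any page could not be held, matching
 * the Linux convention of returning a negative errno.
 */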
static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

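/*
 * "Fast" GUP variant: only pages that are already resident and mapped
 * with the required protection are grabbed via pmap_extract_and_hold();
 * no page faults are taken.  Returns the number of consecutive pages
 * held starting at "start", which may be fewer than nr_pages.
 */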
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

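/*
 * A pointer counts as a "vmalloc" address if it was registered in the
 * vmmap hash by vmap()/_ioremap_attr(), or if it lies within a UMA slab,
 * for example memory obtained through kmalloc().
 */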
int
is_vmalloc_addr(const void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (addr == vmmap->vm_addr)
			break;
	mtx_unlock(&vmmaplock);
	if (vmmap != NULL)
		return (1);

	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
	pmap_unmapdev(addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

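/*
 * Map an array of already-allocated pages into a contiguous range of
 * kernel virtual address space.  The KVA and its size are recorded in the
 * vmmap hash so that vunmap() can later undo the mapping without the
 * caller having to remember the page count.
 */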
void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	void *off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == NULL)
		return (NULL);
	vmmap_add(off, size);
	pmap_qenter(off, pages, count);

	return (off);
}

#define	VMAP_MAX_CHUNK_SIZE	(65536U / sizeof(struct vm_page)) /* KMEM_ZMAX */

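/*
 * Map an array of page frame numbers into contiguous KVA.  PFNs that are
 * not backed by a vm_page (for example device memory) are represented by
 * temporary "fake" pages so that pmap_qenter() can be used uniformly.
 * The work is done in chunks of at most VMAP_MAX_CHUNK_SIZE pages to keep
 * the temporary page arrays below the kmalloc limit (KMEM_ZMAX).
 */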
void *
linuxkpi_vmap_pfn(unsigned long *pfns, unsigned int count, int prot)
{
	vm_page_t m, *ma, fma;
	void *off;
	char *coff;
	vm_paddr_t pa;
	vm_memattr_t attr;
	size_t size;
	unsigned int i, c, chunk;

	size = ptoa(count);
	off = kva_alloc(size);
	if (off == NULL)
		return (NULL);
	vmmap_add(off, size);

	chunk = MIN(count, VMAP_MAX_CHUNK_SIZE);
	attr = pgprot2cachemode(prot);
	ma = malloc(chunk * sizeof(vm_page_t), M_TEMP, M_WAITOK | M_ZERO);
	fma = NULL;
	c = 0;
	coff = off;
	for (i = 0; i < count; i++) {
		pa = IDX_TO_OFF(pfns[i]);
		m = PHYS_TO_VM_PAGE(pa);
		if (m == NULL) {
			if (fma == NULL)
				fma = malloc(chunk * sizeof(struct vm_page),
				    M_TEMP, M_WAITOK | M_ZERO);
			m = fma + c;
			vm_page_initfake(m, pa, attr);
		} else {
			pmap_page_set_memattr(m, attr);
		}
		ma[c] = m;
		c++;
		if (c == chunk || i == count - 1) {
			pmap_qenter(coff, ma, c);
			if (i == count - 1)
				break;
			coff += ptoa(c);
			c = 0;
			memset(ma, 0, chunk * sizeof(vm_page_t));
			if (fma != NULL)
				memset(fma, 0, chunk * sizeof(struct vm_page));
		}
	}
	free(fma, M_TEMP);
	free(ma, M_TEMP);

	return (off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove(addr, vmmap->vm_size / PAGE_SIZE);
	kva_free(addr, vmmap->vm_size);
	kfree(vmmap);
}

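/*
 * Insert the vm_page backing "pfn" into the vma's VM object at the pindex
 * derived from the faulting address.  If the page is currently owned by
 * another object (e.g. a shmem object used by a DRM driver), it is busied,
 * unswapped and removed from that object first; concurrent ownership
 * changes cause the lookup to be retried.  Called and returns with the
 * vma's VM object write-locked.
 */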
vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	struct pctrie_iter pages;
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	vm_page_iter_init(&pages, vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (page == NULL)
			return (VM_FAULT_SIGBUS);
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
			pctrie_iter_reset(&pages);
			goto retry;
		}
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			pctrie_iter_reset(&pages);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

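/*
 * Insert a contiguous range of PFNs into the vma, one page at a time.
 * VM_FAULT_OOM results are retried after waiting for free pages; any other
 * error aborts the loop, unmaps whatever was already inserted via
 * zap_vma_ptes() and is reported to the caller as -EFAULT.
 */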
int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}

int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range() has semantics and
 * parameter types compatible with the Linux version, the values passed in
 * are different:
 * @obj should match the vm_private_data field of the vm_area_struct
 * returned by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen should match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen __unused, int even_cows __unused)
{
	vm_object_t devobj;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		cdev_mgtdev_pager_free_pages(devobj);
		vm_object_deallocate(devobj);
	}
}

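/*
 * Emulate arch_phys_wc_add()/del().  On i386 machines without a working
 * PAT, request a write-combining MTRR for the range through the FreeBSD
 * mem_range facility and hand back an idr-allocated cookie offset by
 * __MTRR_ID_BASE, which lkpi_arch_phys_wc_del() uses to find and remove
 * the range again.  On all other platforms this is a no-op.
 */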
int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}

/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support fragment sizes up to a single page and we always
 * return a full page.  This may be wasteful for small objects, but the
 * only known consumer (mt76) asks for either a half page or a full page.
 * If this were to become a problem we can implement a more elaborate
 * version.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain().  Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}

void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}

static void
lkpi_page_init(void *arg)
{
	int i;

	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_init, NULL);

static void
lkpi_page_uninit(void *arg)
{
	mtx_destroy(&vmmaplock);
}
SYSUNINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_uninit, NULL);