/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

void *
linux_page_address(const struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}
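/*
 * linux_alloc_pages() below backs the Linux alloc_pages() family.  With a
 * direct map, order-0 requests without GFP_DMA32 take the cheap
 * vm_page_alloc_noobj() path; everything else needs physically contiguous
 * pages and may reclaim and retry when the caller is allowed to sleep.
 * A minimal, hypothetical consumer could look like this (error handling
 * elided; order 2 yields four contiguous pages):
 *
 *	struct page *p = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	void *va = page_address(p);
 *	...
 *	__free_pages(p, 2);
 */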
struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;

		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;

			if ((flags & __GFP_NORETRY) != 0)
				req |= VM_ALLOC_NORECLAIM;

retry:
			if ((flags & __GFP_THISNODE) != 0) {
				int curdomain = PCPU_GET(domain);

				page = vm_page_alloc_noobj_contig_domain(
				    curdomain, req, npages, 0, pmax,
				    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			} else {
				page = vm_page_alloc_noobj_contig(
				    req, npages, 0, pmax,
				    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			}

			if (page == NULL) {
				if ((flags & (M_WAITOK | __GFP_NORETRY |
				    __GFP_THISNODE)) == M_WAITOK) {
					int err = vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0);
					if (err == ENOMEM)
						vm_wait(NULL);
					else if (err != 0)
						return (NULL);
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

static void
_linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			/*
			 * The "free page" function is used in several
			 * contexts.
			 *
			 * Some pages are allocated by `linux_alloc_pages()`
			 * above, but not all of them are.  For instance in
			 * the DRM drivers, some pages come from
			 * `shmem_read_mapping_page_gfp()`.
			 *
			 * That's why we need to check if the page is managed
			 * or not here.
			 */
			if ((pgo->oflags & VPO_UNMANAGED) == 0) {
				vm_page_unwire(pgo, PQ_ACTIVE);
			} else {
				if (vm_page_unwire_noq(pgo))
					vm_page_free(pgo);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		_linux_free_kmem(vaddr, order);
	}
}

void
linux_release_pages(release_pages_arg arg, int nr)
{
	int i;

	CTASSERT(offsetof(struct folio, page) == 0);

	for (i = 0; i < nr; i++)
		__free_page(arg.pages[i]);
}

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
	    ((flags & GFP_DMA32) == 0) ? -1UL : BUS_SPACE_MAXADDR_32BIT,
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

	return ((vm_offset_t)addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	KASSERT((addr & PAGE_MASK) == 0,
	    ("%s: addr %p is not page aligned", __func__, (void *)addr));

	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
		_linux_free_kmem(addr, order);
	} else {
		vm_page_t page;

		page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
		linux_free_pages(page, order);
	}
}

static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages,
	    nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}
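/*
 * Non-sleeping counterpart of get_user_pages(): each page that is already
 * resident is wired via pmap_extract_and_hold() and, when a writable
 * mapping is requested, explicitly dirtied.  The walk stops at the first
 * page that would require a fault, so the return value may be less than
 * nr_pages.
 */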
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}
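/*
 * Hypothetical sketch of pinning and releasing user pages through the
 * wrappers above (buffer address, count and error handling are
 * illustrative only):
 *
 *	struct page *pages[16];
 *	long n = lkpi_get_user_pages(uaddr, 16, FOLL_WRITE, pages);
 *	for (long i = 0; i < n; i++) {
 *		... access the pinned page ...
 *		put_page(pages[i]);
 *	}
 */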
/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void			*vm_addr;
	unsigned long		vm_size;
};

struct vmmaphd {
	struct vmmap *lh_first;
};

#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
#define	VM_HASH(addr)	((((uintptr_t)(addr)) >> PAGE_SHIFT) & VMMAP_HASH_MASK)
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;

int
is_vmalloc_addr(const void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (addr == vmmap->vm_addr)
			break;
	mtx_unlock(&vmmaplock);
	if (vmmap != NULL)
		return (1);

	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

static void
vmmap_add(void *addr, unsigned long size)
{
	struct vmmap *vmmap;

	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
	mtx_lock(&vmmaplock);
	vmmap->vm_size = size;
	vmmap->vm_addr = addr;
	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
	mtx_unlock(&vmmaplock);
}

static struct vmmap *
vmmap_remove(void *addr)
{
	struct vmmap *vmmap;

	mtx_lock(&vmmaplock);
	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
		if (vmmap->vm_addr == addr)
			break;
	if (vmmap)
		LIST_REMOVE(vmmap, vm_next);
	mtx_unlock(&vmmaplock);

	return (vmmap);
}

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || \
    defined(__aarch64__) || defined(__riscv)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
	void *addr;

	addr = pmap_mapdev_attr(phys_addr, size, attr);
	if (addr == NULL)
		return (NULL);
	vmmap_add(addr, size);

	return (addr);
}
#endif

void
iounmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || \
    defined(__aarch64__) || defined(__riscv)
	pmap_unmapdev(addr, vmmap->vm_size);
#endif
	kfree(vmmap);
}

void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
	vm_offset_t off;
	size_t size;

	size = count * PAGE_SIZE;
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);
	pmap_qenter(off, pages, count);

	return ((void *)off);
}
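/*
 * vmap_pfn() support below: map an array of PFNs that need not all be
 * backed by a vm_page.  PFNs without a backing page (device memory, for
 * example) get a temporary fake page via vm_page_initfake() so that
 * pmap_qenter() can be used uniformly; work proceeds in chunks of at most
 * VMAP_MAX_CHUNK_SIZE entries to bound the size of the temporary page
 * arrays.
 */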
#define	VMAP_MAX_CHUNK_SIZE	(65536U / sizeof(struct vm_page)) /* KMEM_ZMAX */

void *
linuxkpi_vmap_pfn(unsigned long *pfns, unsigned int count, int prot)
{
	vm_page_t m, *ma, fma;
	vm_offset_t off, coff;
	vm_paddr_t pa;
	vm_memattr_t attr;
	size_t size;
	unsigned int i, c, chunk;

	size = ptoa(count);
	off = kva_alloc(size);
	if (off == 0)
		return (NULL);
	vmmap_add((void *)off, size);

	chunk = MIN(count, VMAP_MAX_CHUNK_SIZE);
	attr = pgprot2cachemode(prot);
	ma = malloc(chunk * sizeof(vm_page_t), M_TEMP, M_WAITOK | M_ZERO);
	fma = NULL;
	c = 0;
	coff = off;
	for (i = 0; i < count; i++) {
		pa = IDX_TO_OFF(pfns[i]);
		m = PHYS_TO_VM_PAGE(pa);
		if (m == NULL) {
			if (fma == NULL)
				fma = malloc(chunk * sizeof(struct vm_page),
				    M_TEMP, M_WAITOK | M_ZERO);
			m = fma + c;
			vm_page_initfake(m, pa, attr);
		} else {
			pmap_page_set_memattr(m, attr);
		}
		ma[c] = m;
		c++;
		if (c == chunk || i == count - 1) {
			pmap_qenter(coff, ma, c);
			if (i == count - 1)
				break;
			coff += ptoa(c);
			c = 0;
			memset(ma, 0, chunk * sizeof(vm_page_t));
			if (fma != NULL)
				memset(fma, 0, chunk * sizeof(struct vm_page));
		}
	}
	free(fma, M_TEMP);
	free(ma, M_TEMP);

	return ((void *)off);
}

void
vunmap(void *addr)
{
	struct vmmap *vmmap;

	vmmap = vmmap_remove(addr);
	if (vmmap == NULL)
		return;
	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
	kva_free((vm_offset_t)addr, vmmap->vm_size);
	kfree(vmmap);
}
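/*
 * Backs vmf_insert_pfn_prot() for managed device pagers; called from a
 * page-fault handler with the VM object write-locked.  If the target page
 * currently belongs to another object (typically shmem), it is busied,
 * unswapped and removed from that object before being inserted into ours;
 * a page that is still mapped cannot be renamed, so the fault is simply
 * retried in that case.
 */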
vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	struct pctrie_iter pages;
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	vm_page_iter_init(&pages, vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab_iter(vm_obj, pindex, VM_ALLOC_NOCREAT, &pages);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (page == NULL)
			return (VM_FAULT_SIGBUS);
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
			pctrie_iter_reset(&pages);
			goto retry;
		}
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			pctrie_iter_reset(&pages);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_iter_insert(page, vm_obj, pindex, &pages) != 0) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}

int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range has semantics and
 * parameter types compatible with the Linux version, the values passed in
 * are different:
 * @obj must match the vm_private_data field of the vm_area_struct returned
 * by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen must match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen __unused, int even_cows __unused)
{
	vm_object_t devobj;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		cdev_mgtdev_pager_free_pages(devobj);
		vm_object_deallocate(devobj);
	}
}

int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing. */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}
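/*
 * Hypothetical usage, roughly as the DRM drivers handle write-combined
 * framebuffer apertures on pre-PAT hardware (names and values are
 * illustrative):
 *
 *	int mtrr = arch_phys_wc_add(fb_base, fb_size);
 *	...
 *	arch_phys_wc_del(mtrr);
 */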
/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support up to one single page as fragment size, and we will
 * always return a full page.  This may be wasteful on small objects
 * but the only known consumer (mt76) is either asking for a half-page
 * or a full page.  If this were to become a problem we could implement
 * a more elaborate version.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain().  Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}

void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}

static void
lkpi_page_init(void *arg)
{
	int i;

	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
	for (i = 0; i < VMMAP_HASH_SIZE; i++)
		LIST_INIT(&vmmaphead[i]);
}
SYSINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_init, NULL);

static void
lkpi_page_uninit(void *arg)
{
	mtx_destroy(&vmmaplock);
}
SYSUNINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_uninit, NULL);