/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

void *
linux_page_address(struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}

struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;
		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
retry:
			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			if (page == NULL) {
				if (flags & M_WAITOK) {
					if (!vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0)) {
						vm_wait(NULL);
					}
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}
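/*
 * Usage sketch (illustrative only, not part of the LinuxKPI API): a
 * hypothetical consumer pairs linux_alloc_pages() with linux_free_pages()
 * and resolves the kernel virtual address via linux_page_address(), which
 * returns NULL only when no direct map is available.
 */
static void __unused
linux_alloc_pages_example(void)
{
	struct page *page;
	void *va;

	/* One zeroed page; GFP_KERNEL allows the allocator to sleep. */
	page = linux_alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
	if (page == NULL)
		return;
	va = linux_page_address(page);
	if (va != NULL)
		memset(va, 0, PAGE_SIZE);
	linux_free_pages(page, 0);
}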
static void
_linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			if (vm_page_unwire_noq(pgo))
				vm_page_free(pgo);
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		_linux_free_kmem(vaddr, order);
	}
}

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return ((vm_offset_t)addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	KASSERT((addr & PAGE_MASK) == 0,
	    ("%s: addr %p is not page aligned", __func__, (void *)addr));

	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
		_linux_free_kmem(addr, order);
	} else {
		vm_page_t page;

		page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
		linux_free_pages(page, order);
	}
}

static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}
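/*
 * Usage sketch (illustrative only): a hypothetical driver pins one page
 * of a user buffer in the current process for device access and drops
 * the reference afterwards with put_page().
 */
static int __unused
linux_gup_example(unsigned long uaddr)
{
	struct page *page;
	long n;

	n = get_user_pages(trunc_page(uaddr), 1, FOLL_WRITE, &page, NULL);
	if (n != 1)
		return (-EFAULT);
	/* ... hand page_to_phys(page) to a DMA engine ... */
	put_page(page);
	return (0);
}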
int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NOCREAT);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
			goto retry;
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_insert(page, vm_obj, pindex)) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}
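/*
 * Usage sketch (illustrative only): a hypothetical driver mmap callback
 * backing a VMA with `size' bytes of device memory at physical address
 * `phys'.  It assumes the platform provides VM_MEMATTR_WRITE_COMBINING,
 * the attribute GPU drivers commonly request.
 */
static int __unused
lkpi_remap_pfn_range_example(struct vm_area_struct *vma, vm_paddr_t phys,
    unsigned long size)
{
	pgprot_t prot;

	prot = cachemode2protval(VM_MEMATTR_WRITE_COMBINING);
	return (lkpi_remap_pfn_range(vma, vma->vm_start,
	    phys >> PAGE_SHIFT, size, prot));
}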
int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range has parameter
 * semantics and types compatible with the Linux version, the values
 * passed in differ:
 * @obj must match the vm_private_data field of the vm_area_struct
 *	returned by the mmap file operation handler; see
 *	linux_file_mmap_single().
 * @holelen must match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen, int even_cows __unused)
{
	vm_object_t devobj;
	vm_page_t page;
	int i, page_count;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		page_count = OFF_TO_IDX(holelen);

		VM_OBJECT_WLOCK(devobj);
retry:
		for (i = 0; i < page_count; i++) {
			page = vm_page_lookup(devobj, i);
			if (page == NULL)
				continue;
			if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
				goto retry;
			cdev_pager_free_page(devobj, page);
		}
		VM_OBJECT_WUNLOCK(devobj);
		vm_object_deallocate(devobj);
	}
}

int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}
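/*
 * Usage sketch (illustrative only): how a framebuffer driver would pair
 * the two calls above.  On anything but i386 without working PAT this is
 * a no-op; a negative return value is an errno.
 */
static int __unused
lkpi_phys_wc_example(unsigned long fb_base, unsigned long fb_size)
{
	int handle;

	handle = lkpi_arch_phys_wc_add(fb_base, fb_size);
	if (handle < 0)
		return (handle);
	/* ... access the framebuffer with write-combining ... */
	lkpi_arch_phys_wc_del(handle);
	return (0);
}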
/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support up to a single page as fragment size, and we will
 * always return a full page.  This may be wasteful on small objects,
 * but the only known consumer (mt76) either asks for a half-page or a
 * full page.  If this were to become a problem, we can implement a
 * more elaborate version.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain().  Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}

void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}
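/*
 * Usage sketch (illustrative only): a consumer such as an mt76-style
 * driver allocates a receive-buffer fragment and frees it again.
 */
static void __unused
linuxkpi_page_frag_example(void)
{
	struct page_frag_cache pfc;
	void *buf;

	buf = linuxkpi_page_frag_alloc(&pfc, PAGE_SIZE / 2, GFP_ATOMIC);
	if (buf == NULL)
		return;
	/* ... fill the fragment with received data ... */
	linuxkpi_page_frag_free(buf);
}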