/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/memrange.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

#ifdef __i386__
DEFINE_IDR(mtrr_idr);
static MALLOC_DEFINE(M_LKMTRR, "idr", "Linux MTRR compat");
extern int pat_works;
#endif

void
si_meminfo(struct sysinfo *si)
{
	si->totalram = physmem;
	si->freeram = vm_free_count();
	si->totalhigh = 0;
	si->freehigh = 0;
	si->mem_unit = PAGE_SIZE;
}

void *
linux_page_address(struct page *page)
{

	if (page->object != kernel_object) {
		return (PMAP_HAS_DMAP ?
		    ((void *)(uintptr_t)PHYS_TO_DMAP(page_to_phys(page))) :
		    NULL);
	}
	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
	    IDX_TO_OFF(page->pindex)));
}
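
/*
 * Illustrative usage (hypothetical caller, not part of this file):
 * translating a page obtained from the Linux KPI allocator back to a
 * kernel virtual address.  On systems without a direct map, pages that
 * do not belong to kernel_object have no KVA, so page_address() returns
 * NULL and the caller must map the page itself.
 *
 *	struct page *p;
 *	void *va;
 *
 *	p = alloc_page(GFP_KERNEL);
 *	if (p != NULL) {
 *		va = page_address(p);
 *		if (va == NULL) {
 *			... no DMAP and not a kernel_object page ...
 *		}
 *	}
 */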

struct page *
linux_alloc_pages(gfp_t flags, unsigned int order)
{
	struct page *page;

	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		int req = VM_ALLOC_WIRED;

		if ((flags & M_ZERO) != 0)
			req |= VM_ALLOC_ZERO;
		if (order == 0 && (flags & GFP_DMA32) == 0) {
			page = vm_page_alloc_noobj(req);
			if (page == NULL)
				return (NULL);
		} else {
			vm_paddr_t pmax = (flags & GFP_DMA32) ?
			    BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
retry:
			page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
			    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
			if (page == NULL) {
				if (flags & M_WAITOK) {
					int err = vm_page_reclaim_contig(req,
					    npages, 0, pmax, PAGE_SIZE, 0);
					if (err == ENOMEM)
						vm_wait(NULL);
					else if (err != 0)
						return (NULL);
					flags &= ~M_WAITOK;
					goto retry;
				}
				return (NULL);
			}
		}
	} else {
		vm_offset_t vaddr;

		vaddr = linux_alloc_kmem(flags, order);
		if (vaddr == 0)
			return (NULL);

		page = virt_to_page((void *)vaddr);

		KASSERT(vaddr == (vm_offset_t)page_address(page),
		    ("Page address mismatch"));
	}

	return (page);
}

static void
_linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;

	kmem_free((void *)addr, size);
}

void
linux_free_pages(struct page *page, unsigned int order)
{
	if (PMAP_HAS_DMAP) {
		unsigned long npages = 1UL << order;
		unsigned long x;

		for (x = 0; x != npages; x++) {
			vm_page_t pgo = page + x;

			if (vm_page_unwire_noq(pgo))
				vm_page_free(pgo);
		}
	} else {
		vm_offset_t vaddr;

		vaddr = (vm_offset_t)page_address(page);

		_linux_free_kmem(vaddr, order);
	}
}

vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
	size_t size = ((size_t)PAGE_SIZE) << order;
	void *addr;

	if ((flags & GFP_DMA32) == 0) {
		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
	} else {
		addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
		    BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	}
	return ((vm_offset_t)addr);
}

void
linux_free_kmem(vm_offset_t addr, unsigned int order)
{
	KASSERT((addr & PAGE_MASK) == 0,
	    ("%s: addr %p is not page aligned", __func__, (void *)addr));

	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
		_linux_free_kmem(addr, order);
	} else {
		vm_page_t page;

		/* Not a kernel-map address; assume it is direct-mapped. */
		page = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(addr));
		linux_free_pages(page, order);
	}
}

static int
linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
    int write, struct page **pages)
{
	vm_prot_t prot;
	size_t len;
	int count;

	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	len = ptoa((vm_offset_t)nr_pages);
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages,
	    nr_pages);
	return (count == -1 ? -EFAULT : nr_pages);
}

int
__get_user_pages_fast(unsigned long start, int nr_pages, int write,
    struct page **pages)
{
	vm_map_t map;
	vm_page_t *mp;
	vm_offset_t va;
	vm_offset_t end;
	vm_prot_t prot;
	int count;

	if (nr_pages == 0 || in_interrupt())
		return (0);

	MPASS(pages != NULL);
	map = &curthread->td_proc->p_vmspace->vm_map;
	end = start + ptoa((vm_offset_t)nr_pages);
	if (!vm_map_range_valid(map, start, end))
		return (-EINVAL);
	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	for (count = 0, mp = pages, va = start; va < end;
	    mp++, va += PAGE_SIZE, count++) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			break;

		if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	return (count);
}
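
/*
 * Illustrative usage (hypothetical, not part of this file): pinning one
 * writable user page without sleeping.  On success the page is returned
 * held; the caller drops the hold with put_page() once the I/O that
 * needed it has completed.
 *
 *	struct page *pg;
 *
 *	if (__get_user_pages_fast(uaddr, 1, 1, &pg) == 1) {
 *		... access the page, e.g. via its physical address ...
 *		put_page(pg);
 *	}
 */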

long
get_user_pages_remote(struct task_struct *task, struct mm_struct *mm,
    unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
    struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &task->task_thread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

long
get_user_pages(unsigned long start, unsigned long nr_pages,
    unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas)
{
	vm_map_t map;

	map = &curthread->td_proc->p_vmspace->vm_map;
	return (linux_get_user_pages_internal(map, start, nr_pages,
	    !!(gup_flags & FOLL_WRITE), pages));
}

int
is_vmalloc_addr(const void *addr)
{
	return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
}

vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, pgprot_t prot)
{
	vm_object_t vm_obj = vma->vm_obj;
	vm_object_t tmp_obj;
	vm_page_t page;
	vm_pindex_t pindex;

	VM_OBJECT_ASSERT_WLOCKED(vm_obj);
	pindex = OFF_TO_IDX(addr - vma->vm_start);
	if (vma->vm_pfn_count == 0)
		vma->vm_pfn_first = pindex;
	MPASS(pindex <= OFF_TO_IDX(vma->vm_end));

retry:
	page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NOCREAT);
	if (page == NULL) {
		page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
		if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
			goto retry;
		if (page->object != NULL) {
			tmp_obj = page->object;
			vm_page_xunbusy(page);
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_OBJECT_WLOCK(tmp_obj);
			if (page->object == tmp_obj &&
			    vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL)) {
				KASSERT(page->object == tmp_obj,
				    ("page has changed identity"));
				KASSERT((page->oflags & VPO_UNMANAGED) == 0,
				    ("page does not belong to shmem"));
				vm_pager_page_unswapped(page);
				if (pmap_page_is_mapped(page)) {
					vm_page_xunbusy(page);
					VM_OBJECT_WUNLOCK(tmp_obj);
					printf("%s: page rename failed: page "
					    "is mapped\n", __func__);
					VM_OBJECT_WLOCK(vm_obj);
					return (VM_FAULT_NOPAGE);
				}
				vm_page_remove(page);
			}
			VM_OBJECT_WUNLOCK(tmp_obj);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}
		if (vm_page_insert(page, vm_obj, pindex)) {
			vm_page_xunbusy(page);
			return (VM_FAULT_OOM);
		}
		vm_page_valid(page);
	}
	pmap_page_set_memattr(page, pgprot2cachemode(prot));
	vma->vm_pfn_count++;

	return (VM_FAULT_NOPAGE);
}

int
lkpi_remap_pfn_range(struct vm_area_struct *vma, unsigned long start_addr,
    unsigned long start_pfn, unsigned long size, pgprot_t prot)
{
	vm_object_t vm_obj;
	unsigned long addr, pfn;
	int err = 0;

	vm_obj = vma->vm_obj;

	VM_OBJECT_WLOCK(vm_obj);
	for (addr = start_addr, pfn = start_pfn;
	    addr < start_addr + size;
	    addr += PAGE_SIZE) {
		vm_fault_t ret;
retry:
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);

		if ((ret & VM_FAULT_OOM) != 0) {
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(NULL);
			VM_OBJECT_WLOCK(vm_obj);
			goto retry;
		}

		if ((ret & VM_FAULT_ERROR) != 0) {
			err = -EFAULT;
			break;
		}

		pfn++;
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	if (unlikely(err)) {
		zap_vma_ptes(vma, start_addr,
		    (pfn - start_pfn) << PAGE_SHIFT);
		return (err);
	}

	return (0);
}
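
/*
 * Illustrative usage (hypothetical driver mmap handler, not part of this
 * file): Linux consumers reach lkpi_remap_pfn_range() through the
 * remap_pfn_range() macro, typically to expose a physically contiguous
 * buffer to user space.  "buf_paddr" is an assumed physical base address.
 *
 *	static int
 *	foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		return (remap_pfn_range(vma, vma->vm_start,
 *		    buf_paddr >> PAGE_SHIFT,
 *		    vma->vm_end - vma->vm_start, vma->vm_page_prot));
 *	}
 */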

int
lkpi_io_mapping_map_user(struct io_mapping *iomap,
    struct vm_area_struct *vma, unsigned long addr,
    unsigned long pfn, unsigned long size)
{
	pgprot_t prot;
	int ret;

	prot = cachemode2protval(iomap->attr);
	ret = lkpi_remap_pfn_range(vma, addr, pfn, size, prot);

	return (ret);
}

/*
 * Although the FreeBSD version of unmap_mapping_range has semantics and
 * parameter types compatible with the Linux version, the values passed in
 * are different:
 * @obj must match the vm_private_data field of the vm_area_struct returned
 * by the mmap file operation handler; see linux_file_mmap_single().
 * @holelen must match the size of the area to be unmapped.
 */
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
    loff_t const holelen, int even_cows __unused)
{
	vm_object_t devobj;
	vm_page_t page;
	int i, page_count;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		page_count = OFF_TO_IDX(holelen);

		VM_OBJECT_WLOCK(devobj);
retry:
		for (i = 0; i < page_count; i++) {
			page = vm_page_lookup(devobj, i);
			if (page == NULL)
				continue;
			if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
				goto retry;
			cdev_pager_free_page(devobj, page);
		}
		VM_OBJECT_WUNLOCK(devobj);
		vm_object_deallocate(devobj);
	}
}

int
lkpi_arch_phys_wc_add(unsigned long base, unsigned long size)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int error, id, act;

	/* If PAT is available, do nothing */
	if (pat_works)
		return (0);

	mrdesc = malloc(sizeof(*mrdesc), M_LKMTRR, M_WAITOK);
	mrdesc->mr_base = base;
	mrdesc->mr_len = size;
	mrdesc->mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrdesc->mr_owner, "drm", sizeof(mrdesc->mr_owner));
	act = MEMRANGE_SET_UPDATE;
	error = mem_range_attr_set(mrdesc, &act);
	if (error == 0) {
		error = idr_get_new(&mtrr_idr, mrdesc, &id);
		MPASS(idr_find(&mtrr_idr, id) == mrdesc);
		if (error != 0) {
			act = MEMRANGE_SET_REMOVE;
			mem_range_attr_set(mrdesc, &act);
		}
	}
	if (error != 0) {
		free(mrdesc, M_LKMTRR);
		pr_warn(
		    "Failed to add WC MTRR for [%p-%p]: %d; "
		    "performance may suffer\n",
		    (void *)base, (void *)(base + size - 1), error);
	} else
		pr_warn("Successfully added WC MTRR for [%p-%p]\n",
		    (void *)base, (void *)(base + size - 1));

	return (error != 0 ? -error : id + __MTRR_ID_BASE);
#else
	return (0);
#endif
}

void
lkpi_arch_phys_wc_del(int reg)
{
#ifdef __i386__
	struct mem_range_desc *mrdesc;
	int act;

	/* Check if arch_phys_wc_add() failed. */
	if (reg < __MTRR_ID_BASE)
		return;

	mrdesc = idr_find(&mtrr_idr, reg - __MTRR_ID_BASE);
	MPASS(mrdesc != NULL);
	idr_remove(&mtrr_idr, reg - __MTRR_ID_BASE);
	act = MEMRANGE_SET_REMOVE;
	mem_range_attr_set(mrdesc, &act);
	free(mrdesc, M_LKMTRR);
#endif
}
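
/*
 * Illustrative usage (hypothetical, not part of this file): DRM drivers
 * pair these through the arch_phys_wc_add()/arch_phys_wc_del() macros to
 * request write-combining for a framebuffer aperture on pre-PAT hardware.
 * "fb_base" and "fb_size" are assumed values.
 *
 *	int wc;
 *
 *	wc = arch_phys_wc_add(fb_base, fb_size);
 *	...
 *	arch_phys_wc_del(wc);	// safe even if the add failed
 */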

/*
 * This is a highly simplified version of the Linux page_frag_cache.
 * We only support up to a single page as the fragment size and we will
 * always return a full page.  This may be wasteful on small objects
 * but the only known consumer (mt76) is either asking for a half-page
 * or a full page.  If this were to become a problem we can implement
 * a more elaborate version.
 */
void *
linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
    size_t fragsz, gfp_t gfp)
{
	vm_page_t pages;

	if (fragsz == 0)
		return (NULL);

	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
	    "supported", __func__, fragsz));

	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
	if (pages == NULL)
		return (NULL);
	pfc->va = linux_page_address(pages);

	/* Passed in as "count" to __page_frag_cache_drain().  Unused by us. */
	pfc->pagecnt_bias = 0;

	return (pfc->va);
}

void
linuxkpi_page_frag_free(void *addr)
{
	vm_page_t page;

	page = virt_to_page(addr);
	linux_free_pages(page, 0);
}

void
linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
{

	linux_free_pages(page, 0);
}
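
/*
 * Illustrative usage (hypothetical consumer, not part of this file):
 * the fragment cache is reached through the page_frag_alloc() and
 * page_frag_free() wrappers; given the simplification above, any
 * fragsz up to PAGE_SIZE yields a dedicated full page.
 *
 *	struct page_frag_cache pfc;
 *	void *buf;
 *
 *	buf = page_frag_alloc(&pfc, 2048, GFP_ATOMIC);
 *	if (buf != NULL) {
 *		...
 *		page_frag_free(buf);
 *	}
 */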