1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 1993 Linus Torvalds 4 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 5 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 6 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 7 * Numa awareness, Christoph Lameter, SGI, June 2005 8 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019 9 */ 10 11 #include <linux/vmalloc.h> 12 #include <linux/mm.h> 13 #include <linux/module.h> 14 #include <linux/highmem.h> 15 #include <linux/sched/signal.h> 16 #include <linux/slab.h> 17 #include <linux/spinlock.h> 18 #include <linux/interrupt.h> 19 #include <linux/proc_fs.h> 20 #include <linux/seq_file.h> 21 #include <linux/set_memory.h> 22 #include <linux/debugobjects.h> 23 #include <linux/kallsyms.h> 24 #include <linux/list.h> 25 #include <linux/notifier.h> 26 #include <linux/rbtree.h> 27 #include <linux/xarray.h> 28 #include <linux/io.h> 29 #include <linux/rcupdate.h> 30 #include <linux/pfn.h> 31 #include <linux/kmemleak.h> 32 #include <linux/atomic.h> 33 #include <linux/compiler.h> 34 #include <linux/memcontrol.h> 35 #include <linux/llist.h> 36 #include <linux/uio.h> 37 #include <linux/bitops.h> 38 #include <linux/rbtree_augmented.h> 39 #include <linux/overflow.h> 40 #include <linux/pgtable.h> 41 #include <linux/hugetlb.h> 42 #include <linux/sched/mm.h> 43 #include <asm/tlbflush.h> 44 #include <asm/shmparam.h> 45 #include <linux/page_owner.h> 46 47 #define CREATE_TRACE_POINTS 48 #include <trace/events/vmalloc.h> 49 50 #include "internal.h" 51 #include "pgalloc-track.h" 52 53 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP 54 static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1; 55 56 static int __init set_nohugeiomap(char *str) 57 { 58 ioremap_max_page_shift = PAGE_SHIFT; 59 return 0; 60 } 61 early_param("nohugeiomap", set_nohugeiomap); 62 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 63 static const unsigned int ioremap_max_page_shift = PAGE_SHIFT; 64 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 65 66 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 67 static bool __ro_after_init vmap_allow_huge = true; 68 69 static int __init set_nohugevmalloc(char *str) 70 { 71 vmap_allow_huge = false; 72 return 0; 73 } 74 early_param("nohugevmalloc", set_nohugevmalloc); 75 #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ 76 static const bool vmap_allow_huge = false; 77 #endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ 78 79 bool is_vmalloc_addr(const void *x) 80 { 81 unsigned long addr = (unsigned long)kasan_reset_tag(x); 82 83 return addr >= VMALLOC_START && addr < VMALLOC_END; 84 } 85 EXPORT_SYMBOL(is_vmalloc_addr); 86 87 struct vfree_deferred { 88 struct llist_head list; 89 struct work_struct wq; 90 }; 91 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); 92 93 /*** Page table manipulation functions ***/ 94 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 95 phys_addr_t phys_addr, pgprot_t prot, 96 unsigned int max_page_shift, pgtbl_mod_mask *mask) 97 { 98 pte_t *pte; 99 u64 pfn; 100 struct page *page; 101 unsigned long size = PAGE_SIZE; 102 103 pfn = phys_addr >> PAGE_SHIFT; 104 pte = pte_alloc_kernel_track(pmd, addr, mask); 105 if (!pte) 106 return -ENOMEM; 107 do { 108 if (unlikely(!pte_none(ptep_get(pte)))) { 109 if (pfn_valid(pfn)) { 110 page = pfn_to_page(pfn); 111 dump_page(page, "remapping already mapped page"); 112 } 113 BUG(); 114 } 115 116 #ifdef CONFIG_HUGETLB_PAGE 117 size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); 
118 if (size != PAGE_SIZE) { 119 pte_t entry = pfn_pte(pfn, prot); 120 121 entry = arch_make_huge_pte(entry, ilog2(size), 0); 122 set_huge_pte_at(&init_mm, addr, pte, entry, size); 123 pfn += PFN_DOWN(size); 124 continue; 125 } 126 #endif 127 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); 128 pfn++; 129 } while (pte += PFN_DOWN(size), addr += size, addr != end); 130 *mask |= PGTBL_PTE_MODIFIED; 131 return 0; 132 } 133 134 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, 135 phys_addr_t phys_addr, pgprot_t prot, 136 unsigned int max_page_shift) 137 { 138 if (max_page_shift < PMD_SHIFT) 139 return 0; 140 141 if (!arch_vmap_pmd_supported(prot)) 142 return 0; 143 144 if ((end - addr) != PMD_SIZE) 145 return 0; 146 147 if (!IS_ALIGNED(addr, PMD_SIZE)) 148 return 0; 149 150 if (!IS_ALIGNED(phys_addr, PMD_SIZE)) 151 return 0; 152 153 if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) 154 return 0; 155 156 return pmd_set_huge(pmd, phys_addr, prot); 157 } 158 159 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, 160 phys_addr_t phys_addr, pgprot_t prot, 161 unsigned int max_page_shift, pgtbl_mod_mask *mask) 162 { 163 pmd_t *pmd; 164 unsigned long next; 165 166 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); 167 if (!pmd) 168 return -ENOMEM; 169 do { 170 next = pmd_addr_end(addr, end); 171 172 if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, 173 max_page_shift)) { 174 *mask |= PGTBL_PMD_MODIFIED; 175 continue; 176 } 177 178 if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask)) 179 return -ENOMEM; 180 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); 181 return 0; 182 } 183 184 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, 185 phys_addr_t phys_addr, pgprot_t prot, 186 unsigned int max_page_shift) 187 { 188 if (max_page_shift < PUD_SHIFT) 189 return 0; 190 191 if (!arch_vmap_pud_supported(prot)) 192 return 0; 193 194 if ((end - addr) != PUD_SIZE) 195 return 0; 196 197 if (!IS_ALIGNED(addr, PUD_SIZE)) 198 return 0; 199 200 if (!IS_ALIGNED(phys_addr, PUD_SIZE)) 201 return 0; 202 203 if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) 204 return 0; 205 206 return pud_set_huge(pud, phys_addr, prot); 207 } 208 209 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, 210 phys_addr_t phys_addr, pgprot_t prot, 211 unsigned int max_page_shift, pgtbl_mod_mask *mask) 212 { 213 pud_t *pud; 214 unsigned long next; 215 216 pud = pud_alloc_track(&init_mm, p4d, addr, mask); 217 if (!pud) 218 return -ENOMEM; 219 do { 220 next = pud_addr_end(addr, end); 221 222 if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, 223 max_page_shift)) { 224 *mask |= PGTBL_PUD_MODIFIED; 225 continue; 226 } 227 228 if (vmap_pmd_range(pud, addr, next, phys_addr, prot, 229 max_page_shift, mask)) 230 return -ENOMEM; 231 } while (pud++, phys_addr += (next - addr), addr = next, addr != end); 232 return 0; 233 } 234 235 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, 236 phys_addr_t phys_addr, pgprot_t prot, 237 unsigned int max_page_shift) 238 { 239 if (max_page_shift < P4D_SHIFT) 240 return 0; 241 242 if (!arch_vmap_p4d_supported(prot)) 243 return 0; 244 245 if ((end - addr) != P4D_SIZE) 246 return 0; 247 248 if (!IS_ALIGNED(addr, P4D_SIZE)) 249 return 0; 250 251 if (!IS_ALIGNED(phys_addr, P4D_SIZE)) 252 return 0; 253 254 if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) 255 return 0; 256 257 return p4d_set_huge(p4d, 
phys_addr, prot); 258 } 259 260 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, 261 phys_addr_t phys_addr, pgprot_t prot, 262 unsigned int max_page_shift, pgtbl_mod_mask *mask) 263 { 264 p4d_t *p4d; 265 unsigned long next; 266 267 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); 268 if (!p4d) 269 return -ENOMEM; 270 do { 271 next = p4d_addr_end(addr, end); 272 273 if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, 274 max_page_shift)) { 275 *mask |= PGTBL_P4D_MODIFIED; 276 continue; 277 } 278 279 if (vmap_pud_range(p4d, addr, next, phys_addr, prot, 280 max_page_shift, mask)) 281 return -ENOMEM; 282 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); 283 return 0; 284 } 285 286 static int vmap_range_noflush(unsigned long addr, unsigned long end, 287 phys_addr_t phys_addr, pgprot_t prot, 288 unsigned int max_page_shift) 289 { 290 pgd_t *pgd; 291 unsigned long start; 292 unsigned long next; 293 int err; 294 pgtbl_mod_mask mask = 0; 295 296 might_sleep(); 297 BUG_ON(addr >= end); 298 299 start = addr; 300 pgd = pgd_offset_k(addr); 301 do { 302 next = pgd_addr_end(addr, end); 303 err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, 304 max_page_shift, &mask); 305 if (err) 306 break; 307 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); 308 309 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 310 arch_sync_kernel_mappings(start, end); 311 312 return err; 313 } 314 315 int vmap_page_range(unsigned long addr, unsigned long end, 316 phys_addr_t phys_addr, pgprot_t prot) 317 { 318 int err; 319 320 err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), 321 ioremap_max_page_shift); 322 flush_cache_vmap(addr, end); 323 if (!err) 324 err = kmsan_ioremap_page_range(addr, end, phys_addr, prot, 325 ioremap_max_page_shift); 326 return err; 327 } 328 329 int ioremap_page_range(unsigned long addr, unsigned long end, 330 phys_addr_t phys_addr, pgprot_t prot) 331 { 332 struct vm_struct *area; 333 334 area = find_vm_area((void *)addr); 335 if (!area || !(area->flags & VM_IOREMAP)) { 336 WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr); 337 return -EINVAL; 338 } 339 if (addr != (unsigned long)area->addr || 340 (void *)end != area->addr + get_vm_area_size(area)) { 341 WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n", 342 addr, end, (long)area->addr, 343 (long)area->addr + get_vm_area_size(area)); 344 return -ERANGE; 345 } 346 return vmap_page_range(addr, end, phys_addr, prot); 347 } 348 349 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 350 pgtbl_mod_mask *mask) 351 { 352 pte_t *pte; 353 354 pte = pte_offset_kernel(pmd, addr); 355 do { 356 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); 357 WARN_ON(!pte_none(ptent) && !pte_present(ptent)); 358 } while (pte++, addr += PAGE_SIZE, addr != end); 359 *mask |= PGTBL_PTE_MODIFIED; 360 } 361 362 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, 363 pgtbl_mod_mask *mask) 364 { 365 pmd_t *pmd; 366 unsigned long next; 367 int cleared; 368 369 pmd = pmd_offset(pud, addr); 370 do { 371 next = pmd_addr_end(addr, end); 372 373 cleared = pmd_clear_huge(pmd); 374 if (cleared || pmd_bad(*pmd)) 375 *mask |= PGTBL_PMD_MODIFIED; 376 377 if (cleared) 378 continue; 379 if (pmd_none_or_clear_bad(pmd)) 380 continue; 381 vunmap_pte_range(pmd, addr, next, mask); 382 383 cond_resched(); 384 } while (pmd++, addr = next, addr != end); 385 } 386 387 static void vunmap_pud_range(p4d_t *p4d, unsigned long 
addr, unsigned long end, 388 pgtbl_mod_mask *mask) 389 { 390 pud_t *pud; 391 unsigned long next; 392 int cleared; 393 394 pud = pud_offset(p4d, addr); 395 do { 396 next = pud_addr_end(addr, end); 397 398 cleared = pud_clear_huge(pud); 399 if (cleared || pud_bad(*pud)) 400 *mask |= PGTBL_PUD_MODIFIED; 401 402 if (cleared) 403 continue; 404 if (pud_none_or_clear_bad(pud)) 405 continue; 406 vunmap_pmd_range(pud, addr, next, mask); 407 } while (pud++, addr = next, addr != end); 408 } 409 410 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, 411 pgtbl_mod_mask *mask) 412 { 413 p4d_t *p4d; 414 unsigned long next; 415 416 p4d = p4d_offset(pgd, addr); 417 do { 418 next = p4d_addr_end(addr, end); 419 420 p4d_clear_huge(p4d); 421 if (p4d_bad(*p4d)) 422 *mask |= PGTBL_P4D_MODIFIED; 423 424 if (p4d_none_or_clear_bad(p4d)) 425 continue; 426 vunmap_pud_range(p4d, addr, next, mask); 427 } while (p4d++, addr = next, addr != end); 428 } 429 430 /* 431 * vunmap_range_noflush is similar to vunmap_range, but does not 432 * flush caches or TLBs. 433 * 434 * The caller is responsible for calling flush_cache_vmap() before calling 435 * this function, and flush_tlb_kernel_range after it has returned 436 * successfully (and before the addresses are expected to cause a page fault 437 * or be re-mapped for something else, if TLB flushes are being delayed or 438 * coalesced). 439 * 440 * This is an internal function only. Do not use outside mm/. 441 */ 442 void __vunmap_range_noflush(unsigned long start, unsigned long end) 443 { 444 unsigned long next; 445 pgd_t *pgd; 446 unsigned long addr = start; 447 pgtbl_mod_mask mask = 0; 448 449 BUG_ON(addr >= end); 450 pgd = pgd_offset_k(addr); 451 do { 452 next = pgd_addr_end(addr, end); 453 if (pgd_bad(*pgd)) 454 mask |= PGTBL_PGD_MODIFIED; 455 if (pgd_none_or_clear_bad(pgd)) 456 continue; 457 vunmap_p4d_range(pgd, addr, next, &mask); 458 } while (pgd++, addr = next, addr != end); 459 460 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 461 arch_sync_kernel_mappings(start, end); 462 } 463 464 void vunmap_range_noflush(unsigned long start, unsigned long end) 465 { 466 kmsan_vunmap_range_noflush(start, end); 467 __vunmap_range_noflush(start, end); 468 } 469 470 /** 471 * vunmap_range - unmap kernel virtual addresses 472 * @addr: start of the VM area to unmap 473 * @end: end of the VM area to unmap (non-inclusive) 474 * 475 * Clears any present PTEs in the virtual address range, flushes TLBs and 476 * caches. Any subsequent access to the address before it has been re-mapped 477 * is a kernel bug. 478 */ 479 void vunmap_range(unsigned long addr, unsigned long end) 480 { 481 flush_cache_vunmap(addr, end); 482 vunmap_range_noflush(addr, end); 483 flush_tlb_kernel_range(addr, end); 484 } 485 486 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, 487 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 488 pgtbl_mod_mask *mask) 489 { 490 pte_t *pte; 491 492 /* 493 * nr is a running index into the array which helps higher level 494 * callers keep track of where we're up to. 
495 */ 496 497 pte = pte_alloc_kernel_track(pmd, addr, mask); 498 if (!pte) 499 return -ENOMEM; 500 do { 501 struct page *page = pages[*nr]; 502 503 if (WARN_ON(!pte_none(ptep_get(pte)))) 504 return -EBUSY; 505 if (WARN_ON(!page)) 506 return -ENOMEM; 507 if (WARN_ON(!pfn_valid(page_to_pfn(page)))) 508 return -EINVAL; 509 510 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); 511 (*nr)++; 512 } while (pte++, addr += PAGE_SIZE, addr != end); 513 *mask |= PGTBL_PTE_MODIFIED; 514 return 0; 515 } 516 517 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, 518 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 519 pgtbl_mod_mask *mask) 520 { 521 pmd_t *pmd; 522 unsigned long next; 523 524 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); 525 if (!pmd) 526 return -ENOMEM; 527 do { 528 next = pmd_addr_end(addr, end); 529 if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask)) 530 return -ENOMEM; 531 } while (pmd++, addr = next, addr != end); 532 return 0; 533 } 534 535 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr, 536 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 537 pgtbl_mod_mask *mask) 538 { 539 pud_t *pud; 540 unsigned long next; 541 542 pud = pud_alloc_track(&init_mm, p4d, addr, mask); 543 if (!pud) 544 return -ENOMEM; 545 do { 546 next = pud_addr_end(addr, end); 547 if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask)) 548 return -ENOMEM; 549 } while (pud++, addr = next, addr != end); 550 return 0; 551 } 552 553 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, 554 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 555 pgtbl_mod_mask *mask) 556 { 557 p4d_t *p4d; 558 unsigned long next; 559 560 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); 561 if (!p4d) 562 return -ENOMEM; 563 do { 564 next = p4d_addr_end(addr, end); 565 if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask)) 566 return -ENOMEM; 567 } while (p4d++, addr = next, addr != end); 568 return 0; 569 } 570 571 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, 572 pgprot_t prot, struct page **pages) 573 { 574 unsigned long start = addr; 575 pgd_t *pgd; 576 unsigned long next; 577 int err = 0; 578 int nr = 0; 579 pgtbl_mod_mask mask = 0; 580 581 BUG_ON(addr >= end); 582 pgd = pgd_offset_k(addr); 583 do { 584 next = pgd_addr_end(addr, end); 585 if (pgd_bad(*pgd)) 586 mask |= PGTBL_PGD_MODIFIED; 587 err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); 588 if (err) 589 return err; 590 } while (pgd++, addr = next, addr != end); 591 592 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 593 arch_sync_kernel_mappings(start, end); 594 595 return 0; 596 } 597 598 /* 599 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not 600 * flush caches. 601 * 602 * The caller is responsible for calling flush_cache_vmap() after this 603 * function returns successfully and before the addresses are accessed. 604 * 605 * This is an internal function only. Do not use outside mm/. 
606 */ 607 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end, 608 pgprot_t prot, struct page **pages, unsigned int page_shift) 609 { 610 unsigned int i, nr = (end - addr) >> PAGE_SHIFT; 611 612 WARN_ON(page_shift < PAGE_SHIFT); 613 614 if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || 615 page_shift == PAGE_SHIFT) 616 return vmap_small_pages_range_noflush(addr, end, prot, pages); 617 618 for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) { 619 int err; 620 621 err = vmap_range_noflush(addr, addr + (1UL << page_shift), 622 page_to_phys(pages[i]), prot, 623 page_shift); 624 if (err) 625 return err; 626 627 addr += 1UL << page_shift; 628 } 629 630 return 0; 631 } 632 633 int vmap_pages_range_noflush(unsigned long addr, unsigned long end, 634 pgprot_t prot, struct page **pages, unsigned int page_shift) 635 { 636 int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages, 637 page_shift); 638 639 if (ret) 640 return ret; 641 return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift); 642 } 643 644 /** 645 * vmap_pages_range - map pages to a kernel virtual address 646 * @addr: start of the VM area to map 647 * @end: end of the VM area to map (non-inclusive) 648 * @prot: page protection flags to use 649 * @pages: pages to map (always PAGE_SIZE pages) 650 * @page_shift: maximum shift that the pages may be mapped with, @pages must 651 * be aligned and contiguous up to at least this shift. 652 * 653 * RETURNS: 654 * 0 on success, -errno on failure. 655 */ 656 static int vmap_pages_range(unsigned long addr, unsigned long end, 657 pgprot_t prot, struct page **pages, unsigned int page_shift) 658 { 659 int err; 660 661 err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); 662 flush_cache_vmap(addr, end); 663 return err; 664 } 665 666 static int check_sparse_vm_area(struct vm_struct *area, unsigned long start, 667 unsigned long end) 668 { 669 might_sleep(); 670 if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS)) 671 return -EINVAL; 672 if (WARN_ON_ONCE(area->flags & VM_NO_GUARD)) 673 return -EINVAL; 674 if (WARN_ON_ONCE(!(area->flags & VM_SPARSE))) 675 return -EINVAL; 676 if ((end - start) >> PAGE_SHIFT > totalram_pages()) 677 return -E2BIG; 678 if (start < (unsigned long)area->addr || 679 (void *)end > area->addr + get_vm_area_size(area)) 680 return -ERANGE; 681 return 0; 682 } 683 684 /** 685 * vm_area_map_pages - map pages inside given sparse vm_area 686 * @area: vm_area 687 * @start: start address inside vm_area 688 * @end: end address inside vm_area 689 * @pages: pages to map (always PAGE_SIZE pages) 690 */ 691 int vm_area_map_pages(struct vm_struct *area, unsigned long start, 692 unsigned long end, struct page **pages) 693 { 694 int err; 695 696 err = check_sparse_vm_area(area, start, end); 697 if (err) 698 return err; 699 700 return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT); 701 } 702 703 /** 704 * vm_area_unmap_pages - unmap pages inside given sparse vm_area 705 * @area: vm_area 706 * @start: start address inside vm_area 707 * @end: end address inside vm_area 708 */ 709 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start, 710 unsigned long end) 711 { 712 if (check_sparse_vm_area(area, start, end)) 713 return; 714 715 vunmap_range(start, end); 716 } 717 718 int is_vmalloc_or_module_addr(const void *x) 719 { 720 /* 721 * ARM, x86-64 and sparc64 put modules in a special place, 722 * and fall back on vmalloc() if that fails. Others 723 * just put it in the vmalloc space. 
724 */ 725 #if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR) 726 unsigned long addr = (unsigned long)kasan_reset_tag(x); 727 if (addr >= MODULES_VADDR && addr < MODULES_END) 728 return 1; 729 #endif 730 return is_vmalloc_addr(x); 731 } 732 EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr); 733 734 /* 735 * Walk a vmap address to the struct page it maps. Huge vmap mappings will 736 * return the tail page that corresponds to the base page address, which 737 * matches small vmap mappings. 738 */ 739 struct page *vmalloc_to_page(const void *vmalloc_addr) 740 { 741 unsigned long addr = (unsigned long) vmalloc_addr; 742 struct page *page = NULL; 743 pgd_t *pgd = pgd_offset_k(addr); 744 p4d_t *p4d; 745 pud_t *pud; 746 pmd_t *pmd; 747 pte_t *ptep, pte; 748 749 /* 750 * XXX we might need to change this if we add VIRTUAL_BUG_ON for 751 * architectures that do not vmalloc module space 752 */ 753 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); 754 755 if (pgd_none(*pgd)) 756 return NULL; 757 if (WARN_ON_ONCE(pgd_leaf(*pgd))) 758 return NULL; /* XXX: no allowance for huge pgd */ 759 if (WARN_ON_ONCE(pgd_bad(*pgd))) 760 return NULL; 761 762 p4d = p4d_offset(pgd, addr); 763 if (p4d_none(*p4d)) 764 return NULL; 765 if (p4d_leaf(*p4d)) 766 return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT); 767 if (WARN_ON_ONCE(p4d_bad(*p4d))) 768 return NULL; 769 770 pud = pud_offset(p4d, addr); 771 if (pud_none(*pud)) 772 return NULL; 773 if (pud_leaf(*pud)) 774 return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); 775 if (WARN_ON_ONCE(pud_bad(*pud))) 776 return NULL; 777 778 pmd = pmd_offset(pud, addr); 779 if (pmd_none(*pmd)) 780 return NULL; 781 if (pmd_leaf(*pmd)) 782 return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); 783 if (WARN_ON_ONCE(pmd_bad(*pmd))) 784 return NULL; 785 786 ptep = pte_offset_kernel(pmd, addr); 787 pte = ptep_get(ptep); 788 if (pte_present(pte)) 789 page = pte_page(pte); 790 791 return page; 792 } 793 EXPORT_SYMBOL(vmalloc_to_page); 794 795 /* 796 * Map a vmalloc()-space virtual address to the physical page frame number. 797 */ 798 unsigned long vmalloc_to_pfn(const void *vmalloc_addr) 799 { 800 return page_to_pfn(vmalloc_to_page(vmalloc_addr)); 801 } 802 EXPORT_SYMBOL(vmalloc_to_pfn); 803 804 805 /*** Global kva allocator ***/ 806 807 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0 808 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0 809 810 811 static DEFINE_SPINLOCK(free_vmap_area_lock); 812 static bool vmap_initialized __read_mostly; 813 814 /* 815 * This kmem_cache is used for vmap_area objects. Instead of 816 * allocating from slab we reuse an object from this cache to 817 * make things faster. Especially in "no edge" splitting of 818 * free block. 819 */ 820 static struct kmem_cache *vmap_area_cachep; 821 822 /* 823 * This linked list is used in pair with free_vmap_area_root. 824 * It gives O(1) access to prev/next to perform fast coalescing. 825 */ 826 static LIST_HEAD(free_vmap_area_list); 827 828 /* 829 * This augment red-black tree represents the free vmap space. 830 * All vmap_area objects in this tree are sorted by va->va_start 831 * address. It is used for allocation and merging when a vmap 832 * object is released. 833 * 834 * Each vmap_area node contains a maximum available free block 835 * of its sub-tree, right or left. Therefore it is possible to 836 * find a lowest match of free area. 837 */ 838 static struct rb_root free_vmap_area_root = RB_ROOT; 839 840 /* 841 * Preload a CPU with one object for "no edge" split case. 
The 842 * aim is to get rid of allocations from the atomic context, thus 843 * to use more permissive allocation masks. 844 */ 845 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node); 846 847 /* 848 * This structure defines a single, solid model where a list and 849 * rb-tree are part of one entity protected by the lock. Nodes are 850 * sorted in ascending order, thus for O(1) access to left/right 851 * neighbors a list is used as well as for sequential traversal. 852 */ 853 struct rb_list { 854 struct rb_root root; 855 struct list_head head; 856 spinlock_t lock; 857 }; 858 859 /* 860 * A fast size storage contains VAs up to 1M size. A pool consists 861 * of linked between each other ready to go VAs of certain sizes. 862 * An index in the pool-array corresponds to number of pages + 1. 863 */ 864 #define MAX_VA_SIZE_PAGES 256 865 866 struct vmap_pool { 867 struct list_head head; 868 unsigned long len; 869 }; 870 871 /* 872 * An effective vmap-node logic. Users make use of nodes instead 873 * of a global heap. It allows to balance an access and mitigate 874 * contention. 875 */ 876 static struct vmap_node { 877 /* Simple size segregated storage. */ 878 struct vmap_pool pool[MAX_VA_SIZE_PAGES]; 879 spinlock_t pool_lock; 880 bool skip_populate; 881 882 /* Bookkeeping data of this node. */ 883 struct rb_list busy; 884 struct rb_list lazy; 885 886 /* 887 * Ready-to-free areas. 888 */ 889 struct list_head purge_list; 890 struct work_struct purge_work; 891 unsigned long nr_purged; 892 } single; 893 894 /* 895 * Initial setup consists of one single node, i.e. a balancing 896 * is fully disabled. Later on, after vmap is initialized these 897 * parameters are updated based on a system capacity. 898 */ 899 static struct vmap_node *vmap_nodes = &single; 900 static __read_mostly unsigned int nr_vmap_nodes = 1; 901 static __read_mostly unsigned int vmap_zone_size = 1; 902 903 static inline unsigned int 904 addr_to_node_id(unsigned long addr) 905 { 906 return (addr / vmap_zone_size) % nr_vmap_nodes; 907 } 908 909 static inline struct vmap_node * 910 addr_to_node(unsigned long addr) 911 { 912 return &vmap_nodes[addr_to_node_id(addr)]; 913 } 914 915 static inline struct vmap_node * 916 id_to_node(unsigned int id) 917 { 918 return &vmap_nodes[id % nr_vmap_nodes]; 919 } 920 921 /* 922 * We use the value 0 to represent "no node", that is why 923 * an encoded value will be the node-id incremented by 1. 924 * It is always greater then 0. A valid node_id which can 925 * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id 926 * is not valid 0 is returned. 927 */ 928 static unsigned int 929 encode_vn_id(unsigned int node_id) 930 { 931 /* Can store U8_MAX [0:254] nodes. */ 932 if (node_id < nr_vmap_nodes) 933 return (node_id + 1) << BITS_PER_BYTE; 934 935 /* Warn and no node encoded. */ 936 WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id); 937 return 0; 938 } 939 940 /* 941 * Returns an encoded node-id, the valid range is within 942 * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is 943 * returned if extracted data is wrong. 944 */ 945 static unsigned int 946 decode_vn_id(unsigned int val) 947 { 948 unsigned int node_id = (val >> BITS_PER_BYTE) - 1; 949 950 /* Can store U8_MAX [0:254] nodes. */ 951 if (node_id < nr_vmap_nodes) 952 return node_id; 953 954 /* If it was _not_ zero, warn. 
*/ 955 WARN_ONCE(node_id != UINT_MAX, 956 "Decode wrong node id (%d)\n", node_id); 957 958 return nr_vmap_nodes; 959 } 960 961 static bool 962 is_vn_id_valid(unsigned int node_id) 963 { 964 if (node_id < nr_vmap_nodes) 965 return true; 966 967 return false; 968 } 969 970 static __always_inline unsigned long 971 va_size(struct vmap_area *va) 972 { 973 return (va->va_end - va->va_start); 974 } 975 976 static __always_inline unsigned long 977 get_subtree_max_size(struct rb_node *node) 978 { 979 struct vmap_area *va; 980 981 va = rb_entry_safe(node, struct vmap_area, rb_node); 982 return va ? va->subtree_max_size : 0; 983 } 984 985 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb, 986 struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size) 987 988 static void reclaim_and_purge_vmap_areas(void); 989 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); 990 static void drain_vmap_area_work(struct work_struct *work); 991 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work); 992 993 static atomic_long_t nr_vmalloc_pages; 994 995 unsigned long vmalloc_nr_pages(void) 996 { 997 return atomic_long_read(&nr_vmalloc_pages); 998 } 999 1000 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root) 1001 { 1002 struct rb_node *n = root->rb_node; 1003 1004 addr = (unsigned long)kasan_reset_tag((void *)addr); 1005 1006 while (n) { 1007 struct vmap_area *va; 1008 1009 va = rb_entry(n, struct vmap_area, rb_node); 1010 if (addr < va->va_start) 1011 n = n->rb_left; 1012 else if (addr >= va->va_end) 1013 n = n->rb_right; 1014 else 1015 return va; 1016 } 1017 1018 return NULL; 1019 } 1020 1021 /* Look up the first VA which satisfies addr < va_end, NULL if none. */ 1022 static struct vmap_area * 1023 __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root) 1024 { 1025 struct vmap_area *va = NULL; 1026 struct rb_node *n = root->rb_node; 1027 1028 addr = (unsigned long)kasan_reset_tag((void *)addr); 1029 1030 while (n) { 1031 struct vmap_area *tmp; 1032 1033 tmp = rb_entry(n, struct vmap_area, rb_node); 1034 if (tmp->va_end > addr) { 1035 va = tmp; 1036 if (tmp->va_start <= addr) 1037 break; 1038 1039 n = n->rb_left; 1040 } else 1041 n = n->rb_right; 1042 } 1043 1044 return va; 1045 } 1046 1047 /* 1048 * Returns a node where a first VA, that satisfies addr < va_end, resides. 1049 * If success, a node is locked. A user is responsible to unlock it when a 1050 * VA is no longer needed to be accessed. 1051 * 1052 * Returns NULL if nothing found. 1053 */ 1054 static struct vmap_node * 1055 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va) 1056 { 1057 unsigned long va_start_lowest; 1058 struct vmap_node *vn; 1059 int i; 1060 1061 repeat: 1062 for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) { 1063 vn = &vmap_nodes[i]; 1064 1065 spin_lock(&vn->busy.lock); 1066 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); 1067 1068 if (*va) 1069 if (!va_start_lowest || (*va)->va_start < va_start_lowest) 1070 va_start_lowest = (*va)->va_start; 1071 spin_unlock(&vn->busy.lock); 1072 } 1073 1074 /* 1075 * Check if found VA exists, it might have gone away. In this case we 1076 * repeat the search because a VA has been removed concurrently and we 1077 * need to proceed to the next one, which is a rare case. 
1078 */ 1079 if (va_start_lowest) { 1080 vn = addr_to_node(va_start_lowest); 1081 1082 spin_lock(&vn->busy.lock); 1083 *va = __find_vmap_area(va_start_lowest, &vn->busy.root); 1084 1085 if (*va) 1086 return vn; 1087 1088 spin_unlock(&vn->busy.lock); 1089 goto repeat; 1090 } 1091 1092 return NULL; 1093 } 1094 1095 /* 1096 * This function returns back addresses of parent node 1097 * and its left or right link for further processing. 1098 * 1099 * Otherwise NULL is returned. In that case all further 1100 * steps regarding inserting of conflicting overlap range 1101 * have to be declined and actually considered as a bug. 1102 */ 1103 static __always_inline struct rb_node ** 1104 find_va_links(struct vmap_area *va, 1105 struct rb_root *root, struct rb_node *from, 1106 struct rb_node **parent) 1107 { 1108 struct vmap_area *tmp_va; 1109 struct rb_node **link; 1110 1111 if (root) { 1112 link = &root->rb_node; 1113 if (unlikely(!*link)) { 1114 *parent = NULL; 1115 return link; 1116 } 1117 } else { 1118 link = &from; 1119 } 1120 1121 /* 1122 * Go to the bottom of the tree. When we hit the last point 1123 * we end up with parent rb_node and correct direction, i name 1124 * it link, where the new va->rb_node will be attached to. 1125 */ 1126 do { 1127 tmp_va = rb_entry(*link, struct vmap_area, rb_node); 1128 1129 /* 1130 * During the traversal we also do some sanity check. 1131 * Trigger the BUG() if there are sides(left/right) 1132 * or full overlaps. 1133 */ 1134 if (va->va_end <= tmp_va->va_start) 1135 link = &(*link)->rb_left; 1136 else if (va->va_start >= tmp_va->va_end) 1137 link = &(*link)->rb_right; 1138 else { 1139 WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n", 1140 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); 1141 1142 return NULL; 1143 } 1144 } while (*link); 1145 1146 *parent = &tmp_va->rb_node; 1147 return link; 1148 } 1149 1150 static __always_inline struct list_head * 1151 get_va_next_sibling(struct rb_node *parent, struct rb_node **link) 1152 { 1153 struct list_head *list; 1154 1155 if (unlikely(!parent)) 1156 /* 1157 * The red-black tree where we try to find VA neighbors 1158 * before merging or inserting is empty, i.e. it means 1159 * there is no free vmap space. Normally it does not 1160 * happen but we handle this case anyway. 1161 */ 1162 return NULL; 1163 1164 list = &rb_entry(parent, struct vmap_area, rb_node)->list; 1165 return (&parent->rb_right == link ? list->next : list); 1166 } 1167 1168 static __always_inline void 1169 __link_va(struct vmap_area *va, struct rb_root *root, 1170 struct rb_node *parent, struct rb_node **link, 1171 struct list_head *head, bool augment) 1172 { 1173 /* 1174 * VA is still not in the list, but we can 1175 * identify its future previous list_head node. 1176 */ 1177 if (likely(parent)) { 1178 head = &rb_entry(parent, struct vmap_area, rb_node)->list; 1179 if (&parent->rb_right != link) 1180 head = head->prev; 1181 } 1182 1183 /* Insert to the rb-tree */ 1184 rb_link_node(&va->rb_node, parent, link); 1185 if (augment) { 1186 /* 1187 * Some explanation here. Just perform simple insertion 1188 * to the tree. We do not set va->subtree_max_size to 1189 * its current size before calling rb_insert_augmented(). 1190 * It is because we populate the tree from the bottom 1191 * to parent levels when the node _is_ in the tree. 1192 * 1193 * Therefore we set subtree_max_size to zero after insertion, 1194 * to let __augment_tree_propagate_from() puts everything to 1195 * the correct order later on. 
1196 */ 1197 rb_insert_augmented(&va->rb_node, 1198 root, &free_vmap_area_rb_augment_cb); 1199 va->subtree_max_size = 0; 1200 } else { 1201 rb_insert_color(&va->rb_node, root); 1202 } 1203 1204 /* Address-sort this list */ 1205 list_add(&va->list, head); 1206 } 1207 1208 static __always_inline void 1209 link_va(struct vmap_area *va, struct rb_root *root, 1210 struct rb_node *parent, struct rb_node **link, 1211 struct list_head *head) 1212 { 1213 __link_va(va, root, parent, link, head, false); 1214 } 1215 1216 static __always_inline void 1217 link_va_augment(struct vmap_area *va, struct rb_root *root, 1218 struct rb_node *parent, struct rb_node **link, 1219 struct list_head *head) 1220 { 1221 __link_va(va, root, parent, link, head, true); 1222 } 1223 1224 static __always_inline void 1225 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) 1226 { 1227 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) 1228 return; 1229 1230 if (augment) 1231 rb_erase_augmented(&va->rb_node, 1232 root, &free_vmap_area_rb_augment_cb); 1233 else 1234 rb_erase(&va->rb_node, root); 1235 1236 list_del_init(&va->list); 1237 RB_CLEAR_NODE(&va->rb_node); 1238 } 1239 1240 static __always_inline void 1241 unlink_va(struct vmap_area *va, struct rb_root *root) 1242 { 1243 __unlink_va(va, root, false); 1244 } 1245 1246 static __always_inline void 1247 unlink_va_augment(struct vmap_area *va, struct rb_root *root) 1248 { 1249 __unlink_va(va, root, true); 1250 } 1251 1252 #if DEBUG_AUGMENT_PROPAGATE_CHECK 1253 /* 1254 * Gets called when remove the node and rotate. 1255 */ 1256 static __always_inline unsigned long 1257 compute_subtree_max_size(struct vmap_area *va) 1258 { 1259 return max3(va_size(va), 1260 get_subtree_max_size(va->rb_node.rb_left), 1261 get_subtree_max_size(va->rb_node.rb_right)); 1262 } 1263 1264 static void 1265 augment_tree_propagate_check(void) 1266 { 1267 struct vmap_area *va; 1268 unsigned long computed_size; 1269 1270 list_for_each_entry(va, &free_vmap_area_list, list) { 1271 computed_size = compute_subtree_max_size(va); 1272 if (computed_size != va->subtree_max_size) 1273 pr_emerg("tree is corrupted: %lu, %lu\n", 1274 va_size(va), va->subtree_max_size); 1275 } 1276 } 1277 #endif 1278 1279 /* 1280 * This function populates subtree_max_size from bottom to upper 1281 * levels starting from VA point. The propagation must be done 1282 * when VA size is modified by changing its va_start/va_end. Or 1283 * in case of newly inserting of VA to the tree. 1284 * 1285 * It means that __augment_tree_propagate_from() must be called: 1286 * - After VA has been inserted to the tree(free path); 1287 * - After VA has been shrunk(allocation path); 1288 * - After VA has been increased(merging path). 1289 * 1290 * Please note that, it does not mean that upper parent nodes 1291 * and their subtree_max_size are recalculated all the time up 1292 * to the root node. 1293 * 1294 * 4--8 1295 * /\ 1296 * / \ 1297 * / \ 1298 * 2--2 8--8 1299 * 1300 * For example if we modify the node 4, shrinking it to 2, then 1301 * no any modification is required. If we shrink the node 2 to 1 1302 * its subtree_max_size is updated only, and set to 1. If we shrink 1303 * the node 8 to 6, then its subtree_max_size is set to 6 and parent 1304 * node becomes 4--6. 1305 */ 1306 static __always_inline void 1307 augment_tree_propagate_from(struct vmap_area *va) 1308 { 1309 /* 1310 * Populate the tree from bottom towards the root until 1311 * the calculated maximum available size of checked node 1312 * is equal to its current one. 
1313 */ 1314 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); 1315 1316 #if DEBUG_AUGMENT_PROPAGATE_CHECK 1317 augment_tree_propagate_check(); 1318 #endif 1319 } 1320 1321 static void 1322 insert_vmap_area(struct vmap_area *va, 1323 struct rb_root *root, struct list_head *head) 1324 { 1325 struct rb_node **link; 1326 struct rb_node *parent; 1327 1328 link = find_va_links(va, root, NULL, &parent); 1329 if (link) 1330 link_va(va, root, parent, link, head); 1331 } 1332 1333 static void 1334 insert_vmap_area_augment(struct vmap_area *va, 1335 struct rb_node *from, struct rb_root *root, 1336 struct list_head *head) 1337 { 1338 struct rb_node **link; 1339 struct rb_node *parent; 1340 1341 if (from) 1342 link = find_va_links(va, NULL, from, &parent); 1343 else 1344 link = find_va_links(va, root, NULL, &parent); 1345 1346 if (link) { 1347 link_va_augment(va, root, parent, link, head); 1348 augment_tree_propagate_from(va); 1349 } 1350 } 1351 1352 /* 1353 * Merge de-allocated chunk of VA memory with previous 1354 * and next free blocks. If coalesce is not done a new 1355 * free area is inserted. If VA has been merged, it is 1356 * freed. 1357 * 1358 * Please note, it can return NULL in case of overlap 1359 * ranges, followed by WARN() report. Despite it is a 1360 * buggy behaviour, a system can be alive and keep 1361 * ongoing. 1362 */ 1363 static __always_inline struct vmap_area * 1364 __merge_or_add_vmap_area(struct vmap_area *va, 1365 struct rb_root *root, struct list_head *head, bool augment) 1366 { 1367 struct vmap_area *sibling; 1368 struct list_head *next; 1369 struct rb_node **link; 1370 struct rb_node *parent; 1371 bool merged = false; 1372 1373 /* 1374 * Find a place in the tree where VA potentially will be 1375 * inserted, unless it is merged with its sibling/siblings. 1376 */ 1377 link = find_va_links(va, root, NULL, &parent); 1378 if (!link) 1379 return NULL; 1380 1381 /* 1382 * Get next node of VA to check if merging can be done. 1383 */ 1384 next = get_va_next_sibling(parent, link); 1385 if (unlikely(next == NULL)) 1386 goto insert; 1387 1388 /* 1389 * start end 1390 * | | 1391 * |<------VA------>|<-----Next----->| 1392 * | | 1393 * start end 1394 */ 1395 if (next != head) { 1396 sibling = list_entry(next, struct vmap_area, list); 1397 if (sibling->va_start == va->va_end) { 1398 sibling->va_start = va->va_start; 1399 1400 /* Free vmap_area object. */ 1401 kmem_cache_free(vmap_area_cachep, va); 1402 1403 /* Point to the new merged area. */ 1404 va = sibling; 1405 merged = true; 1406 } 1407 } 1408 1409 /* 1410 * start end 1411 * | | 1412 * |<-----Prev----->|<------VA------>| 1413 * | | 1414 * start end 1415 */ 1416 if (next->prev != head) { 1417 sibling = list_entry(next->prev, struct vmap_area, list); 1418 if (sibling->va_end == va->va_start) { 1419 /* 1420 * If both neighbors are coalesced, it is important 1421 * to unlink the "next" node first, followed by merging 1422 * with "previous" one. Otherwise the tree might not be 1423 * fully populated if a sibling's augmented value is 1424 * "normalized" because of rotation operations. 1425 */ 1426 if (merged) 1427 __unlink_va(va, root, augment); 1428 1429 sibling->va_end = va->va_end; 1430 1431 /* Free vmap_area object. */ 1432 kmem_cache_free(vmap_area_cachep, va); 1433 1434 /* Point to the new merged area. 
*/ 1435 va = sibling; 1436 merged = true; 1437 } 1438 } 1439 1440 insert: 1441 if (!merged) 1442 __link_va(va, root, parent, link, head, augment); 1443 1444 return va; 1445 } 1446 1447 static __always_inline struct vmap_area * 1448 merge_or_add_vmap_area(struct vmap_area *va, 1449 struct rb_root *root, struct list_head *head) 1450 { 1451 return __merge_or_add_vmap_area(va, root, head, false); 1452 } 1453 1454 static __always_inline struct vmap_area * 1455 merge_or_add_vmap_area_augment(struct vmap_area *va, 1456 struct rb_root *root, struct list_head *head) 1457 { 1458 va = __merge_or_add_vmap_area(va, root, head, true); 1459 if (va) 1460 augment_tree_propagate_from(va); 1461 1462 return va; 1463 } 1464 1465 static __always_inline bool 1466 is_within_this_va(struct vmap_area *va, unsigned long size, 1467 unsigned long align, unsigned long vstart) 1468 { 1469 unsigned long nva_start_addr; 1470 1471 if (va->va_start > vstart) 1472 nva_start_addr = ALIGN(va->va_start, align); 1473 else 1474 nva_start_addr = ALIGN(vstart, align); 1475 1476 /* Can be overflowed due to big size or alignment. */ 1477 if (nva_start_addr + size < nva_start_addr || 1478 nva_start_addr < vstart) 1479 return false; 1480 1481 return (nva_start_addr + size <= va->va_end); 1482 } 1483 1484 /* 1485 * Find the first free block(lowest start address) in the tree, 1486 * that will accomplish the request corresponding to passing 1487 * parameters. Please note, with an alignment bigger than PAGE_SIZE, 1488 * a search length is adjusted to account for worst case alignment 1489 * overhead. 1490 */ 1491 static __always_inline struct vmap_area * 1492 find_vmap_lowest_match(struct rb_root *root, unsigned long size, 1493 unsigned long align, unsigned long vstart, bool adjust_search_size) 1494 { 1495 struct vmap_area *va; 1496 struct rb_node *node; 1497 unsigned long length; 1498 1499 /* Start from the root. */ 1500 node = root->rb_node; 1501 1502 /* Adjust the search size for alignment overhead. */ 1503 length = adjust_search_size ? size + align - 1 : size; 1504 1505 while (node) { 1506 va = rb_entry(node, struct vmap_area, rb_node); 1507 1508 if (get_subtree_max_size(node->rb_left) >= length && 1509 vstart < va->va_start) { 1510 node = node->rb_left; 1511 } else { 1512 if (is_within_this_va(va, size, align, vstart)) 1513 return va; 1514 1515 /* 1516 * Does not make sense to go deeper towards the right 1517 * sub-tree if it does not have a free block that is 1518 * equal or bigger to the requested search length. 1519 */ 1520 if (get_subtree_max_size(node->rb_right) >= length) { 1521 node = node->rb_right; 1522 continue; 1523 } 1524 1525 /* 1526 * OK. We roll back and find the first right sub-tree, 1527 * that will satisfy the search criteria. It can happen 1528 * due to "vstart" restriction or an alignment overhead 1529 * that is bigger then PAGE_SIZE. 1530 */ 1531 while ((node = rb_parent(node))) { 1532 va = rb_entry(node, struct vmap_area, rb_node); 1533 if (is_within_this_va(va, size, align, vstart)) 1534 return va; 1535 1536 if (get_subtree_max_size(node->rb_right) >= length && 1537 vstart <= va->va_start) { 1538 /* 1539 * Shift the vstart forward. Please note, we update it with 1540 * parent's start address adding "1" because we do not want 1541 * to enter same sub-tree after it has already been checked 1542 * and no suitable free block found there. 
1543 */ 1544 vstart = va->va_start + 1; 1545 node = node->rb_right; 1546 break; 1547 } 1548 } 1549 } 1550 } 1551 1552 return NULL; 1553 } 1554 1555 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1556 #include <linux/random.h> 1557 1558 static struct vmap_area * 1559 find_vmap_lowest_linear_match(struct list_head *head, unsigned long size, 1560 unsigned long align, unsigned long vstart) 1561 { 1562 struct vmap_area *va; 1563 1564 list_for_each_entry(va, head, list) { 1565 if (!is_within_this_va(va, size, align, vstart)) 1566 continue; 1567 1568 return va; 1569 } 1570 1571 return NULL; 1572 } 1573 1574 static void 1575 find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head, 1576 unsigned long size, unsigned long align) 1577 { 1578 struct vmap_area *va_1, *va_2; 1579 unsigned long vstart; 1580 unsigned int rnd; 1581 1582 get_random_bytes(&rnd, sizeof(rnd)); 1583 vstart = VMALLOC_START + rnd; 1584 1585 va_1 = find_vmap_lowest_match(root, size, align, vstart, false); 1586 va_2 = find_vmap_lowest_linear_match(head, size, align, vstart); 1587 1588 if (va_1 != va_2) 1589 pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", 1590 va_1, va_2, vstart); 1591 } 1592 #endif 1593 1594 enum fit_type { 1595 NOTHING_FIT = 0, 1596 FL_FIT_TYPE = 1, /* full fit */ 1597 LE_FIT_TYPE = 2, /* left edge fit */ 1598 RE_FIT_TYPE = 3, /* right edge fit */ 1599 NE_FIT_TYPE = 4 /* no edge fit */ 1600 }; 1601 1602 static __always_inline enum fit_type 1603 classify_va_fit_type(struct vmap_area *va, 1604 unsigned long nva_start_addr, unsigned long size) 1605 { 1606 enum fit_type type; 1607 1608 /* Check if it is within VA. */ 1609 if (nva_start_addr < va->va_start || 1610 nva_start_addr + size > va->va_end) 1611 return NOTHING_FIT; 1612 1613 /* Now classify. */ 1614 if (va->va_start == nva_start_addr) { 1615 if (va->va_end == nva_start_addr + size) 1616 type = FL_FIT_TYPE; 1617 else 1618 type = LE_FIT_TYPE; 1619 } else if (va->va_end == nva_start_addr + size) { 1620 type = RE_FIT_TYPE; 1621 } else { 1622 type = NE_FIT_TYPE; 1623 } 1624 1625 return type; 1626 } 1627 1628 static __always_inline int 1629 va_clip(struct rb_root *root, struct list_head *head, 1630 struct vmap_area *va, unsigned long nva_start_addr, 1631 unsigned long size) 1632 { 1633 struct vmap_area *lva = NULL; 1634 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); 1635 1636 if (type == FL_FIT_TYPE) { 1637 /* 1638 * No need to split VA, it fully fits. 1639 * 1640 * | | 1641 * V NVA V 1642 * |---------------| 1643 */ 1644 unlink_va_augment(va, root); 1645 kmem_cache_free(vmap_area_cachep, va); 1646 } else if (type == LE_FIT_TYPE) { 1647 /* 1648 * Split left edge of fit VA. 1649 * 1650 * | | 1651 * V NVA V R 1652 * |-------|-------| 1653 */ 1654 va->va_start += size; 1655 } else if (type == RE_FIT_TYPE) { 1656 /* 1657 * Split right edge of fit VA. 1658 * 1659 * | | 1660 * L V NVA V 1661 * |-------|-------| 1662 */ 1663 va->va_end = nva_start_addr; 1664 } else if (type == NE_FIT_TYPE) { 1665 /* 1666 * Split no edge of fit VA. 1667 * 1668 * | | 1669 * L V NVA V R 1670 * |---|-------|---| 1671 */ 1672 lva = __this_cpu_xchg(ne_fit_preload_node, NULL); 1673 if (unlikely(!lva)) { 1674 /* 1675 * For percpu allocator we do not do any pre-allocation 1676 * and leave it as it is. The reason is it most likely 1677 * never ends up with NE_FIT_TYPE splitting. In case of 1678 * percpu allocations offsets and sizes are aligned to 1679 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE 1680 * are its main fitting cases. 
1681 * 1682 * There are a few exceptions though, as an example it is 1683 * a first allocation (early boot up) when we have "one" 1684 * big free space that has to be split. 1685 * 1686 * Also we can hit this path in case of regular "vmap" 1687 * allocations, if "this" current CPU was not preloaded. 1688 * See the comment in alloc_vmap_area() why. If so, then 1689 * GFP_NOWAIT is used instead to get an extra object for 1690 * split purpose. That is rare and most time does not 1691 * occur. 1692 * 1693 * What happens if an allocation gets failed. Basically, 1694 * an "overflow" path is triggered to purge lazily freed 1695 * areas to free some memory, then, the "retry" path is 1696 * triggered to repeat one more time. See more details 1697 * in alloc_vmap_area() function. 1698 */ 1699 lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); 1700 if (!lva) 1701 return -1; 1702 } 1703 1704 /* 1705 * Build the remainder. 1706 */ 1707 lva->va_start = va->va_start; 1708 lva->va_end = nva_start_addr; 1709 1710 /* 1711 * Shrink this VA to remaining size. 1712 */ 1713 va->va_start = nva_start_addr + size; 1714 } else { 1715 return -1; 1716 } 1717 1718 if (type != FL_FIT_TYPE) { 1719 augment_tree_propagate_from(va); 1720 1721 if (lva) /* type == NE_FIT_TYPE */ 1722 insert_vmap_area_augment(lva, &va->rb_node, root, head); 1723 } 1724 1725 return 0; 1726 } 1727 1728 static unsigned long 1729 va_alloc(struct vmap_area *va, 1730 struct rb_root *root, struct list_head *head, 1731 unsigned long size, unsigned long align, 1732 unsigned long vstart, unsigned long vend) 1733 { 1734 unsigned long nva_start_addr; 1735 int ret; 1736 1737 if (va->va_start > vstart) 1738 nva_start_addr = ALIGN(va->va_start, align); 1739 else 1740 nva_start_addr = ALIGN(vstart, align); 1741 1742 /* Check the "vend" restriction. */ 1743 if (nva_start_addr + size > vend) 1744 return vend; 1745 1746 /* Update the free vmap_area. */ 1747 ret = va_clip(root, head, va, nva_start_addr, size); 1748 if (WARN_ON_ONCE(ret)) 1749 return vend; 1750 1751 return nva_start_addr; 1752 } 1753 1754 /* 1755 * Returns a start address of the newly allocated area, if success. 1756 * Otherwise a vend is returned that indicates failure. 1757 */ 1758 static __always_inline unsigned long 1759 __alloc_vmap_area(struct rb_root *root, struct list_head *head, 1760 unsigned long size, unsigned long align, 1761 unsigned long vstart, unsigned long vend) 1762 { 1763 bool adjust_search_size = true; 1764 unsigned long nva_start_addr; 1765 struct vmap_area *va; 1766 1767 /* 1768 * Do not adjust when: 1769 * a) align <= PAGE_SIZE, because it does not make any sense. 1770 * All blocks(their start addresses) are at least PAGE_SIZE 1771 * aligned anyway; 1772 * b) a short range where a requested size corresponds to exactly 1773 * specified [vstart:vend] interval and an alignment > PAGE_SIZE. 1774 * With adjusted search length an allocation would not succeed. 
1775 */ 1776 if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size)) 1777 adjust_search_size = false; 1778 1779 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); 1780 if (unlikely(!va)) 1781 return vend; 1782 1783 nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); 1784 if (nva_start_addr == vend) 1785 return vend; 1786 1787 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1788 find_vmap_lowest_match_check(root, head, size, align); 1789 #endif 1790 1791 return nva_start_addr; 1792 } 1793 1794 /* 1795 * Free a region of KVA allocated by alloc_vmap_area 1796 */ 1797 static void free_vmap_area(struct vmap_area *va) 1798 { 1799 struct vmap_node *vn = addr_to_node(va->va_start); 1800 1801 /* 1802 * Remove from the busy tree/list. 1803 */ 1804 spin_lock(&vn->busy.lock); 1805 unlink_va(va, &vn->busy.root); 1806 spin_unlock(&vn->busy.lock); 1807 1808 /* 1809 * Insert/Merge it back to the free tree/list. 1810 */ 1811 spin_lock(&free_vmap_area_lock); 1812 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); 1813 spin_unlock(&free_vmap_area_lock); 1814 } 1815 1816 static inline void 1817 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) 1818 { 1819 struct vmap_area *va = NULL, *tmp; 1820 1821 /* 1822 * Preload this CPU with one extra vmap_area object. It is used 1823 * when fit type of free area is NE_FIT_TYPE. It guarantees that 1824 * a CPU that does an allocation is preloaded. 1825 * 1826 * We do it in non-atomic context, thus it allows us to use more 1827 * permissive allocation masks to be more stable under low memory 1828 * condition and high memory pressure. 1829 */ 1830 if (!this_cpu_read(ne_fit_preload_node)) 1831 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1832 1833 spin_lock(lock); 1834 1835 tmp = NULL; 1836 if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va)) 1837 kmem_cache_free(vmap_area_cachep, va); 1838 } 1839 1840 static struct vmap_pool * 1841 size_to_va_pool(struct vmap_node *vn, unsigned long size) 1842 { 1843 unsigned int idx = (size - 1) / PAGE_SIZE; 1844 1845 if (idx < MAX_VA_SIZE_PAGES) 1846 return &vn->pool[idx]; 1847 1848 return NULL; 1849 } 1850 1851 static bool 1852 node_pool_add_va(struct vmap_node *n, struct vmap_area *va) 1853 { 1854 struct vmap_pool *vp; 1855 1856 vp = size_to_va_pool(n, va_size(va)); 1857 if (!vp) 1858 return false; 1859 1860 spin_lock(&n->pool_lock); 1861 list_add(&va->list, &vp->head); 1862 WRITE_ONCE(vp->len, vp->len + 1); 1863 spin_unlock(&n->pool_lock); 1864 1865 return true; 1866 } 1867 1868 static struct vmap_area * 1869 node_pool_del_va(struct vmap_node *vn, unsigned long size, 1870 unsigned long align, unsigned long vstart, 1871 unsigned long vend) 1872 { 1873 struct vmap_area *va = NULL; 1874 struct vmap_pool *vp; 1875 int err = 0; 1876 1877 vp = size_to_va_pool(vn, size); 1878 if (!vp || list_empty(&vp->head)) 1879 return NULL; 1880 1881 spin_lock(&vn->pool_lock); 1882 if (!list_empty(&vp->head)) { 1883 va = list_first_entry(&vp->head, struct vmap_area, list); 1884 1885 if (IS_ALIGNED(va->va_start, align)) { 1886 /* 1887 * Do some sanity check and emit a warning 1888 * if one of below checks detects an error. 
1889 */ 1890 err |= (va_size(va) != size); 1891 err |= (va->va_start < vstart); 1892 err |= (va->va_end > vend); 1893 1894 if (!WARN_ON_ONCE(err)) { 1895 list_del_init(&va->list); 1896 WRITE_ONCE(vp->len, vp->len - 1); 1897 } else { 1898 va = NULL; 1899 } 1900 } else { 1901 list_move_tail(&va->list, &vp->head); 1902 va = NULL; 1903 } 1904 } 1905 spin_unlock(&vn->pool_lock); 1906 1907 return va; 1908 } 1909 1910 static struct vmap_area * 1911 node_alloc(unsigned long size, unsigned long align, 1912 unsigned long vstart, unsigned long vend, 1913 unsigned long *addr, unsigned int *vn_id) 1914 { 1915 struct vmap_area *va; 1916 1917 *vn_id = 0; 1918 *addr = vend; 1919 1920 /* 1921 * Fallback to a global heap if not vmalloc or there 1922 * is only one node. 1923 */ 1924 if (vstart != VMALLOC_START || vend != VMALLOC_END || 1925 nr_vmap_nodes == 1) 1926 return NULL; 1927 1928 *vn_id = raw_smp_processor_id() % nr_vmap_nodes; 1929 va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend); 1930 *vn_id = encode_vn_id(*vn_id); 1931 1932 if (va) 1933 *addr = va->va_start; 1934 1935 return va; 1936 } 1937 1938 static inline void setup_vmalloc_vm(struct vm_struct *vm, 1939 struct vmap_area *va, unsigned long flags, const void *caller) 1940 { 1941 vm->flags = flags; 1942 vm->addr = (void *)va->va_start; 1943 vm->size = va->va_end - va->va_start; 1944 vm->caller = caller; 1945 va->vm = vm; 1946 } 1947 1948 /* 1949 * Allocate a region of KVA of the specified size and alignment, within the 1950 * vstart and vend. If vm is passed in, the two will also be bound. 1951 */ 1952 static struct vmap_area *alloc_vmap_area(unsigned long size, 1953 unsigned long align, 1954 unsigned long vstart, unsigned long vend, 1955 int node, gfp_t gfp_mask, 1956 unsigned long va_flags, struct vm_struct *vm) 1957 { 1958 struct vmap_node *vn; 1959 struct vmap_area *va; 1960 unsigned long freed; 1961 unsigned long addr; 1962 unsigned int vn_id; 1963 int purged = 0; 1964 int ret; 1965 1966 if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align))) 1967 return ERR_PTR(-EINVAL); 1968 1969 if (unlikely(!vmap_initialized)) 1970 return ERR_PTR(-EBUSY); 1971 1972 might_sleep(); 1973 1974 /* 1975 * If a VA is obtained from a global heap(if it fails here) 1976 * it is anyway marked with this "vn_id" so it is returned 1977 * to this pool's node later. Such way gives a possibility 1978 * to populate pools based on users demand. 1979 * 1980 * On success a ready to go VA is returned. 1981 */ 1982 va = node_alloc(size, align, vstart, vend, &addr, &vn_id); 1983 if (!va) { 1984 gfp_mask = gfp_mask & GFP_RECLAIM_MASK; 1985 1986 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1987 if (unlikely(!va)) 1988 return ERR_PTR(-ENOMEM); 1989 1990 /* 1991 * Only scan the relevant parts containing pointers to other objects 1992 * to avoid false negatives. 1993 */ 1994 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); 1995 } 1996 1997 retry: 1998 if (addr == vend) { 1999 preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); 2000 addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, 2001 size, align, vstart, vend); 2002 spin_unlock(&free_vmap_area_lock); 2003 } 2004 2005 trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); 2006 2007 /* 2008 * If an allocation fails, the "vend" address is 2009 * returned. Therefore trigger the overflow path. 
2010 */ 2011 if (unlikely(addr == vend)) 2012 goto overflow; 2013 2014 va->va_start = addr; 2015 va->va_end = addr + size; 2016 va->vm = NULL; 2017 va->flags = (va_flags | vn_id); 2018 2019 if (vm) { 2020 vm->addr = (void *)va->va_start; 2021 vm->size = va->va_end - va->va_start; 2022 va->vm = vm; 2023 } 2024 2025 vn = addr_to_node(va->va_start); 2026 2027 spin_lock(&vn->busy.lock); 2028 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 2029 spin_unlock(&vn->busy.lock); 2030 2031 BUG_ON(!IS_ALIGNED(va->va_start, align)); 2032 BUG_ON(va->va_start < vstart); 2033 BUG_ON(va->va_end > vend); 2034 2035 ret = kasan_populate_vmalloc(addr, size); 2036 if (ret) { 2037 free_vmap_area(va); 2038 return ERR_PTR(ret); 2039 } 2040 2041 return va; 2042 2043 overflow: 2044 if (!purged) { 2045 reclaim_and_purge_vmap_areas(); 2046 purged = 1; 2047 goto retry; 2048 } 2049 2050 freed = 0; 2051 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 2052 2053 if (freed > 0) { 2054 purged = 0; 2055 goto retry; 2056 } 2057 2058 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 2059 pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n", 2060 size, vstart, vend); 2061 2062 kmem_cache_free(vmap_area_cachep, va); 2063 return ERR_PTR(-EBUSY); 2064 } 2065 2066 int register_vmap_purge_notifier(struct notifier_block *nb) 2067 { 2068 return blocking_notifier_chain_register(&vmap_notify_list, nb); 2069 } 2070 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 2071 2072 int unregister_vmap_purge_notifier(struct notifier_block *nb) 2073 { 2074 return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 2075 } 2076 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 2077 2078 /* 2079 * lazy_max_pages is the maximum amount of virtual address space we gather up 2080 * before attempting to purge with a TLB flush. 2081 * 2082 * There is a tradeoff here: a larger number will cover more kernel page tables 2083 * and take slightly longer to purge, but it will linearly reduce the number of 2084 * global TLB flushes that must be performed. It would seem natural to scale 2085 * this number up linearly with the number of CPUs (because vmapping activity 2086 * could also scale linearly with the number of CPUs), however it is likely 2087 * that in practice, workloads might be constrained in other ways that mean 2088 * vmap activity will not scale linearly with CPUs. Also, I want to be 2089 * conservative and not introduce a big latency on huge systems, so go with 2090 * a less aggressive log scale. It will still be an improvement over the old 2091 * code, and it will be simple to change the scale factor if we find that it 2092 * becomes a problem on bigger systems. 2093 */ 2094 static unsigned long lazy_max_pages(void) 2095 { 2096 unsigned int log; 2097 2098 log = fls(num_online_cpus()); 2099 2100 return log * (32UL * 1024 * 1024 / PAGE_SIZE); 2101 } 2102 2103 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); 2104 2105 /* 2106 * Serialize vmap purging. There is no actual critical section protected 2107 * by this lock, but we want to avoid concurrent calls for performance 2108 * reasons and to make the pcpu_get_vm_areas more deterministic. 
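* It is taken by reclaim_and_purge_vmap_areas(), drain_vmap_area_work() and _vm_unmap_aliases(), all of which call __purge_vmap_area_lazy() below, which in turn asserts that the lock is held.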
2109 */ 2110 static DEFINE_MUTEX(vmap_purge_lock); 2111 2112 /* for per-CPU blocks */ 2113 static void purge_fragmented_blocks_allcpus(void); 2114 static cpumask_t purge_nodes; 2115 2116 static void 2117 reclaim_list_global(struct list_head *head) 2118 { 2119 struct vmap_area *va, *n; 2120 2121 if (list_empty(head)) 2122 return; 2123 2124 spin_lock(&free_vmap_area_lock); 2125 list_for_each_entry_safe(va, n, head, list) 2126 merge_or_add_vmap_area_augment(va, 2127 &free_vmap_area_root, &free_vmap_area_list); 2128 spin_unlock(&free_vmap_area_lock); 2129 } 2130 2131 static void 2132 decay_va_pool_node(struct vmap_node *vn, bool full_decay) 2133 { 2134 LIST_HEAD(decay_list); 2135 struct rb_root decay_root = RB_ROOT; 2136 struct vmap_area *va, *nva; 2137 unsigned long n_decay; 2138 int i; 2139 2140 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { 2141 LIST_HEAD(tmp_list); 2142 2143 if (list_empty(&vn->pool[i].head)) 2144 continue; 2145 2146 /* Detach the pool, so no-one can access it. */ 2147 spin_lock(&vn->pool_lock); 2148 list_replace_init(&vn->pool[i].head, &tmp_list); 2149 spin_unlock(&vn->pool_lock); 2150 2151 if (full_decay) 2152 WRITE_ONCE(vn->pool[i].len, 0); 2153 2154 /* Decay a pool by ~25% out of left objects. */ 2155 n_decay = vn->pool[i].len >> 2; 2156 2157 list_for_each_entry_safe(va, nva, &tmp_list, list) { 2158 list_del_init(&va->list); 2159 merge_or_add_vmap_area(va, &decay_root, &decay_list); 2160 2161 if (!full_decay) { 2162 WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1); 2163 2164 if (!--n_decay) 2165 break; 2166 } 2167 } 2168 2169 /* 2170 * Attach the pool back if it has been partly decayed. 2171 * Please note, it is supposed that nobody(other contexts) 2172 * can populate the pool therefore a simple list replace 2173 * operation takes place here. 2174 */ 2175 if (!full_decay && !list_empty(&tmp_list)) { 2176 spin_lock(&vn->pool_lock); 2177 list_replace_init(&tmp_list, &vn->pool[i].head); 2178 spin_unlock(&vn->pool_lock); 2179 } 2180 } 2181 2182 reclaim_list_global(&decay_list); 2183 } 2184 2185 static void purge_vmap_node(struct work_struct *work) 2186 { 2187 struct vmap_node *vn = container_of(work, 2188 struct vmap_node, purge_work); 2189 struct vmap_area *va, *n_va; 2190 LIST_HEAD(local_list); 2191 2192 vn->nr_purged = 0; 2193 2194 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { 2195 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; 2196 unsigned long orig_start = va->va_start; 2197 unsigned long orig_end = va->va_end; 2198 unsigned int vn_id = decode_vn_id(va->flags); 2199 2200 list_del_init(&va->list); 2201 2202 if (is_vmalloc_or_module_addr((void *)orig_start)) 2203 kasan_release_vmalloc(orig_start, orig_end, 2204 va->va_start, va->va_end); 2205 2206 atomic_long_sub(nr, &vmap_lazy_nr); 2207 vn->nr_purged++; 2208 2209 if (is_vn_id_valid(vn_id) && !vn->skip_populate) 2210 if (node_pool_add_va(vn, va)) 2211 continue; 2212 2213 /* Go back to global. */ 2214 list_add(&va->list, &local_list); 2215 } 2216 2217 reclaim_list_global(&local_list); 2218 } 2219 2220 /* 2221 * Purges all lazily-freed vmap areas. 2222 */ 2223 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end, 2224 bool full_pool_decay) 2225 { 2226 unsigned long nr_purged_areas = 0; 2227 unsigned int nr_purge_helpers; 2228 unsigned int nr_purge_nodes; 2229 struct vmap_node *vn; 2230 int i; 2231 2232 lockdep_assert_held(&vmap_purge_lock); 2233 2234 /* 2235 * Use cpumask to mark which node has to be processed. 
2236 */ 2237 purge_nodes = CPU_MASK_NONE; 2238 2239 for (i = 0; i < nr_vmap_nodes; i++) { 2240 vn = &vmap_nodes[i]; 2241 2242 INIT_LIST_HEAD(&vn->purge_list); 2243 vn->skip_populate = full_pool_decay; 2244 decay_va_pool_node(vn, full_pool_decay); 2245 2246 if (RB_EMPTY_ROOT(&vn->lazy.root)) 2247 continue; 2248 2249 spin_lock(&vn->lazy.lock); 2250 WRITE_ONCE(vn->lazy.root.rb_node, NULL); 2251 list_replace_init(&vn->lazy.head, &vn->purge_list); 2252 spin_unlock(&vn->lazy.lock); 2253 2254 start = min(start, list_first_entry(&vn->purge_list, 2255 struct vmap_area, list)->va_start); 2256 2257 end = max(end, list_last_entry(&vn->purge_list, 2258 struct vmap_area, list)->va_end); 2259 2260 cpumask_set_cpu(i, &purge_nodes); 2261 } 2262 2263 nr_purge_nodes = cpumask_weight(&purge_nodes); 2264 if (nr_purge_nodes > 0) { 2265 flush_tlb_kernel_range(start, end); 2266 2267 /* Schedule one extra helper per full lazy_max_pages() set accumulated, minus one; the current context purges one node itself. */ 2268 nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages(); 2269 nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1; 2270 2271 for_each_cpu(i, &purge_nodes) { 2272 vn = &vmap_nodes[i]; 2273 2274 if (nr_purge_helpers > 0) { 2275 INIT_WORK(&vn->purge_work, purge_vmap_node); 2276 2277 if (cpumask_test_cpu(i, cpu_online_mask)) 2278 schedule_work_on(i, &vn->purge_work); 2279 else 2280 schedule_work(&vn->purge_work); 2281 2282 nr_purge_helpers--; 2283 } else { 2284 vn->purge_work.func = NULL; 2285 purge_vmap_node(&vn->purge_work); 2286 nr_purged_areas += vn->nr_purged; 2287 } 2288 } 2289 2290 for_each_cpu(i, &purge_nodes) { 2291 vn = &vmap_nodes[i]; 2292 2293 if (vn->purge_work.func) { 2294 flush_work(&vn->purge_work); 2295 nr_purged_areas += vn->nr_purged; 2296 } 2297 } 2298 } 2299 2300 trace_purge_vmap_area_lazy(start, end, nr_purged_areas); 2301 return nr_purged_areas > 0; 2302 } 2303 2304 /* 2305 * Reclaim vmap areas by purging fragmented per-CPU blocks and the lazily freed vmap areas. 2306 */ 2307 static void reclaim_and_purge_vmap_areas(void) 2308 2309 { 2310 mutex_lock(&vmap_purge_lock); 2311 purge_fragmented_blocks_allcpus(); 2312 __purge_vmap_area_lazy(ULONG_MAX, 0, true); 2313 mutex_unlock(&vmap_purge_lock); 2314 } 2315 2316 static void drain_vmap_area_work(struct work_struct *work) 2317 { 2318 mutex_lock(&vmap_purge_lock); 2319 __purge_vmap_area_lazy(ULONG_MAX, 0, false); 2320 mutex_unlock(&vmap_purge_lock); 2321 } 2322 2323 /* 2324 * Free a vmap area; the caller must ensure that the area has been 2325 * unmapped, unlinked and that flush_cache_vunmap() has been called 2326 * for the range previously. 2327 */ 2328 static void free_vmap_area_noflush(struct vmap_area *va) 2329 { 2330 unsigned long nr_lazy_max = lazy_max_pages(); 2331 unsigned long va_start = va->va_start; 2332 unsigned int vn_id = decode_vn_id(va->flags); 2333 struct vmap_node *vn; 2334 unsigned long nr_lazy; 2335 2336 if (WARN_ON_ONCE(!list_empty(&va->list))) 2337 return; 2338 2339 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> 2340 PAGE_SHIFT, &vmap_lazy_nr); 2341 2342 /* 2343 * If it was requested by a certain node, we would like to 2344 * return it to that node's pool for later reuse. 2345 */ 2346 vn = is_vn_id_valid(vn_id) ?
2347 id_to_node(vn_id) : addr_to_node(va->va_start); 2348 2349 spin_lock(&vn->lazy.lock); 2350 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); 2351 spin_unlock(&vn->lazy.lock); 2352 2353 trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max); 2354 2355 /* After this point, we may free va at any time */ 2356 if (unlikely(nr_lazy > nr_lazy_max)) 2357 schedule_work(&drain_vmap_work); 2358 } 2359 2360 /* 2361 * Free and unmap a vmap area 2362 */ 2363 static void free_unmap_vmap_area(struct vmap_area *va) 2364 { 2365 flush_cache_vunmap(va->va_start, va->va_end); 2366 vunmap_range_noflush(va->va_start, va->va_end); 2367 if (debug_pagealloc_enabled_static()) 2368 flush_tlb_kernel_range(va->va_start, va->va_end); 2369 2370 free_vmap_area_noflush(va); 2371 } 2372 2373 struct vmap_area *find_vmap_area(unsigned long addr) 2374 { 2375 struct vmap_node *vn; 2376 struct vmap_area *va; 2377 int i, j; 2378 2379 if (unlikely(!vmap_initialized)) 2380 return NULL; 2381 2382 /* 2383 * addr_to_node_id(addr) converts an address to the index of the node 2384 * where a VA is located. If a VA spans several nodes and the passed 2385 * addr is not the same as va->va_start, which is not common, we 2386 * may need to scan extra nodes. See an example: 2387 * 2388 * <----va----> 2389 * -|-----|-----|-----|-----|- 2390 * 1 2 0 1 2391 * 2392 * The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If the 2393 * passed addr is within node 2 or 0 we should do extra work. 2394 */ 2395 i = j = addr_to_node_id(addr); 2396 do { 2397 vn = &vmap_nodes[i]; 2398 2399 spin_lock(&vn->busy.lock); 2400 va = __find_vmap_area(addr, &vn->busy.root); 2401 spin_unlock(&vn->busy.lock); 2402 2403 if (va) 2404 return va; 2405 } while ((i = (i + 1) % nr_vmap_nodes) != j); 2406 2407 return NULL; 2408 } 2409 2410 static struct vmap_area *find_unlink_vmap_area(unsigned long addr) 2411 { 2412 struct vmap_node *vn; 2413 struct vmap_area *va; 2414 int i, j; 2415 2416 /* 2417 * See the comment in find_vmap_area() about the loop. 2418 */ 2419 i = j = addr_to_node_id(addr); 2420 do { 2421 vn = &vmap_nodes[i]; 2422 2423 spin_lock(&vn->busy.lock); 2424 va = __find_vmap_area(addr, &vn->busy.root); 2425 if (va) 2426 unlink_va(va, &vn->busy.root); 2427 spin_unlock(&vn->busy.lock); 2428 2429 if (va) 2430 return va; 2431 } while ((i = (i + 1) % nr_vmap_nodes) != j); 2432 2433 return NULL; 2434 } 2435 2436 /*** Per cpu kva allocator ***/ 2437 2438 /* 2439 * vmap space is limited especially on 32 bit architectures. Ensure there is 2440 * room for at least 16 percpu vmap blocks per CPU. 2441 */ 2442 /* 2443 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 2444 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 2445 * instead (we just need a rough idea). 2446 */ 2447 #if BITS_PER_LONG == 32 2448 #define VMALLOC_SPACE (128UL*1024*1024) 2449 #else 2450 #define VMALLOC_SPACE (128UL*1024*1024*1024) 2451 #endif 2452 2453 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 2454 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 2455 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 2456 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 2457 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 2458 #define VMAP_MAX(x, y) ((x) > (y) ?
(x) : (y)) /* can't use max() */ 2459 #define VMAP_BBMAP_BITS \ 2460 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 2461 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 2462 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 2463 2464 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 2465 2466 /* 2467 * Purge threshold to prevent overeager purging of fragmented blocks for 2468 * regular operations: Purge if vb->free is less than 1/4 of the capacity. 2469 */ 2470 #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) 2471 2472 #define VMAP_RAM 0x1 /* indicates a vm_map_ram area */ 2473 #define VMAP_BLOCK 0x2 /* marks the vmap_block sub-type */ 2474 #define VMAP_FLAGS_MASK 0x3 2475 2476 struct vmap_block_queue { 2477 spinlock_t lock; 2478 struct list_head free; 2479 2480 /* 2481 * An xarray requires extra memory to be allocated 2482 * dynamically. If that is an issue, we can use an rb-tree 2483 * instead. 2484 */ 2485 struct xarray vmap_blocks; 2486 }; 2487 2488 struct vmap_block { 2489 spinlock_t lock; 2490 struct vmap_area *va; 2491 unsigned long free, dirty; 2492 DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS); 2493 unsigned long dirty_min, dirty_max; /*< dirty range */ 2494 struct list_head free_list; 2495 struct rcu_head rcu_head; 2496 struct list_head purge; 2497 unsigned int cpu; 2498 }; 2499 2500 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 2501 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 2502 2503 /* 2504 * In order to have fast access to any "vmap_block" associated with a 2505 * specific address, we use a hash. 2506 * 2507 * A per-cpu vmap_block_queue is used in both ways: to serialize 2508 * access to the free block chains among CPUs (alloc path) and to 2509 * act as a vmap_block hash (alloc/free paths). That is, we 2510 * overload it, since we already have the per-cpu array which is 2511 * used as a hash table. When used as a hash, the 'cpu' passed to 2512 * per_cpu() is not actually a CPU but rather a hash index. 2513 * 2514 * The hash function is addr_to_vb_xa(), which hashes any address 2515 * to the index it belongs to. The per_cpu() macro is then used 2516 * to access the array with the generated index. 2517 * 2518 * An example: 2519 * 2520 * CPU_1 CPU_2 CPU_0 2521 * | | | 2522 * V V V 2523 * 0 10 20 30 40 50 60 2524 * |------|------|------|------|------|------|...<vmap address space> 2525 * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2 2526 * 2527 * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to the CPU0 zone, thus 2528 * it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock; 2529 * 2530 * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to the CPU1 zone, thus 2531 * it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock; 2532 * 2533 * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to the CPU2 zone, thus 2534 * it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock. 2535 * 2536 * This technique almost always avoids lock contention on insert/remove, 2537 * however the xarray spinlocks protect against any contention that remains. 2538 */ 2539 static struct xarray * 2540 addr_to_vb_xa(unsigned long addr) 2541 { 2542 int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids; 2543 2544 /* 2545 * Please note, nr_cpu_ids is one more than the highest possible 2546 * CPU id, i.e. CPU nr_cpu_ids - 1 is always possible, so we never 2547 * invoke cpumask_next() for an index that already equals it.
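* For example, with possible CPUs {0, 2, 3} (nr_cpu_ids == 4), an index of 1 is not possible and cpumask_next() advances it to 2; the computed index never exceeds nr_cpu_ids - 1, so the lookup always ends up on a possible CPU.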
2548 */ 2549 if (!cpu_possible(index)) 2550 index = cpumask_next(index, cpu_possible_mask); 2551 2552 return &per_cpu(vmap_block_queue, index).vmap_blocks; 2553 } 2554 2555 /* 2556 * We should probably have a fallback mechanism to allocate virtual memory 2557 * out of partially filled vmap blocks. However vmap block sizing should be 2558 * fairly reasonable according to the vmalloc size, so it shouldn't be a 2559 * big problem. 2560 */ 2561 2562 static unsigned long addr_to_vb_idx(unsigned long addr) 2563 { 2564 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 2565 addr /= VMAP_BLOCK_SIZE; 2566 return addr; 2567 } 2568 2569 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 2570 { 2571 unsigned long addr; 2572 2573 addr = va_start + (pages_off << PAGE_SHIFT); 2574 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 2575 return (void *)addr; 2576 } 2577 2578 /** 2579 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 2580 * block. Of course pages number can't exceed VMAP_BBMAP_BITS 2581 * @order: how many 2^order pages should be occupied in newly allocated block 2582 * @gfp_mask: flags for the page level allocator 2583 * 2584 * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 2585 */ 2586 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 2587 { 2588 struct vmap_block_queue *vbq; 2589 struct vmap_block *vb; 2590 struct vmap_area *va; 2591 struct xarray *xa; 2592 unsigned long vb_idx; 2593 int node, err; 2594 void *vaddr; 2595 2596 node = numa_node_id(); 2597 2598 vb = kmalloc_node(sizeof(struct vmap_block), 2599 gfp_mask & GFP_RECLAIM_MASK, node); 2600 if (unlikely(!vb)) 2601 return ERR_PTR(-ENOMEM); 2602 2603 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 2604 VMALLOC_START, VMALLOC_END, 2605 node, gfp_mask, 2606 VMAP_RAM|VMAP_BLOCK, NULL); 2607 if (IS_ERR(va)) { 2608 kfree(vb); 2609 return ERR_CAST(va); 2610 } 2611 2612 vaddr = vmap_block_vaddr(va->va_start, 0); 2613 spin_lock_init(&vb->lock); 2614 vb->va = va; 2615 /* At least something should be left free */ 2616 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 2617 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS); 2618 vb->free = VMAP_BBMAP_BITS - (1UL << order); 2619 vb->dirty = 0; 2620 vb->dirty_min = VMAP_BBMAP_BITS; 2621 vb->dirty_max = 0; 2622 bitmap_set(vb->used_map, 0, (1UL << order)); 2623 INIT_LIST_HEAD(&vb->free_list); 2624 2625 xa = addr_to_vb_xa(va->va_start); 2626 vb_idx = addr_to_vb_idx(va->va_start); 2627 err = xa_insert(xa, vb_idx, vb, gfp_mask); 2628 if (err) { 2629 kfree(vb); 2630 free_vmap_area(va); 2631 return ERR_PTR(err); 2632 } 2633 /* 2634 * list_add_tail_rcu could happened in another core 2635 * rather than vb->cpu due to task migration, which 2636 * is safe as list_add_tail_rcu will ensure the list's 2637 * integrity together with list_for_each_rcu from read 2638 * side. 
2639 */ 2640 vb->cpu = raw_smp_processor_id(); 2641 vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu); 2642 spin_lock(&vbq->lock); 2643 list_add_tail_rcu(&vb->free_list, &vbq->free); 2644 spin_unlock(&vbq->lock); 2645 2646 return vaddr; 2647 } 2648 2649 static void free_vmap_block(struct vmap_block *vb) 2650 { 2651 struct vmap_node *vn; 2652 struct vmap_block *tmp; 2653 struct xarray *xa; 2654 2655 xa = addr_to_vb_xa(vb->va->va_start); 2656 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); 2657 BUG_ON(tmp != vb); 2658 2659 vn = addr_to_node(vb->va->va_start); 2660 spin_lock(&vn->busy.lock); 2661 unlink_va(vb->va, &vn->busy.root); 2662 spin_unlock(&vn->busy.lock); 2663 2664 free_vmap_area_noflush(vb->va); 2665 kfree_rcu(vb, rcu_head); 2666 } 2667 2668 static bool purge_fragmented_block(struct vmap_block *vb, 2669 struct list_head *purge_list, bool force_purge) 2670 { 2671 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu); 2672 2673 if (vb->free + vb->dirty != VMAP_BBMAP_BITS || 2674 vb->dirty == VMAP_BBMAP_BITS) 2675 return false; 2676 2677 /* Don't overeagerly purge usable blocks unless requested */ 2678 if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) 2679 return false; 2680 2681 /* prevent further allocs after releasing lock */ 2682 WRITE_ONCE(vb->free, 0); 2683 /* prevent purging it again */ 2684 WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); 2685 vb->dirty_min = 0; 2686 vb->dirty_max = VMAP_BBMAP_BITS; 2687 spin_lock(&vbq->lock); 2688 list_del_rcu(&vb->free_list); 2689 spin_unlock(&vbq->lock); 2690 list_add_tail(&vb->purge, purge_list); 2691 return true; 2692 } 2693 2694 static void free_purged_blocks(struct list_head *purge_list) 2695 { 2696 struct vmap_block *vb, *n_vb; 2697 2698 list_for_each_entry_safe(vb, n_vb, purge_list, purge) { 2699 list_del(&vb->purge); 2700 free_vmap_block(vb); 2701 } 2702 } 2703 2704 static void purge_fragmented_blocks(int cpu) 2705 { 2706 LIST_HEAD(purge); 2707 struct vmap_block *vb; 2708 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2709 2710 rcu_read_lock(); 2711 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2712 unsigned long free = READ_ONCE(vb->free); 2713 unsigned long dirty = READ_ONCE(vb->dirty); 2714 2715 if (free + dirty != VMAP_BBMAP_BITS || 2716 dirty == VMAP_BBMAP_BITS) 2717 continue; 2718 2719 spin_lock(&vb->lock); 2720 purge_fragmented_block(vb, &purge, true); 2721 spin_unlock(&vb->lock); 2722 } 2723 rcu_read_unlock(); 2724 free_purged_blocks(&purge); 2725 } 2726 2727 static void purge_fragmented_blocks_allcpus(void) 2728 { 2729 int cpu; 2730 2731 for_each_possible_cpu(cpu) 2732 purge_fragmented_blocks(cpu); 2733 } 2734 2735 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 2736 { 2737 struct vmap_block_queue *vbq; 2738 struct vmap_block *vb; 2739 void *vaddr = NULL; 2740 unsigned int order; 2741 2742 BUG_ON(offset_in_page(size)); 2743 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2744 if (WARN_ON(size == 0)) { 2745 /* 2746 * Allocating 0 bytes isn't what caller wants since 2747 * get_order(0) returns funny result. Just warn and terminate 2748 * early. 
2749 */ 2750 return ERR_PTR(-EINVAL); 2751 } 2752 order = get_order(size); 2753 2754 rcu_read_lock(); 2755 vbq = raw_cpu_ptr(&vmap_block_queue); 2756 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2757 unsigned long pages_off; 2758 2759 if (READ_ONCE(vb->free) < (1UL << order)) 2760 continue; 2761 2762 spin_lock(&vb->lock); 2763 if (vb->free < (1UL << order)) { 2764 spin_unlock(&vb->lock); 2765 continue; 2766 } 2767 2768 pages_off = VMAP_BBMAP_BITS - vb->free; 2769 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 2770 WRITE_ONCE(vb->free, vb->free - (1UL << order)); 2771 bitmap_set(vb->used_map, pages_off, (1UL << order)); 2772 if (vb->free == 0) { 2773 spin_lock(&vbq->lock); 2774 list_del_rcu(&vb->free_list); 2775 spin_unlock(&vbq->lock); 2776 } 2777 2778 spin_unlock(&vb->lock); 2779 break; 2780 } 2781 2782 rcu_read_unlock(); 2783 2784 /* Allocate new block if nothing was found */ 2785 if (!vaddr) 2786 vaddr = new_vmap_block(order, gfp_mask); 2787 2788 return vaddr; 2789 } 2790 2791 static void vb_free(unsigned long addr, unsigned long size) 2792 { 2793 unsigned long offset; 2794 unsigned int order; 2795 struct vmap_block *vb; 2796 struct xarray *xa; 2797 2798 BUG_ON(offset_in_page(size)); 2799 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2800 2801 flush_cache_vunmap(addr, addr + size); 2802 2803 order = get_order(size); 2804 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 2805 2806 xa = addr_to_vb_xa(addr); 2807 vb = xa_load(xa, addr_to_vb_idx(addr)); 2808 2809 spin_lock(&vb->lock); 2810 bitmap_clear(vb->used_map, offset, (1UL << order)); 2811 spin_unlock(&vb->lock); 2812 2813 vunmap_range_noflush(addr, addr + size); 2814 2815 if (debug_pagealloc_enabled_static()) 2816 flush_tlb_kernel_range(addr, addr + size); 2817 2818 spin_lock(&vb->lock); 2819 2820 /* Expand the not yet TLB flushed dirty range */ 2821 vb->dirty_min = min(vb->dirty_min, offset); 2822 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2823 2824 WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); 2825 if (vb->dirty == VMAP_BBMAP_BITS) { 2826 BUG_ON(vb->free); 2827 spin_unlock(&vb->lock); 2828 free_vmap_block(vb); 2829 } else 2830 spin_unlock(&vb->lock); 2831 } 2832 2833 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2834 { 2835 LIST_HEAD(purge_list); 2836 int cpu; 2837 2838 if (unlikely(!vmap_initialized)) 2839 return; 2840 2841 mutex_lock(&vmap_purge_lock); 2842 2843 for_each_possible_cpu(cpu) { 2844 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2845 struct vmap_block *vb; 2846 unsigned long idx; 2847 2848 rcu_read_lock(); 2849 xa_for_each(&vbq->vmap_blocks, idx, vb) { 2850 spin_lock(&vb->lock); 2851 2852 /* 2853 * Try to purge a fragmented block first. If it's 2854 * not purgeable, check whether there is dirty 2855 * space to be flushed. 
2856 */ 2857 if (!purge_fragmented_block(vb, &purge_list, false) && 2858 vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { 2859 unsigned long va_start = vb->va->va_start; 2860 unsigned long s, e; 2861 2862 s = va_start + (vb->dirty_min << PAGE_SHIFT); 2863 e = va_start + (vb->dirty_max << PAGE_SHIFT); 2864 2865 start = min(s, start); 2866 end = max(e, end); 2867 2868 /* Prevent that this is flushed again */ 2869 vb->dirty_min = VMAP_BBMAP_BITS; 2870 vb->dirty_max = 0; 2871 2872 flush = 1; 2873 } 2874 spin_unlock(&vb->lock); 2875 } 2876 rcu_read_unlock(); 2877 } 2878 free_purged_blocks(&purge_list); 2879 2880 if (!__purge_vmap_area_lazy(start, end, false) && flush) 2881 flush_tlb_kernel_range(start, end); 2882 mutex_unlock(&vmap_purge_lock); 2883 } 2884 2885 /** 2886 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 2887 * 2888 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 2889 * to amortize TLB flushing overheads. What this means is that any page you 2890 * have now, may, in a former life, have been mapped into kernel virtual 2891 * address by the vmap layer and so there might be some CPUs with TLB entries 2892 * still referencing that page (additional to the regular 1:1 kernel mapping). 2893 * 2894 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 2895 * be sure that none of the pages we have control over will have any aliases 2896 * from the vmap layer. 2897 */ 2898 void vm_unmap_aliases(void) 2899 { 2900 unsigned long start = ULONG_MAX, end = 0; 2901 int flush = 0; 2902 2903 _vm_unmap_aliases(start, end, flush); 2904 } 2905 EXPORT_SYMBOL_GPL(vm_unmap_aliases); 2906 2907 /** 2908 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 2909 * @mem: the pointer returned by vm_map_ram 2910 * @count: the count passed to that vm_map_ram call (cannot unmap partial) 2911 */ 2912 void vm_unmap_ram(const void *mem, unsigned int count) 2913 { 2914 unsigned long size = (unsigned long)count << PAGE_SHIFT; 2915 unsigned long addr = (unsigned long)kasan_reset_tag(mem); 2916 struct vmap_area *va; 2917 2918 might_sleep(); 2919 BUG_ON(!addr); 2920 BUG_ON(addr < VMALLOC_START); 2921 BUG_ON(addr > VMALLOC_END); 2922 BUG_ON(!PAGE_ALIGNED(addr)); 2923 2924 kasan_poison_vmalloc(mem, size); 2925 2926 if (likely(count <= VMAP_MAX_ALLOC)) { 2927 debug_check_no_locks_freed(mem, size); 2928 vb_free(addr, size); 2929 return; 2930 } 2931 2932 va = find_unlink_vmap_area(addr); 2933 if (WARN_ON_ONCE(!va)) 2934 return; 2935 2936 debug_check_no_locks_freed((void *)va->va_start, 2937 (va->va_end - va->va_start)); 2938 free_unmap_vmap_area(va); 2939 } 2940 EXPORT_SYMBOL(vm_unmap_ram); 2941 2942 /** 2943 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 2944 * @pages: an array of pointers to the pages to be mapped 2945 * @count: number of pages 2946 * @node: prefer to allocate data structures on this node 2947 * 2948 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 2949 * faster than vmap so it's good. But if you mix long-life and short-life 2950 * objects with vm_map_ram(), it could consume lots of address space through 2951 * fragmentation (especially on a 32bit machine). You could see failures in 2952 * the end. Please use this function for short-lived objects. 
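* * As an illustrative sketch only (the variable names, and the omitted page allocation and error handling, are assumptions rather than anything taken from this file), a typical short-lived use pairs it with vm_unmap_ram(): * *	void *p = vm_map_ram(pages, count, NUMA_NO_NODE); * *	if (p) { *		memset(p, 0, count * PAGE_SIZE); *		vm_unmap_ram(p, count); *	}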
2953 * 2954 * Returns: a pointer to the address that has been mapped, or %NULL on failure 2955 */ 2956 void *vm_map_ram(struct page **pages, unsigned int count, int node) 2957 { 2958 unsigned long size = (unsigned long)count << PAGE_SHIFT; 2959 unsigned long addr; 2960 void *mem; 2961 2962 if (likely(count <= VMAP_MAX_ALLOC)) { 2963 mem = vb_alloc(size, GFP_KERNEL); 2964 if (IS_ERR(mem)) 2965 return NULL; 2966 addr = (unsigned long)mem; 2967 } else { 2968 struct vmap_area *va; 2969 va = alloc_vmap_area(size, PAGE_SIZE, 2970 VMALLOC_START, VMALLOC_END, 2971 node, GFP_KERNEL, VMAP_RAM, 2972 NULL); 2973 if (IS_ERR(va)) 2974 return NULL; 2975 2976 addr = va->va_start; 2977 mem = (void *)addr; 2978 } 2979 2980 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 2981 pages, PAGE_SHIFT) < 0) { 2982 vm_unmap_ram(mem, count); 2983 return NULL; 2984 } 2985 2986 /* 2987 * Mark the pages as accessible, now that they are mapped. 2988 * With hardware tag-based KASAN, marking is skipped for 2989 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 2990 */ 2991 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); 2992 2993 return mem; 2994 } 2995 EXPORT_SYMBOL(vm_map_ram); 2996 2997 static struct vm_struct *vmlist __initdata; 2998 2999 static inline unsigned int vm_area_page_order(struct vm_struct *vm) 3000 { 3001 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 3002 return vm->page_order; 3003 #else 3004 return 0; 3005 #endif 3006 } 3007 3008 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 3009 { 3010 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 3011 vm->page_order = order; 3012 #else 3013 BUG_ON(order != 0); 3014 #endif 3015 } 3016 3017 /** 3018 * vm_area_add_early - add vmap area early during boot 3019 * @vm: vm_struct to add 3020 * 3021 * This function is used to add fixed kernel vm area to vmlist before 3022 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 3023 * should contain proper values and the other fields should be zero. 3024 * 3025 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 3026 */ 3027 void __init vm_area_add_early(struct vm_struct *vm) 3028 { 3029 struct vm_struct *tmp, **p; 3030 3031 BUG_ON(vmap_initialized); 3032 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 3033 if (tmp->addr >= vm->addr) { 3034 BUG_ON(tmp->addr < vm->addr + vm->size); 3035 break; 3036 } else 3037 BUG_ON(tmp->addr + tmp->size > vm->addr); 3038 } 3039 vm->next = *p; 3040 *p = vm; 3041 } 3042 3043 /** 3044 * vm_area_register_early - register vmap area early during boot 3045 * @vm: vm_struct to register 3046 * @align: requested alignment 3047 * 3048 * This function is used to register kernel vm area before 3049 * vmalloc_init() is called. @vm->size and @vm->flags should contain 3050 * proper values on entry and other fields should be zero. On return, 3051 * vm->addr contains the allocated address. 3052 * 3053 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
3054 */ 3055 void __init vm_area_register_early(struct vm_struct *vm, size_t align) 3056 { 3057 unsigned long addr = ALIGN(VMALLOC_START, align); 3058 struct vm_struct *cur, **p; 3059 3060 BUG_ON(vmap_initialized); 3061 3062 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { 3063 if ((unsigned long)cur->addr - addr >= vm->size) 3064 break; 3065 addr = ALIGN((unsigned long)cur->addr + cur->size, align); 3066 } 3067 3068 BUG_ON(addr > VMALLOC_END - vm->size); 3069 vm->addr = (void *)addr; 3070 vm->next = *p; 3071 *p = vm; 3072 kasan_populate_early_vm_area_shadow(vm->addr, vm->size); 3073 } 3074 3075 static void clear_vm_uninitialized_flag(struct vm_struct *vm) 3076 { 3077 /* 3078 * Before removing VM_UNINITIALIZED, 3079 * we should make sure that vm has proper values. 3080 * This pairs with the smp_rmb() in show_numa_info(). 3081 */ 3082 smp_wmb(); 3083 vm->flags &= ~VM_UNINITIALIZED; 3084 } 3085 3086 static struct vm_struct *__get_vm_area_node(unsigned long size, 3087 unsigned long align, unsigned long shift, unsigned long flags, 3088 unsigned long start, unsigned long end, int node, 3089 gfp_t gfp_mask, const void *caller) 3090 { 3091 struct vmap_area *va; 3092 struct vm_struct *area; 3093 unsigned long requested_size = size; 3094 3095 BUG_ON(in_interrupt()); 3096 size = ALIGN(size, 1ul << shift); 3097 if (unlikely(!size)) 3098 return NULL; 3099 3100 if (flags & VM_IOREMAP) 3101 align = 1ul << clamp_t(int, get_count_order_long(size), 3102 PAGE_SHIFT, IOREMAP_MAX_ORDER); 3103 3104 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 3105 if (unlikely(!area)) 3106 return NULL; 3107 3108 if (!(flags & VM_NO_GUARD)) 3109 size += PAGE_SIZE; 3110 3111 area->flags = flags; 3112 area->caller = caller; 3113 3114 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); 3115 if (IS_ERR(va)) { 3116 kfree(area); 3117 return NULL; 3118 } 3119 3120 /* 3121 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a 3122 * best-effort approach, as they can be mapped outside of vmalloc code. 3123 * For VM_ALLOC mappings, the pages are marked as accessible after 3124 * getting mapped in __vmalloc_node_range(). 3125 * With hardware tag-based KASAN, marking is skipped for 3126 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 3127 */ 3128 if (!(flags & VM_ALLOC)) 3129 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, 3130 KASAN_VMALLOC_PROT_NORMAL); 3131 3132 return area; 3133 } 3134 3135 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 3136 unsigned long start, unsigned long end, 3137 const void *caller) 3138 { 3139 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, 3140 NUMA_NO_NODE, GFP_KERNEL, caller); 3141 } 3142 3143 /** 3144 * get_vm_area - reserve a contiguous kernel virtual area 3145 * @size: size of the area 3146 * @flags: %VM_IOREMAP for I/O mappings or %VM_ALLOC 3147 * 3148 * Search for an area of @size in the kernel virtual mapping area, 3149 * and reserve it for our purposes. Returns the area descriptor 3150 * on success or %NULL on failure. 3151 * 3152 * Return: the area descriptor on success or %NULL on failure.
3153 */ 3154 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 3155 { 3156 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 3157 VMALLOC_START, VMALLOC_END, 3158 NUMA_NO_NODE, GFP_KERNEL, 3159 __builtin_return_address(0)); 3160 } 3161 3162 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 3163 const void *caller) 3164 { 3165 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 3166 VMALLOC_START, VMALLOC_END, 3167 NUMA_NO_NODE, GFP_KERNEL, caller); 3168 } 3169 3170 /** 3171 * find_vm_area - find a continuous kernel virtual area 3172 * @addr: base address 3173 * 3174 * Search for the kernel VM area starting at @addr, and return it. 3175 * It is up to the caller to do all required locking to keep the returned 3176 * pointer valid. 3177 * 3178 * Return: the area descriptor on success or %NULL on failure. 3179 */ 3180 struct vm_struct *find_vm_area(const void *addr) 3181 { 3182 struct vmap_area *va; 3183 3184 va = find_vmap_area((unsigned long)addr); 3185 if (!va) 3186 return NULL; 3187 3188 return va->vm; 3189 } 3190 3191 /** 3192 * remove_vm_area - find and remove a continuous kernel virtual area 3193 * @addr: base address 3194 * 3195 * Search for the kernel VM area starting at @addr, and remove it. 3196 * This function returns the found VM area, but using it is NOT safe 3197 * on SMP machines, except for its size or flags. 3198 * 3199 * Return: the area descriptor on success or %NULL on failure. 3200 */ 3201 struct vm_struct *remove_vm_area(const void *addr) 3202 { 3203 struct vmap_area *va; 3204 struct vm_struct *vm; 3205 3206 might_sleep(); 3207 3208 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 3209 addr)) 3210 return NULL; 3211 3212 va = find_unlink_vmap_area((unsigned long)addr); 3213 if (!va || !va->vm) 3214 return NULL; 3215 vm = va->vm; 3216 3217 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm)); 3218 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm)); 3219 kasan_free_module_shadow(vm); 3220 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm)); 3221 3222 free_unmap_vmap_area(va); 3223 return vm; 3224 } 3225 3226 static inline void set_area_direct_map(const struct vm_struct *area, 3227 int (*set_direct_map)(struct page *page)) 3228 { 3229 int i; 3230 3231 /* HUGE_VMALLOC passes small pages to set_direct_map */ 3232 for (i = 0; i < area->nr_pages; i++) 3233 if (page_address(area->pages[i])) 3234 set_direct_map(area->pages[i]); 3235 } 3236 3237 /* 3238 * Flush the vm mapping and reset the direct map. 3239 */ 3240 static void vm_reset_perms(struct vm_struct *area) 3241 { 3242 unsigned long start = ULONG_MAX, end = 0; 3243 unsigned int page_order = vm_area_page_order(area); 3244 int flush_dmap = 0; 3245 int i; 3246 3247 /* 3248 * Find the start and end range of the direct mappings to make sure that 3249 * the vm_unmap_aliases() flush includes the direct map. 3250 */ 3251 for (i = 0; i < area->nr_pages; i += 1U << page_order) { 3252 unsigned long addr = (unsigned long)page_address(area->pages[i]); 3253 3254 if (addr) { 3255 unsigned long page_size; 3256 3257 page_size = PAGE_SIZE << page_order; 3258 start = min(addr, start); 3259 end = max(addr + page_size, end); 3260 flush_dmap = 1; 3261 } 3262 } 3263 3264 /* 3265 * Set direct map to something invalid so that it won't be cached if 3266 * there are any accesses after the TLB flush, then flush the TLB and 3267 * reset the direct map permissions to the default. 
3268 */ 3269 set_area_direct_map(area, set_direct_map_invalid_noflush); 3270 _vm_unmap_aliases(start, end, flush_dmap); 3271 set_area_direct_map(area, set_direct_map_default_noflush); 3272 } 3273 3274 static void delayed_vfree_work(struct work_struct *w) 3275 { 3276 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); 3277 struct llist_node *t, *llnode; 3278 3279 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) 3280 vfree(llnode); 3281 } 3282 3283 /** 3284 * vfree_atomic - release memory allocated by vmalloc() 3285 * @addr: memory base address 3286 * 3287 * This one is just like vfree() but can be called in any atomic context 3288 * except NMIs. 3289 */ 3290 void vfree_atomic(const void *addr) 3291 { 3292 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 3293 3294 BUG_ON(in_nmi()); 3295 kmemleak_free(addr); 3296 3297 /* 3298 * Use raw_cpu_ptr() because this can be called from preemptible 3299 * context. Preemption is absolutely fine here, because the llist_add() 3300 * implementation is lockless, so it works even if we are adding to 3301 * another cpu's list. schedule_work() should be fine with this too. 3302 */ 3303 if (addr && llist_add((struct llist_node *)addr, &p->list)) 3304 schedule_work(&p->wq); 3305 } 3306 3307 /** 3308 * vfree - Release memory allocated by vmalloc() 3309 * @addr: Memory base address 3310 * 3311 * Free the virtually continuous memory area starting at @addr, as obtained 3312 * from one of the vmalloc() family of APIs. This will usually also free the 3313 * physical memory underlying the virtual allocation, but that memory is 3314 * reference counted, so it will not be freed until the last user goes away. 3315 * 3316 * If @addr is NULL, no operation is performed. 3317 * 3318 * Context: 3319 * May sleep if called *not* from interrupt context. 3320 * Must not be called in NMI context (strictly speaking, it could be 3321 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 3322 * conventions for vfree() arch-dependent would be a really bad idea). 3323 */ 3324 void vfree(const void *addr) 3325 { 3326 struct vm_struct *vm; 3327 int i; 3328 3329 if (unlikely(in_interrupt())) { 3330 vfree_atomic(addr); 3331 return; 3332 } 3333 3334 BUG_ON(in_nmi()); 3335 kmemleak_free(addr); 3336 might_sleep(); 3337 3338 if (!addr) 3339 return; 3340 3341 vm = remove_vm_area(addr); 3342 if (unlikely(!vm)) { 3343 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 3344 addr); 3345 return; 3346 } 3347 3348 if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS)) 3349 vm_reset_perms(vm); 3350 for (i = 0; i < vm->nr_pages; i++) { 3351 struct page *page = vm->pages[i]; 3352 3353 BUG_ON(!page); 3354 mod_memcg_page_state(page, MEMCG_VMALLOC, -1); 3355 /* 3356 * High-order allocs for huge vmallocs are split, so 3357 * can be freed as an array of order-0 allocations 3358 */ 3359 __free_page(page); 3360 cond_resched(); 3361 } 3362 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages); 3363 kvfree(vm->pages); 3364 kfree(vm); 3365 } 3366 EXPORT_SYMBOL(vfree); 3367 3368 /** 3369 * vunmap - release virtual mapping obtained by vmap() 3370 * @addr: memory base address 3371 * 3372 * Free the virtually contiguous memory area starting at @addr, 3373 * which was created from the page array passed to vmap(). 3374 * 3375 * Must not be called in interrupt context. 
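* Note that this only removes the virtual mapping and frees the area descriptor; the pages that were passed to vmap() are not freed here and remain with the caller.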
3376 */ 3377 void vunmap(const void *addr) 3378 { 3379 struct vm_struct *vm; 3380 3381 BUG_ON(in_interrupt()); 3382 might_sleep(); 3383 3384 if (!addr) 3385 return; 3386 vm = remove_vm_area(addr); 3387 if (unlikely(!vm)) { 3388 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", 3389 addr); 3390 return; 3391 } 3392 kfree(vm); 3393 } 3394 EXPORT_SYMBOL(vunmap); 3395 3396 /** 3397 * vmap - map an array of pages into virtually contiguous space 3398 * @pages: array of page pointers 3399 * @count: number of pages to map 3400 * @flags: vm_area->flags 3401 * @prot: page protection for the mapping 3402 * 3403 * Maps @count pages from @pages into contiguous kernel virtual space. 3404 * If @flags contains %VM_MAP_PUT_PAGES, the ownership of the pages array itself 3405 * (which must be kmalloc or vmalloc memory) and one reference per page in it 3406 * are transferred from the caller to vmap(), and will be freed / dropped when 3407 * vfree() is called on the return value. 3408 * 3409 * Return: the address of the area or %NULL on failure 3410 */ 3411 void *vmap(struct page **pages, unsigned int count, 3412 unsigned long flags, pgprot_t prot) 3413 { 3414 struct vm_struct *area; 3415 unsigned long addr; 3416 unsigned long size; /* In bytes */ 3417 3418 might_sleep(); 3419 3420 if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS)) 3421 return NULL; 3422 3423 /* 3424 * Your top guard is someone else's bottom guard. Not having a top 3425 * guard compromises someone else's mappings too. 3426 */ 3427 if (WARN_ON_ONCE(flags & VM_NO_GUARD)) 3428 flags &= ~VM_NO_GUARD; 3429 3430 if (count > totalram_pages()) 3431 return NULL; 3432 3433 size = (unsigned long)count << PAGE_SHIFT; 3434 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 3435 if (!area) 3436 return NULL; 3437 3438 addr = (unsigned long)area->addr; 3439 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 3440 pages, PAGE_SHIFT) < 0) { 3441 vunmap(area->addr); 3442 return NULL; 3443 } 3444 3445 if (flags & VM_MAP_PUT_PAGES) { 3446 area->pages = pages; 3447 area->nr_pages = count; 3448 } 3449 return area->addr; 3450 } 3451 EXPORT_SYMBOL(vmap); 3452 3453 #ifdef CONFIG_VMAP_PFN 3454 struct vmap_pfn_data { 3455 unsigned long *pfns; 3456 pgprot_t prot; 3457 unsigned int idx; 3458 }; 3459 3460 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 3461 { 3462 struct vmap_pfn_data *data = private; 3463 unsigned long pfn = data->pfns[data->idx]; 3464 pte_t ptent; 3465 3466 if (WARN_ON_ONCE(pfn_valid(pfn))) 3467 return -EINVAL; 3468 3469 ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); 3470 set_pte_at(&init_mm, addr, pte, ptent); 3471 3472 data->idx++; 3473 return 0; 3474 } 3475 3476 /** 3477 * vmap_pfn - map an array of PFNs into virtually contiguous space 3478 * @pfns: array of PFNs 3479 * @count: number of pages to map 3480 * @prot: page protection for the mapping 3481 * 3482 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 3483 * the start address of the mapping.
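* * Note that each PFN is expected to have no struct page behind it: the PTE installer above rejects pfn_valid() PFNs, so this interface is meant for memory outside the kernel's direct map (device memory, for example).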
3484 */ 3485 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 3486 { 3487 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 3488 struct vm_struct *area; 3489 3490 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 3491 __builtin_return_address(0)); 3492 if (!area) 3493 return NULL; 3494 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 3495 count * PAGE_SIZE, vmap_pfn_apply, &data)) { 3496 free_vm_area(area); 3497 return NULL; 3498 } 3499 3500 flush_cache_vmap((unsigned long)area->addr, 3501 (unsigned long)area->addr + count * PAGE_SIZE); 3502 3503 return area->addr; 3504 } 3505 EXPORT_SYMBOL_GPL(vmap_pfn); 3506 #endif /* CONFIG_VMAP_PFN */ 3507 3508 static inline unsigned int 3509 vm_area_alloc_pages(gfp_t gfp, int nid, 3510 unsigned int order, unsigned int nr_pages, struct page **pages) 3511 { 3512 unsigned int nr_allocated = 0; 3513 struct page *page; 3514 int i; 3515 3516 /* 3517 * For order-0 pages we make use of the bulk allocator. If 3518 * the page array ends up only partly populated, or not populated 3519 * at all, due to failures, we fall back to the single page 3520 * allocator, which is more permissive. 3521 */ 3522 if (!order) { 3523 while (nr_allocated < nr_pages) { 3524 unsigned int nr, nr_pages_request; 3525 3526 /* 3527 * The maximum allowed request is hard-coded to 100 3528 * pages per call, in order to prevent a long 3529 * preemption-off scenario in the bulk allocator, 3530 * so the range is [1:100]. 3531 */ 3532 nr_pages_request = min(100U, nr_pages - nr_allocated); 3533 3534 /* The memory allocation should consider the mempolicy; we must 3535 * not wrongly use the nearest node when nid == NUMA_NO_NODE, 3536 * otherwise memory may be allocated on only one node even 3537 * though the mempolicy wants it interleaved. 3538 */ 3539 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) 3540 nr = alloc_pages_bulk_array_mempolicy_noprof(gfp, 3541 nr_pages_request, 3542 pages + nr_allocated); 3543 else 3544 nr = alloc_pages_bulk_array_node_noprof(gfp, nid, 3545 nr_pages_request, 3546 pages + nr_allocated); 3547 3548 nr_allocated += nr; 3549 cond_resched(); 3550 3551 /* 3552 * If no pages or only some of them were obtained, 3553 * fall back to the single page allocator. 3554 */ 3555 if (nr != nr_pages_request) 3556 break; 3557 } 3558 } 3559 3560 /* High-order pages or fallback path if "bulk" fails. */ 3561 while (nr_allocated < nr_pages) { 3562 if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current)) 3563 break; 3564 3565 if (nid == NUMA_NO_NODE) 3566 page = alloc_pages_noprof(gfp, order); 3567 else 3568 page = alloc_pages_node_noprof(nid, gfp, order); 3569 3570 if (unlikely(!page)) 3571 break; 3572 3573 /* 3574 * Higher order allocations must be able to be treated as 3575 * independent small pages by callers (as they can with 3576 * small-page vmallocs). Some drivers do their own refcounting 3577 * on vmalloc_to_page() pages, some use page->mapping, 3578 * page->lru, etc. 3579 */ 3580 if (order) 3581 split_page(page, order); 3582 3583 /* 3584 * Careful, we allocate and map page-order pages, but 3585 * tracking is done per PAGE_SIZE page so as to keep the 3586 * vm_struct APIs independent of the physical/mapped size.
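* For example, a single order-2 page is recorded as four consecutive order-0 entries in the pages array filled below.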
3587 */ 3588 for (i = 0; i < (1U << order); i++) 3589 pages[nr_allocated + i] = page + i; 3590 3591 cond_resched(); 3592 nr_allocated += 1U << order; 3593 } 3594 3595 return nr_allocated; 3596 } 3597 3598 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 3599 pgprot_t prot, unsigned int page_shift, 3600 int node) 3601 { 3602 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 3603 bool nofail = gfp_mask & __GFP_NOFAIL; 3604 unsigned long addr = (unsigned long)area->addr; 3605 unsigned long size = get_vm_area_size(area); 3606 unsigned long array_size; 3607 unsigned int nr_small_pages = size >> PAGE_SHIFT; 3608 unsigned int page_order; 3609 unsigned int flags; 3610 int ret; 3611 3612 array_size = (unsigned long)nr_small_pages * sizeof(struct page *); 3613 3614 if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) 3615 gfp_mask |= __GFP_HIGHMEM; 3616 3617 /* Please note that the recursion is strictly bounded. */ 3618 if (array_size > PAGE_SIZE) { 3619 area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node, 3620 area->caller); 3621 } else { 3622 area->pages = kmalloc_node_noprof(array_size, nested_gfp, node); 3623 } 3624 3625 if (!area->pages) { 3626 warn_alloc(gfp_mask, NULL, 3627 "vmalloc error: size %lu, failed to allocate page array size %lu", 3628 nr_small_pages * PAGE_SIZE, array_size); 3629 free_vm_area(area); 3630 return NULL; 3631 } 3632 3633 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); 3634 page_order = vm_area_page_order(area); 3635 3636 /* 3637 * High-order nofail allocations are really expensive and 3638 * potentially dangerous (premature OOM, disruptive reclaim 3639 * and compaction etc.). 3640 * 3641 * Please note, __vmalloc_node_range_noprof() falls back 3642 * to order-0 pages if the high-order attempt is unsuccessful. 3643 */ 3644 area->nr_pages = vm_area_alloc_pages((page_order ? 3645 gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN, 3646 node, page_order, nr_small_pages, area->pages); 3647 3648 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 3649 if (gfp_mask & __GFP_ACCOUNT) { 3650 int i; 3651 3652 for (i = 0; i < area->nr_pages; i++) 3653 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); 3654 } 3655 3656 /* 3657 * If not enough pages were obtained to fulfil the allocation 3658 * request, free the ones that were obtained via vfree(). 3659 */ 3660 if (area->nr_pages != nr_small_pages) { 3661 /* 3662 * vm_area_alloc_pages() can fail due to insufficient memory, but 3663 * also due to: 3664 * 3665 * - a pending fatal signal 3666 * - insufficient huge page-order pages 3667 * 3668 * Since we always retry allocations at order-0 in the huge page 3669 * case, a warning for either is spurious.
3670 */ 3671 if (!fatal_signal_pending(current) && page_order == 0) 3672 warn_alloc(gfp_mask, NULL, 3673 "vmalloc error: size %lu, failed to allocate pages", 3674 area->nr_pages * PAGE_SIZE); 3675 goto fail; 3676 } 3677 3678 /* 3679 * page tables allocations ignore external gfp mask, enforce it 3680 * by the scope API 3681 */ 3682 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3683 flags = memalloc_nofs_save(); 3684 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3685 flags = memalloc_noio_save(); 3686 3687 do { 3688 ret = vmap_pages_range(addr, addr + size, prot, area->pages, 3689 page_shift); 3690 if (nofail && (ret < 0)) 3691 schedule_timeout_uninterruptible(1); 3692 } while (nofail && (ret < 0)); 3693 3694 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3695 memalloc_nofs_restore(flags); 3696 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3697 memalloc_noio_restore(flags); 3698 3699 if (ret < 0) { 3700 warn_alloc(gfp_mask, NULL, 3701 "vmalloc error: size %lu, failed to map pages", 3702 area->nr_pages * PAGE_SIZE); 3703 goto fail; 3704 } 3705 3706 return area->addr; 3707 3708 fail: 3709 vfree(area->addr); 3710 return NULL; 3711 } 3712 3713 /** 3714 * __vmalloc_node_range - allocate virtually contiguous memory 3715 * @size: allocation size 3716 * @align: desired alignment 3717 * @start: vm area range start 3718 * @end: vm area range end 3719 * @gfp_mask: flags for the page level allocator 3720 * @prot: protection mask for the allocated pages 3721 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 3722 * @node: node to use for allocation or NUMA_NO_NODE 3723 * @caller: caller's return address 3724 * 3725 * Allocate enough pages to cover @size from the page level 3726 * allocator with @gfp_mask flags. Please note that the full set of gfp 3727 * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all 3728 * supported. 3729 * Zone modifiers are not supported. From the reclaim modifiers 3730 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) 3731 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and 3732 * __GFP_RETRY_MAYFAIL are not supported). 3733 * 3734 * __GFP_NOWARN can be used to suppress failures messages. 3735 * 3736 * Map them into contiguous kernel virtual space, using a pagetable 3737 * protection of @prot. 3738 * 3739 * Return: the address of the area or %NULL on failure 3740 */ 3741 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, 3742 unsigned long start, unsigned long end, gfp_t gfp_mask, 3743 pgprot_t prot, unsigned long vm_flags, int node, 3744 const void *caller) 3745 { 3746 struct vm_struct *area; 3747 void *ret; 3748 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; 3749 unsigned long real_size = size; 3750 unsigned long real_align = align; 3751 unsigned int shift = PAGE_SHIFT; 3752 3753 if (WARN_ON_ONCE(!size)) 3754 return NULL; 3755 3756 if ((size >> PAGE_SHIFT) > totalram_pages()) { 3757 warn_alloc(gfp_mask, NULL, 3758 "vmalloc error: size %lu, exceeds total pages", 3759 real_size); 3760 return NULL; 3761 } 3762 3763 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { 3764 unsigned long size_per_node; 3765 3766 /* 3767 * Try huge pages. Only try for PAGE_KERNEL allocations, 3768 * others like modules don't yet expect huge pages in 3769 * their allocations due to apply_to_page_range not 3770 * supporting them. 
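* As a rough illustration (not a statement from this file): on a single-node system whose PMD covers 2MB, a 4MB PAGE_KERNEL request with VM_ALLOW_HUGE_VMAP ends up with shift == PMD_SHIFT below, so the alignment is raised to 2MB and the size is rounded up to a 2MB multiple.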
3771 */ 3772 3773 size_per_node = size; 3774 if (node == NUMA_NO_NODE) 3775 size_per_node /= num_online_nodes(); 3776 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE) 3777 shift = PMD_SHIFT; 3778 else 3779 shift = arch_vmap_pte_supported_shift(size_per_node); 3780 3781 align = max(real_align, 1UL << shift); 3782 size = ALIGN(real_size, 1UL << shift); 3783 } 3784 3785 again: 3786 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | 3787 VM_UNINITIALIZED | vm_flags, start, end, node, 3788 gfp_mask, caller); 3789 if (!area) { 3790 bool nofail = gfp_mask & __GFP_NOFAIL; 3791 warn_alloc(gfp_mask, NULL, 3792 "vmalloc error: size %lu, vm_struct allocation failed%s", 3793 real_size, (nofail) ? ". Retrying." : ""); 3794 if (nofail) { 3795 schedule_timeout_uninterruptible(1); 3796 goto again; 3797 } 3798 goto fail; 3799 } 3800 3801 /* 3802 * Prepare arguments for __vmalloc_area_node() and 3803 * kasan_unpoison_vmalloc(). 3804 */ 3805 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { 3806 if (kasan_hw_tags_enabled()) { 3807 /* 3808 * Modify protection bits to allow tagging. 3809 * This must be done before mapping. 3810 */ 3811 prot = arch_vmap_pgprot_tagged(prot); 3812 3813 /* 3814 * Skip page_alloc poisoning and zeroing for physical 3815 * pages backing VM_ALLOC mapping. Memory is instead 3816 * poisoned and zeroed by kasan_unpoison_vmalloc(). 3817 */ 3818 gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO; 3819 } 3820 3821 /* Take note that the mapping is PAGE_KERNEL. */ 3822 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; 3823 } 3824 3825 /* Allocate physical pages and map them into vmalloc space. */ 3826 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 3827 if (!ret) 3828 goto fail; 3829 3830 /* 3831 * Mark the pages as accessible, now that they are mapped. 3832 * The condition for setting KASAN_VMALLOC_INIT should complement the 3833 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check 3834 * to make sure that memory is initialized under the same conditions. 3835 * Tag-based KASAN modes only assign tags to normal non-executable 3836 * allocations, see __kasan_unpoison_vmalloc(). 3837 */ 3838 kasan_flags |= KASAN_VMALLOC_VM_ALLOC; 3839 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && 3840 (gfp_mask & __GFP_SKIP_ZERO)) 3841 kasan_flags |= KASAN_VMALLOC_INIT; 3842 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ 3843 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); 3844 3845 /* 3846 * In this function, newly allocated vm_struct has VM_UNINITIALIZED 3847 * flag. It means that vm_struct is not fully initialized. 3848 * Now, it is fully initialized, so remove this flag here. 3849 */ 3850 clear_vm_uninitialized_flag(area); 3851 3852 size = PAGE_ALIGN(size); 3853 if (!(vm_flags & VM_DEFER_KMEMLEAK)) 3854 kmemleak_vmalloc(area, size, gfp_mask); 3855 3856 return area->addr; 3857 3858 fail: 3859 if (shift > PAGE_SHIFT) { 3860 shift = PAGE_SHIFT; 3861 align = real_align; 3862 size = real_size; 3863 goto again; 3864 } 3865 3866 return NULL; 3867 } 3868 3869 /** 3870 * __vmalloc_node - allocate virtually contiguous memory 3871 * @size: allocation size 3872 * @align: desired alignment 3873 * @gfp_mask: flags for the page level allocator 3874 * @node: node to use for allocation or NUMA_NO_NODE 3875 * @caller: caller's return address 3876 * 3877 * Allocate enough pages to cover @size from the page level allocator with 3878 * @gfp_mask flags. Map them into contiguous kernel virtual space. 
3879 * 3880 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 3881 * and __GFP_NOFAIL are not supported 3882 * 3883 * Any use of gfp flags outside of GFP_KERNEL should be consulted 3884 * with mm people. 3885 * 3886 * Return: pointer to the allocated memory or %NULL on error 3887 */ 3888 void *__vmalloc_node_noprof(unsigned long size, unsigned long align, 3889 gfp_t gfp_mask, int node, const void *caller) 3890 { 3891 return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END, 3892 gfp_mask, PAGE_KERNEL, 0, node, caller); 3893 } 3894 /* 3895 * This is only for performance analysis of vmalloc and stress purpose. 3896 * It is required by vmalloc test module, therefore do not use it other 3897 * than that. 3898 */ 3899 #ifdef CONFIG_TEST_VMALLOC_MODULE 3900 EXPORT_SYMBOL_GPL(__vmalloc_node_noprof); 3901 #endif 3902 3903 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) 3904 { 3905 return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE, 3906 __builtin_return_address(0)); 3907 } 3908 EXPORT_SYMBOL(__vmalloc_noprof); 3909 3910 /** 3911 * vmalloc - allocate virtually contiguous memory 3912 * @size: allocation size 3913 * 3914 * Allocate enough pages to cover @size from the page level 3915 * allocator and map them into contiguous kernel virtual space. 3916 * 3917 * For tight control over page level allocator and protection flags 3918 * use __vmalloc() instead. 3919 * 3920 * Return: pointer to the allocated memory or %NULL on error 3921 */ 3922 void *vmalloc_noprof(unsigned long size) 3923 { 3924 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE, 3925 __builtin_return_address(0)); 3926 } 3927 EXPORT_SYMBOL(vmalloc_noprof); 3928 3929 /** 3930 * vmalloc_huge - allocate virtually contiguous memory, allow huge pages 3931 * @size: allocation size 3932 * @gfp_mask: flags for the page level allocator 3933 * 3934 * Allocate enough pages to cover @size from the page level 3935 * allocator and map them into contiguous kernel virtual space. 3936 * If @size is greater than or equal to PMD_SIZE, allow using 3937 * huge pages for the memory 3938 * 3939 * Return: pointer to the allocated memory or %NULL on error 3940 */ 3941 void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) 3942 { 3943 return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, 3944 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, 3945 NUMA_NO_NODE, __builtin_return_address(0)); 3946 } 3947 EXPORT_SYMBOL_GPL(vmalloc_huge_noprof); 3948 3949 /** 3950 * vzalloc - allocate virtually contiguous memory with zero fill 3951 * @size: allocation size 3952 * 3953 * Allocate enough pages to cover @size from the page level 3954 * allocator and map them into contiguous kernel virtual space. 3955 * The memory allocated is set to zero. 3956 * 3957 * For tight control over page level allocator and protection flags 3958 * use __vmalloc() instead. 3959 * 3960 * Return: pointer to the allocated memory or %NULL on error 3961 */ 3962 void *vzalloc_noprof(unsigned long size) 3963 { 3964 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 3965 __builtin_return_address(0)); 3966 } 3967 EXPORT_SYMBOL(vzalloc_noprof); 3968 3969 /** 3970 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 3971 * @size: allocation size 3972 * 3973 * The resulting memory area is zeroed so it can be mapped to userspace 3974 * without leaking data. 
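 *
 * A minimal sketch (editor's illustration; the buffer and the mmap handler
 * are hypothetical) of the usual pairing with remap_vmalloc_range() in a
 * driver's ->mmap() handler:
 *
 *	buf = vmalloc_user(buf_size);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	err = remap_vmalloc_range(vma, buf, 0);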
3975 * 3976 * Return: pointer to the allocated memory or %NULL on error 3977 */ 3978 void *vmalloc_user_noprof(unsigned long size) 3979 { 3980 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3981 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 3982 VM_USERMAP, NUMA_NO_NODE, 3983 __builtin_return_address(0)); 3984 } 3985 EXPORT_SYMBOL(vmalloc_user_noprof); 3986 3987 /** 3988 * vmalloc_node - allocate memory on a specific node 3989 * @size: allocation size 3990 * @node: numa node 3991 * 3992 * Allocate enough pages to cover @size from the page level 3993 * allocator and map them into contiguous kernel virtual space. 3994 * 3995 * For tight control over page level allocator and protection flags 3996 * use __vmalloc() instead. 3997 * 3998 * Return: pointer to the allocated memory or %NULL on error 3999 */ 4000 void *vmalloc_node_noprof(unsigned long size, int node) 4001 { 4002 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node, 4003 __builtin_return_address(0)); 4004 } 4005 EXPORT_SYMBOL(vmalloc_node_noprof); 4006 4007 /** 4008 * vzalloc_node - allocate memory on a specific node with zero fill 4009 * @size: allocation size 4010 * @node: numa node 4011 * 4012 * Allocate enough pages to cover @size from the page level 4013 * allocator and map them into contiguous kernel virtual space. 4014 * The memory allocated is set to zero. 4015 * 4016 * Return: pointer to the allocated memory or %NULL on error 4017 */ 4018 void *vzalloc_node_noprof(unsigned long size, int node) 4019 { 4020 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node, 4021 __builtin_return_address(0)); 4022 } 4023 EXPORT_SYMBOL(vzalloc_node_noprof); 4024 4025 /** 4026 * vrealloc - reallocate virtually contiguous memory; contents remain unchanged 4027 * @p: object to reallocate memory for 4028 * @size: the size to reallocate 4029 * @flags: the flags for the page level allocator 4030 * 4031 * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and 4032 * @p is not a %NULL pointer, the object pointed to is freed. 4033 * 4034 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the 4035 * initial memory allocation, every subsequent call to this API for the same 4036 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that 4037 * __GFP_ZERO is not fully honored by this API. 4038 * 4039 * In any case, the contents of the object pointed to are preserved up to the 4040 * lesser of the new and old sizes. 4041 * 4042 * This function must not be called concurrently with itself or vfree() for the 4043 * same memory allocation. 4044 * 4045 * Return: pointer to the allocated memory; %NULL if @size is zero or in case of 4046 * failure 4047 */ 4048 void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) 4049 { 4050 size_t old_size = 0; 4051 void *n; 4052 4053 if (!size) { 4054 vfree(p); 4055 return NULL; 4056 } 4057 4058 if (p) { 4059 struct vm_struct *vm; 4060 4061 vm = find_vm_area(p); 4062 if (unlikely(!vm)) { 4063 WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p); 4064 return NULL; 4065 } 4066 4067 old_size = get_vm_area_size(vm); 4068 } 4069 4070 /* 4071 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What 4072 * would be a good heuristic for when to shrink the vm_area? 4073 */ 4074 if (size <= old_size) { 4075 /* Zero out spare memory. */ 4076 if (want_init_on_alloc(flags)) 4077 memset((void *)p + size, 0, old_size - size); 4078 4079 return (void *)p; 4080 } 4081 4082 /* TODO: Grow the vm_area, i.e. 
allocate and map additional pages. */ 4083 n = __vmalloc_noprof(size, flags); 4084 if (!n) 4085 return NULL; 4086 4087 if (p) { 4088 memcpy(n, p, old_size); 4089 vfree(p); 4090 } 4091 4092 return n; 4093 } 4094 4095 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 4096 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 4097 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 4098 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 4099 #else 4100 /* 4101 * 64b systems should always have either DMA or DMA32 zones. For others 4102 * GFP_DMA32 should do the right thing and use the normal zone. 4103 */ 4104 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 4105 #endif 4106 4107 /** 4108 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 4109 * @size: allocation size 4110 * 4111 * Allocate enough 32bit PA addressable pages to cover @size from the 4112 * page level allocator and map them into contiguous kernel virtual space. 4113 * 4114 * Return: pointer to the allocated memory or %NULL on error 4115 */ 4116 void *vmalloc_32_noprof(unsigned long size) 4117 { 4118 return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 4119 __builtin_return_address(0)); 4120 } 4121 EXPORT_SYMBOL(vmalloc_32_noprof); 4122 4123 /** 4124 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 4125 * @size: allocation size 4126 * 4127 * The resulting memory area is 32bit addressable and zeroed so it can be 4128 * mapped to userspace without leaking data. 4129 * 4130 * Return: pointer to the allocated memory or %NULL on error 4131 */ 4132 void *vmalloc_32_user_noprof(unsigned long size) 4133 { 4134 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, 4135 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 4136 VM_USERMAP, NUMA_NO_NODE, 4137 __builtin_return_address(0)); 4138 } 4139 EXPORT_SYMBOL(vmalloc_32_user_noprof); 4140 4141 /* 4142 * Atomically zero bytes in the iterator. 4143 * 4144 * Returns the number of zeroed bytes. 4145 */ 4146 static size_t zero_iter(struct iov_iter *iter, size_t count) 4147 { 4148 size_t remains = count; 4149 4150 while (remains > 0) { 4151 size_t num, copied; 4152 4153 num = min_t(size_t, remains, PAGE_SIZE); 4154 copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); 4155 remains -= copied; 4156 4157 if (copied < num) 4158 break; 4159 } 4160 4161 return count - remains; 4162 } 4163 4164 /* 4165 * small helper routine, copy contents to iter from addr. 4166 * If the page is not present, fill zero. 4167 * 4168 * Returns the number of copied bytes. 4169 */ 4170 static size_t aligned_vread_iter(struct iov_iter *iter, 4171 const char *addr, size_t count) 4172 { 4173 size_t remains = count; 4174 struct page *page; 4175 4176 while (remains > 0) { 4177 unsigned long offset, length; 4178 size_t copied = 0; 4179 4180 offset = offset_in_page(addr); 4181 length = PAGE_SIZE - offset; 4182 if (length > remains) 4183 length = remains; 4184 page = vmalloc_to_page(addr); 4185 /* 4186 * To do safe access to this _mapped_ area, we need lock. But 4187 * adding lock here means that we need to add overhead of 4188 * vmalloc()/vfree() calls for this _debug_ interface, rarely 4189 * used. Instead of that, we'll use an local mapping via 4190 * copy_page_to_iter_nofault() and accept a small overhead in 4191 * this access function. 
4192 */
4193 if (page)
4194 copied = copy_page_to_iter_nofault(page, offset,
4195 length, iter);
4196 else
4197 copied = zero_iter(iter, length);
4198
4199 addr += copied;
4200 remains -= copied;
4201
4202 if (copied != length)
4203 break;
4204 }
4205
4206 return count - remains;
4207 }
4208
4209 /*
4210 * Read from a vm_map_ram region of memory.
4211 *
4212 * Returns the number of copied bytes.
4213 */
4214 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
4215 size_t count, unsigned long flags)
4216 {
4217 char *start;
4218 struct vmap_block *vb;
4219 struct xarray *xa;
4220 unsigned long offset;
4221 unsigned int rs, re;
4222 size_t remains, n;
4223
4224 /*
4225 * If the area was created by the vm_map_ram() interface directly,
4226 * without being further subdivided and delegated to vmap_block
4227 * management, handle it here.
4228 */
4229 if (!(flags & VMAP_BLOCK))
4230 return aligned_vread_iter(iter, addr, count);
4231
4232 remains = count;
4233
4234 /*
4235 * The area is split into regions and tracked with vmap_block; read out
4236 * each region and zero-fill the holes between regions.
4237 */
4238 xa = addr_to_vb_xa((unsigned long) addr);
4239 vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
4240 if (!vb)
4241 goto finished_zero;
4242
4243 spin_lock(&vb->lock);
4244 if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
4245 spin_unlock(&vb->lock);
4246 goto finished_zero;
4247 }
4248
4249 for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
4250 size_t copied;
4251
4252 if (remains == 0)
4253 goto finished;
4254
4255 start = vmap_block_vaddr(vb->va->va_start, rs);
4256
4257 if (addr < start) {
4258 size_t to_zero = min_t(size_t, start - addr, remains);
4259 size_t zeroed = zero_iter(iter, to_zero);
4260
4261 addr += zeroed;
4262 remains -= zeroed;
4263
4264 if (remains == 0 || zeroed != to_zero)
4265 goto finished;
4266 }
4267
4268 /* It could start reading from the middle of a used region. */
4269 offset = offset_in_page(addr);
4270 n = ((re - rs + 1) << PAGE_SHIFT) - offset;
4271 if (n > remains)
4272 n = remains;
4273
4274 copied = aligned_vread_iter(iter, start + offset, n);
4275
4276 addr += copied;
4277 remains -= copied;
4278
4279 if (copied != n)
4280 goto finished;
4281 }
4282
4283 spin_unlock(&vb->lock);
4284
4285 finished_zero:
4286 /* Zero-fill the remaining dirty or free regions. */
4287 return count - remains + zero_iter(iter, remains);
4288 finished:
4289 /* We couldn't copy/zero everything. */
4290 spin_unlock(&vb->lock);
4291 return count - remains;
4292 }
4293
4294 /**
4295 * vread_iter() - read vmalloc area in a safe way to an iterator.
4296 * @iter: the iterator to which data should be written.
4297 * @addr: vm address.
4298 * @count: number of bytes to be read.
4299 *
4300 * This function checks that @addr is a valid vmalloc'ed area and
4301 * copies data from that area to the given iterator. If the given memory range
4302 * of [addr...addr+count) includes some valid address, data is copied to
4303 * the proper area of @iter. If there are memory holes, they'll be zero-filled.
4304 * An IOREMAP area is treated as a memory hole and no copy is done.
4305 *
4306 * If [addr...addr+count) doesn't include any intersection with a live
4307 * vm_struct area, returns 0. @iter should target a kernel buffer.
4308 *
4309 * Note: In usual ops, vread_iter() is never necessary because the caller
4310 * should know the vmalloc() area is valid and can use memcpy().
4311 * This is for routines which have to access the vmalloc area without
4312 * any information, such as /proc/kcore.
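 *
 * A minimal sketch (editor's illustration, not taken from an in-tree caller)
 * of reading into a kernel buffer through a kvec-backed iterator:
 *
 *	struct kvec kvec = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, len);
 *	copied = vread_iter(&iter, vmalloc_addr, len);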
4313 * 4314 * Return: number of bytes for which addr and buf should be increased 4315 * (same number as @count) or %0 if [addr...addr+count) doesn't 4316 * include any intersection with valid vmalloc area 4317 */ 4318 long vread_iter(struct iov_iter *iter, const char *addr, size_t count) 4319 { 4320 struct vmap_node *vn; 4321 struct vmap_area *va; 4322 struct vm_struct *vm; 4323 char *vaddr; 4324 size_t n, size, flags, remains; 4325 unsigned long next; 4326 4327 addr = kasan_reset_tag(addr); 4328 4329 /* Don't allow overflow */ 4330 if ((unsigned long) addr + count < count) 4331 count = -(unsigned long) addr; 4332 4333 remains = count; 4334 4335 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); 4336 if (!vn) 4337 goto finished_zero; 4338 4339 /* no intersects with alive vmap_area */ 4340 if ((unsigned long)addr + remains <= va->va_start) 4341 goto finished_zero; 4342 4343 do { 4344 size_t copied; 4345 4346 if (remains == 0) 4347 goto finished; 4348 4349 vm = va->vm; 4350 flags = va->flags & VMAP_FLAGS_MASK; 4351 /* 4352 * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need 4353 * be set together with VMAP_RAM. 4354 */ 4355 WARN_ON(flags == VMAP_BLOCK); 4356 4357 if (!vm && !flags) 4358 goto next_va; 4359 4360 if (vm && (vm->flags & VM_UNINITIALIZED)) 4361 goto next_va; 4362 4363 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 4364 smp_rmb(); 4365 4366 vaddr = (char *) va->va_start; 4367 size = vm ? get_vm_area_size(vm) : va_size(va); 4368 4369 if (addr >= vaddr + size) 4370 goto next_va; 4371 4372 if (addr < vaddr) { 4373 size_t to_zero = min_t(size_t, vaddr - addr, remains); 4374 size_t zeroed = zero_iter(iter, to_zero); 4375 4376 addr += zeroed; 4377 remains -= zeroed; 4378 4379 if (remains == 0 || zeroed != to_zero) 4380 goto finished; 4381 } 4382 4383 n = vaddr + size - addr; 4384 if (n > remains) 4385 n = remains; 4386 4387 if (flags & VMAP_RAM) 4388 copied = vmap_ram_vread_iter(iter, addr, n, flags); 4389 else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE)))) 4390 copied = aligned_vread_iter(iter, addr, n); 4391 else /* IOREMAP | SPARSE area is treated as memory hole */ 4392 copied = zero_iter(iter, n); 4393 4394 addr += copied; 4395 remains -= copied; 4396 4397 if (copied != n) 4398 goto finished; 4399 4400 next_va: 4401 next = va->va_end; 4402 spin_unlock(&vn->busy.lock); 4403 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); 4404 4405 finished_zero: 4406 if (vn) 4407 spin_unlock(&vn->busy.lock); 4408 4409 /* zero-fill memory holes */ 4410 return count - remains + zero_iter(iter, remains); 4411 finished: 4412 /* Nothing remains, or We couldn't copy/zero everything. */ 4413 if (vn) 4414 spin_unlock(&vn->busy.lock); 4415 4416 return count - remains; 4417 } 4418 4419 /** 4420 * remap_vmalloc_range_partial - map vmalloc pages to userspace 4421 * @vma: vma to cover 4422 * @uaddr: target user address to start at 4423 * @kaddr: virtual address of vmalloc kernel memory 4424 * @pgoff: offset from @kaddr to start at 4425 * @size: size of map area 4426 * 4427 * Returns: 0 for success, -Exxx on failure 4428 * 4429 * This function checks that @kaddr is a valid vmalloc'ed area, 4430 * and that it is big enough to cover the range starting at 4431 * @uaddr in @vma. Will return failure if that criteria isn't 4432 * met. 
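 *
 * Editor's sketch (hypothetical values): to expose only the second page of a
 * VM_USERMAP allocation kbuf at the start of @vma:
 *
 *	err = remap_vmalloc_range_partial(vma, vma->vm_start,
 *					  kbuf, 1, PAGE_SIZE);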
4433 *
4434 * Similar to remap_pfn_range() (see mm/memory.c)
4435 */
4436 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
4437 void *kaddr, unsigned long pgoff,
4438 unsigned long size)
4439 {
4440 struct vm_struct *area;
4441 unsigned long off;
4442 unsigned long end_index;
4443
4444 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
4445 return -EINVAL;
4446
4447 size = PAGE_ALIGN(size);
4448
4449 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
4450 return -EINVAL;
4451
4452 area = find_vm_area(kaddr);
4453 if (!area)
4454 return -EINVAL;
4455
4456 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
4457 return -EINVAL;
4458
4459 if (check_add_overflow(size, off, &end_index) ||
4460 end_index > get_vm_area_size(area))
4461 return -EINVAL;
4462 kaddr += off;
4463
4464 do {
4465 struct page *page = vmalloc_to_page(kaddr);
4466 int ret;
4467
4468 ret = vm_insert_page(vma, uaddr, page);
4469 if (ret)
4470 return ret;
4471
4472 uaddr += PAGE_SIZE;
4473 kaddr += PAGE_SIZE;
4474 size -= PAGE_SIZE;
4475 } while (size > 0);
4476
4477 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
4478
4479 return 0;
4480 }
4481
4482 /**
4483 * remap_vmalloc_range - map vmalloc pages to userspace
4484 * @vma: vma to cover (map full range of vma)
4485 * @addr: vmalloc memory
4486 * @pgoff: number of pages into addr before first page to map
4487 *
4488 * Returns: 0 for success, -Exxx on failure
4489 *
4490 * This function checks that @addr is a valid vmalloc'ed area, and
4491 * that it is big enough to cover the vma. Will return failure if
4492 * that criterion isn't met.
4493 *
4494 * Similar to remap_pfn_range() (see mm/memory.c)
4495 */
4496 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
4497 unsigned long pgoff)
4498 {
4499 return remap_vmalloc_range_partial(vma, vma->vm_start,
4500 addr, pgoff,
4501 vma->vm_end - vma->vm_start);
4502 }
4503 EXPORT_SYMBOL(remap_vmalloc_range);
4504
4505 void free_vm_area(struct vm_struct *area)
4506 {
4507 struct vm_struct *ret;
4508 ret = remove_vm_area(area->addr);
4509 BUG_ON(ret != area);
4510 kfree(area);
4511 }
4512 EXPORT_SYMBOL_GPL(free_vm_area);
4513
4514 #ifdef CONFIG_SMP
4515 static struct vmap_area *node_to_va(struct rb_node *n)
4516 {
4517 return rb_entry_safe(n, struct vmap_area, rb_node);
4518 }
4519
4520 /**
4521 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
4522 * @addr: target address
4523 *
4524 * Returns: the vmap_area that contains @addr if one is found. If there is
4525 * no such area, the closest preceding (highest, in reverse order) vmap_area
4526 * is returned, i.e. one with va->va_start < addr && va->va_end < addr,
4527 * or NULL if there are no areas at all before @addr.
4528 */
4529 static struct vmap_area *
4530 pvm_find_va_enclose_addr(unsigned long addr)
4531 {
4532 struct vmap_area *va, *tmp;
4533 struct rb_node *n;
4534
4535 n = free_vmap_area_root.rb_node;
4536 va = NULL;
4537
4538 while (n) {
4539 tmp = rb_entry(n, struct vmap_area, rb_node);
4540 if (tmp->va_start <= addr) {
4541 va = tmp;
4542 if (tmp->va_end >= addr)
4543 break;
4544
4545 n = n->rb_right;
4546 } else {
4547 n = n->rb_left;
4548 }
4549 }
4550
4551 return va;
4552 }
4553
4554 /**
4555 * pvm_determine_end_from_reverse - find the highest aligned address
4556 * of a free block below VMALLOC_END
4557 * @va:
4558 * in - the VA we start the search from (reverse order);
4559 * out - the VA with the highest aligned end address.
4560 * @align: alignment for required highest address 4561 * 4562 * Returns: determined end address within vmap_area 4563 */ 4564 static unsigned long 4565 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 4566 { 4567 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 4568 unsigned long addr; 4569 4570 if (likely(*va)) { 4571 list_for_each_entry_from_reverse((*va), 4572 &free_vmap_area_list, list) { 4573 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 4574 if ((*va)->va_start < addr) 4575 return addr; 4576 } 4577 } 4578 4579 return 0; 4580 } 4581 4582 /** 4583 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 4584 * @offsets: array containing offset of each area 4585 * @sizes: array containing size of each area 4586 * @nr_vms: the number of areas to allocate 4587 * @align: alignment, all entries in @offsets and @sizes must be aligned to this 4588 * 4589 * Returns: kmalloc'd vm_struct pointer array pointing to allocated 4590 * vm_structs on success, %NULL on failure 4591 * 4592 * Percpu allocator wants to use congruent vm areas so that it can 4593 * maintain the offsets among percpu areas. This function allocates 4594 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 4595 * be scattered pretty far, distance between two areas easily going up 4596 * to gigabytes. To avoid interacting with regular vmallocs, these 4597 * areas are allocated from top. 4598 * 4599 * Despite its complicated look, this allocator is rather simple. It 4600 * does everything top-down and scans free blocks from the end looking 4601 * for matching base. While scanning, if any of the areas do not fit the 4602 * base address is pulled down to fit the area. Scanning is repeated till 4603 * all the areas fit and then all necessary data structures are inserted 4604 * and the result is returned. 4605 */ 4606 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 4607 const size_t *sizes, int nr_vms, 4608 size_t align) 4609 { 4610 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 4611 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 4612 struct vmap_area **vas, *va; 4613 struct vm_struct **vms; 4614 int area, area2, last_area, term_area; 4615 unsigned long base, start, size, end, last_end, orig_start, orig_end; 4616 bool purged = false; 4617 4618 /* verify parameters and allocate data structures */ 4619 BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 4620 for (last_area = 0, area = 0; area < nr_vms; area++) { 4621 start = offsets[area]; 4622 end = start + sizes[area]; 4623 4624 /* is everything aligned properly? 
*/ 4625 BUG_ON(!IS_ALIGNED(offsets[area], align)); 4626 BUG_ON(!IS_ALIGNED(sizes[area], align)); 4627 4628 /* detect the area with the highest address */ 4629 if (start > offsets[last_area]) 4630 last_area = area; 4631 4632 for (area2 = area + 1; area2 < nr_vms; area2++) { 4633 unsigned long start2 = offsets[area2]; 4634 unsigned long end2 = start2 + sizes[area2]; 4635 4636 BUG_ON(start2 < end && start < end2); 4637 } 4638 } 4639 last_end = offsets[last_area] + sizes[last_area]; 4640 4641 if (vmalloc_end - vmalloc_start < last_end) { 4642 WARN_ON(true); 4643 return NULL; 4644 } 4645 4646 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 4647 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 4648 if (!vas || !vms) 4649 goto err_free2; 4650 4651 for (area = 0; area < nr_vms; area++) { 4652 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 4653 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 4654 if (!vas[area] || !vms[area]) 4655 goto err_free; 4656 } 4657 retry: 4658 spin_lock(&free_vmap_area_lock); 4659 4660 /* start scanning - we scan from the top, begin with the last area */ 4661 area = term_area = last_area; 4662 start = offsets[area]; 4663 end = start + sizes[area]; 4664 4665 va = pvm_find_va_enclose_addr(vmalloc_end); 4666 base = pvm_determine_end_from_reverse(&va, align) - end; 4667 4668 while (true) { 4669 /* 4670 * base might have underflowed, add last_end before 4671 * comparing. 4672 */ 4673 if (base + last_end < vmalloc_start + last_end) 4674 goto overflow; 4675 4676 /* 4677 * Fitting base has not been found. 4678 */ 4679 if (va == NULL) 4680 goto overflow; 4681 4682 /* 4683 * If required width exceeds current VA block, move 4684 * base downwards and then recheck. 4685 */ 4686 if (base + end > va->va_end) { 4687 base = pvm_determine_end_from_reverse(&va, align) - end; 4688 term_area = area; 4689 continue; 4690 } 4691 4692 /* 4693 * If this VA does not fit, move base downwards and recheck. 4694 */ 4695 if (base + start < va->va_start) { 4696 va = node_to_va(rb_prev(&va->rb_node)); 4697 base = pvm_determine_end_from_reverse(&va, align) - end; 4698 term_area = area; 4699 continue; 4700 } 4701 4702 /* 4703 * This area fits, move on to the previous one. If 4704 * the previous one is the terminal one, we're done. 4705 */ 4706 area = (area + nr_vms - 1) % nr_vms; 4707 if (area == term_area) 4708 break; 4709 4710 start = offsets[area]; 4711 end = start + sizes[area]; 4712 va = pvm_find_va_enclose_addr(base + end); 4713 } 4714 4715 /* we've found a fitting base, insert all va's */ 4716 for (area = 0; area < nr_vms; area++) { 4717 int ret; 4718 4719 start = base + offsets[area]; 4720 size = sizes[area]; 4721 4722 va = pvm_find_va_enclose_addr(start); 4723 if (WARN_ON_ONCE(va == NULL)) 4724 /* It is a BUG(), but trigger recovery instead. */ 4725 goto recovery; 4726 4727 ret = va_clip(&free_vmap_area_root, 4728 &free_vmap_area_list, va, start, size); 4729 if (WARN_ON_ONCE(unlikely(ret))) 4730 /* It is a BUG(), but trigger recovery instead. */ 4731 goto recovery; 4732 4733 /* Allocated area. 
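Record the clipped range in the preallocated vmap_area; it is inserted into the busy tree later.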
*/ 4734 va = vas[area]; 4735 va->va_start = start; 4736 va->va_end = start + size; 4737 } 4738 4739 spin_unlock(&free_vmap_area_lock); 4740 4741 /* populate the kasan shadow space */ 4742 for (area = 0; area < nr_vms; area++) { 4743 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) 4744 goto err_free_shadow; 4745 } 4746 4747 /* insert all vm's */ 4748 for (area = 0; area < nr_vms; area++) { 4749 struct vmap_node *vn = addr_to_node(vas[area]->va_start); 4750 4751 spin_lock(&vn->busy.lock); 4752 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); 4753 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 4754 pcpu_get_vm_areas); 4755 spin_unlock(&vn->busy.lock); 4756 } 4757 4758 /* 4759 * Mark allocated areas as accessible. Do it now as a best-effort 4760 * approach, as they can be mapped outside of vmalloc code. 4761 * With hardware tag-based KASAN, marking is skipped for 4762 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 4763 */ 4764 for (area = 0; area < nr_vms; area++) 4765 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, 4766 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); 4767 4768 kfree(vas); 4769 return vms; 4770 4771 recovery: 4772 /* 4773 * Remove previously allocated areas. There is no 4774 * need in removing these areas from the busy tree, 4775 * because they are inserted only on the final step 4776 * and when pcpu_get_vm_areas() is success. 4777 */ 4778 while (area--) { 4779 orig_start = vas[area]->va_start; 4780 orig_end = vas[area]->va_end; 4781 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 4782 &free_vmap_area_list); 4783 if (va) 4784 kasan_release_vmalloc(orig_start, orig_end, 4785 va->va_start, va->va_end); 4786 vas[area] = NULL; 4787 } 4788 4789 overflow: 4790 spin_unlock(&free_vmap_area_lock); 4791 if (!purged) { 4792 reclaim_and_purge_vmap_areas(); 4793 purged = true; 4794 4795 /* Before "retry", check if we recover. */ 4796 for (area = 0; area < nr_vms; area++) { 4797 if (vas[area]) 4798 continue; 4799 4800 vas[area] = kmem_cache_zalloc( 4801 vmap_area_cachep, GFP_KERNEL); 4802 if (!vas[area]) 4803 goto err_free; 4804 } 4805 4806 goto retry; 4807 } 4808 4809 err_free: 4810 for (area = 0; area < nr_vms; area++) { 4811 if (vas[area]) 4812 kmem_cache_free(vmap_area_cachep, vas[area]); 4813 4814 kfree(vms[area]); 4815 } 4816 err_free2: 4817 kfree(vas); 4818 kfree(vms); 4819 return NULL; 4820 4821 err_free_shadow: 4822 spin_lock(&free_vmap_area_lock); 4823 /* 4824 * We release all the vmalloc shadows, even the ones for regions that 4825 * hadn't been successfully added. This relies on kasan_release_vmalloc 4826 * being able to tolerate this case. 4827 */ 4828 for (area = 0; area < nr_vms; area++) { 4829 orig_start = vas[area]->va_start; 4830 orig_end = vas[area]->va_end; 4831 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 4832 &free_vmap_area_list); 4833 if (va) 4834 kasan_release_vmalloc(orig_start, orig_end, 4835 va->va_start, va->va_end); 4836 vas[area] = NULL; 4837 kfree(vms[area]); 4838 } 4839 spin_unlock(&free_vmap_area_lock); 4840 kfree(vas); 4841 kfree(vms); 4842 return NULL; 4843 } 4844 4845 /** 4846 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 4847 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 4848 * @nr_vms: the number of allocated areas 4849 * 4850 * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
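 *
 * Editor's note (illustrative pairing, mirroring how the percpu allocator
 * uses these helpers):
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, nr_vms);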
4851 */ 4852 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 4853 { 4854 int i; 4855 4856 for (i = 0; i < nr_vms; i++) 4857 free_vm_area(vms[i]); 4858 kfree(vms); 4859 } 4860 #endif /* CONFIG_SMP */ 4861 4862 #ifdef CONFIG_PRINTK 4863 bool vmalloc_dump_obj(void *object) 4864 { 4865 const void *caller; 4866 struct vm_struct *vm; 4867 struct vmap_area *va; 4868 struct vmap_node *vn; 4869 unsigned long addr; 4870 unsigned int nr_pages; 4871 4872 addr = PAGE_ALIGN((unsigned long) object); 4873 vn = addr_to_node(addr); 4874 4875 if (!spin_trylock(&vn->busy.lock)) 4876 return false; 4877 4878 va = __find_vmap_area(addr, &vn->busy.root); 4879 if (!va || !va->vm) { 4880 spin_unlock(&vn->busy.lock); 4881 return false; 4882 } 4883 4884 vm = va->vm; 4885 addr = (unsigned long) vm->addr; 4886 caller = vm->caller; 4887 nr_pages = vm->nr_pages; 4888 spin_unlock(&vn->busy.lock); 4889 4890 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", 4891 nr_pages, addr, caller); 4892 4893 return true; 4894 } 4895 #endif 4896 4897 #ifdef CONFIG_PROC_FS 4898 static void show_numa_info(struct seq_file *m, struct vm_struct *v) 4899 { 4900 if (IS_ENABLED(CONFIG_NUMA)) { 4901 unsigned int nr, *counters = m->private; 4902 unsigned int step = 1U << vm_area_page_order(v); 4903 4904 if (!counters) 4905 return; 4906 4907 if (v->flags & VM_UNINITIALIZED) 4908 return; 4909 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 4910 smp_rmb(); 4911 4912 memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 4913 4914 for (nr = 0; nr < v->nr_pages; nr += step) 4915 counters[page_to_nid(v->pages[nr])] += step; 4916 for_each_node_state(nr, N_HIGH_MEMORY) 4917 if (counters[nr]) 4918 seq_printf(m, " N%u=%u", nr, counters[nr]); 4919 } 4920 } 4921 4922 static void show_purge_info(struct seq_file *m) 4923 { 4924 struct vmap_node *vn; 4925 struct vmap_area *va; 4926 int i; 4927 4928 for (i = 0; i < nr_vmap_nodes; i++) { 4929 vn = &vmap_nodes[i]; 4930 4931 spin_lock(&vn->lazy.lock); 4932 list_for_each_entry(va, &vn->lazy.head, list) { 4933 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 4934 (void *)va->va_start, (void *)va->va_end, 4935 va->va_end - va->va_start); 4936 } 4937 spin_unlock(&vn->lazy.lock); 4938 } 4939 } 4940 4941 static int vmalloc_info_show(struct seq_file *m, void *p) 4942 { 4943 struct vmap_node *vn; 4944 struct vmap_area *va; 4945 struct vm_struct *v; 4946 int i; 4947 4948 for (i = 0; i < nr_vmap_nodes; i++) { 4949 vn = &vmap_nodes[i]; 4950 4951 spin_lock(&vn->busy.lock); 4952 list_for_each_entry(va, &vn->busy.head, list) { 4953 if (!va->vm) { 4954 if (va->flags & VMAP_RAM) 4955 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 4956 (void *)va->va_start, (void *)va->va_end, 4957 va->va_end - va->va_start); 4958 4959 continue; 4960 } 4961 4962 v = va->vm; 4963 4964 seq_printf(m, "0x%pK-0x%pK %7ld", 4965 v->addr, v->addr + v->size, v->size); 4966 4967 if (v->caller) 4968 seq_printf(m, " %pS", v->caller); 4969 4970 if (v->nr_pages) 4971 seq_printf(m, " pages=%d", v->nr_pages); 4972 4973 if (v->phys_addr) 4974 seq_printf(m, " phys=%pa", &v->phys_addr); 4975 4976 if (v->flags & VM_IOREMAP) 4977 seq_puts(m, " ioremap"); 4978 4979 if (v->flags & VM_SPARSE) 4980 seq_puts(m, " sparse"); 4981 4982 if (v->flags & VM_ALLOC) 4983 seq_puts(m, " vmalloc"); 4984 4985 if (v->flags & VM_MAP) 4986 seq_puts(m, " vmap"); 4987 4988 if (v->flags & VM_USERMAP) 4989 seq_puts(m, " user"); 4990 4991 if (v->flags & VM_DMA_COHERENT) 4992 seq_puts(m, " dma-coherent"); 4993 4994 if (is_vmalloc_addr(v->pages)) 
4995 seq_puts(m, " vpages"); 4996 4997 show_numa_info(m, v); 4998 seq_putc(m, '\n'); 4999 } 5000 spin_unlock(&vn->busy.lock); 5001 } 5002 5003 /* 5004 * As a final step, dump "unpurged" areas. 5005 */ 5006 show_purge_info(m); 5007 return 0; 5008 } 5009 5010 static int __init proc_vmalloc_init(void) 5011 { 5012 void *priv_data = NULL; 5013 5014 if (IS_ENABLED(CONFIG_NUMA)) 5015 priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 5016 5017 proc_create_single_data("vmallocinfo", 5018 0400, NULL, vmalloc_info_show, priv_data); 5019 5020 return 0; 5021 } 5022 module_init(proc_vmalloc_init); 5023 5024 #endif 5025 5026 static void __init vmap_init_free_space(void) 5027 { 5028 unsigned long vmap_start = 1; 5029 const unsigned long vmap_end = ULONG_MAX; 5030 struct vmap_area *free; 5031 struct vm_struct *busy; 5032 5033 /* 5034 * B F B B B F 5035 * -|-----|.....|-----|-----|-----|.....|- 5036 * | The KVA space | 5037 * |<--------------------------------->| 5038 */ 5039 for (busy = vmlist; busy; busy = busy->next) { 5040 if ((unsigned long) busy->addr - vmap_start > 0) { 5041 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5042 if (!WARN_ON_ONCE(!free)) { 5043 free->va_start = vmap_start; 5044 free->va_end = (unsigned long) busy->addr; 5045 5046 insert_vmap_area_augment(free, NULL, 5047 &free_vmap_area_root, 5048 &free_vmap_area_list); 5049 } 5050 } 5051 5052 vmap_start = (unsigned long) busy->addr + busy->size; 5053 } 5054 5055 if (vmap_end - vmap_start > 0) { 5056 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5057 if (!WARN_ON_ONCE(!free)) { 5058 free->va_start = vmap_start; 5059 free->va_end = vmap_end; 5060 5061 insert_vmap_area_augment(free, NULL, 5062 &free_vmap_area_root, 5063 &free_vmap_area_list); 5064 } 5065 } 5066 } 5067 5068 static void vmap_init_nodes(void) 5069 { 5070 struct vmap_node *vn; 5071 int i, n; 5072 5073 #if BITS_PER_LONG == 64 5074 /* 5075 * A high threshold of max nodes is fixed and bound to 128, 5076 * thus a scale factor is 1 for systems where number of cores 5077 * are less or equal to specified threshold. 5078 * 5079 * As for NUMA-aware notes. For bigger systems, for example 5080 * NUMA with multi-sockets, where we can end-up with thousands 5081 * of cores in total, a "sub-numa-clustering" should be added. 5082 * 5083 * In this case a NUMA domain is considered as a single entity 5084 * with dedicated sub-nodes in it which describe one group or 5085 * set of cores. Therefore a per-domain purging is supposed to 5086 * be added as well as a per-domain balancing. 5087 */ 5088 n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); 5089 5090 if (n > 1) { 5091 vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN); 5092 if (vn) { 5093 /* Node partition is 16 pages. */ 5094 vmap_zone_size = (1 << 4) * PAGE_SIZE; 5095 nr_vmap_nodes = n; 5096 vmap_nodes = vn; 5097 } else { 5098 pr_err("Failed to allocate an array. 
Disable a node layer\n"); 5099 } 5100 } 5101 #endif 5102 5103 for (n = 0; n < nr_vmap_nodes; n++) { 5104 vn = &vmap_nodes[n]; 5105 vn->busy.root = RB_ROOT; 5106 INIT_LIST_HEAD(&vn->busy.head); 5107 spin_lock_init(&vn->busy.lock); 5108 5109 vn->lazy.root = RB_ROOT; 5110 INIT_LIST_HEAD(&vn->lazy.head); 5111 spin_lock_init(&vn->lazy.lock); 5112 5113 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { 5114 INIT_LIST_HEAD(&vn->pool[i].head); 5115 WRITE_ONCE(vn->pool[i].len, 0); 5116 } 5117 5118 spin_lock_init(&vn->pool_lock); 5119 } 5120 } 5121 5122 static unsigned long 5123 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 5124 { 5125 unsigned long count; 5126 struct vmap_node *vn; 5127 int i, j; 5128 5129 for (count = 0, i = 0; i < nr_vmap_nodes; i++) { 5130 vn = &vmap_nodes[i]; 5131 5132 for (j = 0; j < MAX_VA_SIZE_PAGES; j++) 5133 count += READ_ONCE(vn->pool[j].len); 5134 } 5135 5136 return count ? count : SHRINK_EMPTY; 5137 } 5138 5139 static unsigned long 5140 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 5141 { 5142 int i; 5143 5144 for (i = 0; i < nr_vmap_nodes; i++) 5145 decay_va_pool_node(&vmap_nodes[i], true); 5146 5147 return SHRINK_STOP; 5148 } 5149 5150 void __init vmalloc_init(void) 5151 { 5152 struct shrinker *vmap_node_shrinker; 5153 struct vmap_area *va; 5154 struct vmap_node *vn; 5155 struct vm_struct *tmp; 5156 int i; 5157 5158 /* 5159 * Create the cache for vmap_area objects. 5160 */ 5161 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 5162 5163 for_each_possible_cpu(i) { 5164 struct vmap_block_queue *vbq; 5165 struct vfree_deferred *p; 5166 5167 vbq = &per_cpu(vmap_block_queue, i); 5168 spin_lock_init(&vbq->lock); 5169 INIT_LIST_HEAD(&vbq->free); 5170 p = &per_cpu(vfree_deferred, i); 5171 init_llist_head(&p->list); 5172 INIT_WORK(&p->wq, delayed_vfree_work); 5173 xa_init(&vbq->vmap_blocks); 5174 } 5175 5176 /* 5177 * Setup nodes before importing vmlist. 5178 */ 5179 vmap_init_nodes(); 5180 5181 /* Import existing vmlist entries. */ 5182 for (tmp = vmlist; tmp; tmp = tmp->next) { 5183 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5184 if (WARN_ON_ONCE(!va)) 5185 continue; 5186 5187 va->va_start = (unsigned long)tmp->addr; 5188 va->va_end = va->va_start + tmp->size; 5189 va->vm = tmp; 5190 5191 vn = addr_to_node(va->va_start); 5192 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 5193 } 5194 5195 /* 5196 * Now we can initialize a free vmap space. 5197 */ 5198 vmap_init_free_space(); 5199 vmap_initialized = true; 5200 5201 vmap_node_shrinker = shrinker_alloc(0, "vmap-node"); 5202 if (!vmap_node_shrinker) { 5203 pr_err("Failed to allocate vmap-node shrinker!\n"); 5204 return; 5205 } 5206 5207 vmap_node_shrinker->count_objects = vmap_node_shrink_count; 5208 vmap_node_shrinker->scan_objects = vmap_node_shrink_scan; 5209 shrinker_register(vmap_node_shrinker); 5210 } 5211