// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
#include <linux/page_owner.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	struct page *page;
	unsigned long size = PAGE_SIZE;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(end - addr)))
		return -EINVAL;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	lazy_mmu_mode_enable();

	do {
		if (unlikely(!pte_none(ptep_get(pte)))) {
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				dump_page(page, "remapping already mapped page");
			}
			BUG();
		}
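		/*
		 * Where the architecture supports it (CONFIG_HUGETLB_PAGE),
		 * arch_vmap_pte_range_map_size() below may report a mapping
		 * size bigger than PAGE_SIZE, e.g. a contiguous-PTE block.
		 * A single huge PTE then covers the whole block and the pfn,
		 * pte and addr advance by that size; as an illustration, a
		 * 64K block with 4K base pages advances the pfn by 16.
		 * Otherwise one base-page PTE is installed per iteration.
		 */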
121 122 #ifdef CONFIG_HUGETLB_PAGE 123 size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); 124 if (size != PAGE_SIZE) { 125 pte_t entry = pfn_pte(pfn, prot); 126 127 entry = arch_make_huge_pte(entry, ilog2(size), 0); 128 set_huge_pte_at(&init_mm, addr, pte, entry, size); 129 pfn += PFN_DOWN(size); 130 continue; 131 } 132 #endif 133 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); 134 pfn++; 135 } while (pte += PFN_DOWN(size), addr += size, addr != end); 136 137 lazy_mmu_mode_disable(); 138 *mask |= PGTBL_PTE_MODIFIED; 139 return 0; 140 } 141 142 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, 143 phys_addr_t phys_addr, pgprot_t prot, 144 unsigned int max_page_shift) 145 { 146 if (max_page_shift < PMD_SHIFT) 147 return 0; 148 149 if (!arch_vmap_pmd_supported(prot)) 150 return 0; 151 152 if ((end - addr) != PMD_SIZE) 153 return 0; 154 155 if (!IS_ALIGNED(addr, PMD_SIZE)) 156 return 0; 157 158 if (!IS_ALIGNED(phys_addr, PMD_SIZE)) 159 return 0; 160 161 if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) 162 return 0; 163 164 return pmd_set_huge(pmd, phys_addr, prot); 165 } 166 167 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, 168 phys_addr_t phys_addr, pgprot_t prot, 169 unsigned int max_page_shift, pgtbl_mod_mask *mask) 170 { 171 pmd_t *pmd; 172 unsigned long next; 173 int err = 0; 174 175 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); 176 if (!pmd) 177 return -ENOMEM; 178 do { 179 next = pmd_addr_end(addr, end); 180 181 if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, 182 max_page_shift)) { 183 *mask |= PGTBL_PMD_MODIFIED; 184 continue; 185 } 186 187 err = vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask); 188 if (err) 189 break; 190 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); 191 return err; 192 } 193 194 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, 195 phys_addr_t phys_addr, pgprot_t prot, 196 unsigned int max_page_shift) 197 { 198 if (max_page_shift < PUD_SHIFT) 199 return 0; 200 201 if (!arch_vmap_pud_supported(prot)) 202 return 0; 203 204 if ((end - addr) != PUD_SIZE) 205 return 0; 206 207 if (!IS_ALIGNED(addr, PUD_SIZE)) 208 return 0; 209 210 if (!IS_ALIGNED(phys_addr, PUD_SIZE)) 211 return 0; 212 213 if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) 214 return 0; 215 216 return pud_set_huge(pud, phys_addr, prot); 217 } 218 219 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, 220 phys_addr_t phys_addr, pgprot_t prot, 221 unsigned int max_page_shift, pgtbl_mod_mask *mask) 222 { 223 pud_t *pud; 224 unsigned long next; 225 int err = 0; 226 227 pud = pud_alloc_track(&init_mm, p4d, addr, mask); 228 if (!pud) 229 return -ENOMEM; 230 do { 231 next = pud_addr_end(addr, end); 232 233 if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, 234 max_page_shift)) { 235 *mask |= PGTBL_PUD_MODIFIED; 236 continue; 237 } 238 239 err = vmap_pmd_range(pud, addr, next, phys_addr, prot, max_page_shift, mask); 240 if (err) 241 break; 242 } while (pud++, phys_addr += (next - addr), addr = next, addr != end); 243 return err; 244 } 245 246 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, 247 phys_addr_t phys_addr, pgprot_t prot, 248 unsigned int max_page_shift) 249 { 250 if (max_page_shift < P4D_SHIFT) 251 return 0; 252 253 if (!arch_vmap_p4d_supported(prot)) 254 return 0; 255 256 if ((end - addr) != P4D_SIZE) 257 return 0; 258 259 if (!IS_ALIGNED(addr, P4D_SIZE)) 260 
return 0; 261 262 if (!IS_ALIGNED(phys_addr, P4D_SIZE)) 263 return 0; 264 265 if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) 266 return 0; 267 268 return p4d_set_huge(p4d, phys_addr, prot); 269 } 270 271 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, 272 phys_addr_t phys_addr, pgprot_t prot, 273 unsigned int max_page_shift, pgtbl_mod_mask *mask) 274 { 275 p4d_t *p4d; 276 unsigned long next; 277 int err = 0; 278 279 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); 280 if (!p4d) 281 return -ENOMEM; 282 do { 283 next = p4d_addr_end(addr, end); 284 285 if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, 286 max_page_shift)) { 287 *mask |= PGTBL_P4D_MODIFIED; 288 continue; 289 } 290 291 err = vmap_pud_range(p4d, addr, next, phys_addr, prot, max_page_shift, mask); 292 if (err) 293 break; 294 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); 295 return err; 296 } 297 298 static int vmap_range_noflush(unsigned long addr, unsigned long end, 299 phys_addr_t phys_addr, pgprot_t prot, 300 unsigned int max_page_shift) 301 { 302 pgd_t *pgd; 303 unsigned long start; 304 unsigned long next; 305 int err; 306 pgtbl_mod_mask mask = 0; 307 308 /* 309 * Might allocate pagetables (for most archs a more precise annotation 310 * would be might_alloc(GFP_PGTABLE_KERNEL)). Also might shootdown TLB 311 * (requires IRQs enabled on x86). 312 */ 313 might_sleep(); 314 BUG_ON(addr >= end); 315 316 start = addr; 317 pgd = pgd_offset_k(addr); 318 do { 319 next = pgd_addr_end(addr, end); 320 err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, 321 max_page_shift, &mask); 322 if (err) 323 break; 324 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); 325 326 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 327 arch_sync_kernel_mappings(start, end); 328 329 return err; 330 } 331 332 int vmap_page_range(unsigned long addr, unsigned long end, 333 phys_addr_t phys_addr, pgprot_t prot) 334 { 335 int err; 336 337 err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), 338 ioremap_max_page_shift); 339 flush_cache_vmap(addr, end); 340 if (!err) 341 err = kmsan_ioremap_page_range(addr, end, phys_addr, prot, 342 ioremap_max_page_shift); 343 return err; 344 } 345 346 int ioremap_page_range(unsigned long addr, unsigned long end, 347 phys_addr_t phys_addr, pgprot_t prot) 348 { 349 struct vm_struct *area; 350 351 area = find_vm_area((void *)addr); 352 if (!area || !(area->flags & VM_IOREMAP)) { 353 WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr); 354 return -EINVAL; 355 } 356 if (addr != (unsigned long)area->addr || 357 (void *)end != area->addr + get_vm_area_size(area)) { 358 WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n", 359 addr, end, (long)area->addr, 360 (long)area->addr + get_vm_area_size(area)); 361 return -ERANGE; 362 } 363 return vmap_page_range(addr, end, phys_addr, prot); 364 } 365 366 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 367 pgtbl_mod_mask *mask) 368 { 369 pte_t *pte; 370 pte_t ptent; 371 unsigned long size = PAGE_SIZE; 372 373 pte = pte_offset_kernel(pmd, addr); 374 lazy_mmu_mode_enable(); 375 376 do { 377 #ifdef CONFIG_HUGETLB_PAGE 378 size = arch_vmap_pte_range_unmap_size(addr, pte); 379 if (size != PAGE_SIZE) { 380 if (WARN_ON(!IS_ALIGNED(addr, size))) { 381 addr = ALIGN_DOWN(addr, size); 382 pte = PTR_ALIGN_DOWN(pte, sizeof(*pte) * (size >> PAGE_SHIFT)); 383 } 384 ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size); 385 if (WARN_ON(end - 
addr < size)) 386 size = end - addr; 387 } else 388 #endif 389 ptent = ptep_get_and_clear(&init_mm, addr, pte); 390 WARN_ON(!pte_none(ptent) && !pte_present(ptent)); 391 } while (pte += (size >> PAGE_SHIFT), addr += size, addr != end); 392 393 lazy_mmu_mode_disable(); 394 *mask |= PGTBL_PTE_MODIFIED; 395 } 396 397 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, 398 pgtbl_mod_mask *mask) 399 { 400 pmd_t *pmd; 401 unsigned long next; 402 int cleared; 403 404 pmd = pmd_offset(pud, addr); 405 do { 406 next = pmd_addr_end(addr, end); 407 408 cleared = pmd_clear_huge(pmd); 409 if (cleared || pmd_bad(*pmd)) 410 *mask |= PGTBL_PMD_MODIFIED; 411 412 if (cleared) { 413 WARN_ON(next - addr < PMD_SIZE); 414 continue; 415 } 416 if (pmd_none_or_clear_bad(pmd)) 417 continue; 418 vunmap_pte_range(pmd, addr, next, mask); 419 420 cond_resched(); 421 } while (pmd++, addr = next, addr != end); 422 } 423 424 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, 425 pgtbl_mod_mask *mask) 426 { 427 pud_t *pud; 428 unsigned long next; 429 int cleared; 430 431 pud = pud_offset(p4d, addr); 432 do { 433 next = pud_addr_end(addr, end); 434 435 cleared = pud_clear_huge(pud); 436 if (cleared || pud_bad(*pud)) 437 *mask |= PGTBL_PUD_MODIFIED; 438 439 if (cleared) { 440 WARN_ON(next - addr < PUD_SIZE); 441 continue; 442 } 443 if (pud_none_or_clear_bad(pud)) 444 continue; 445 vunmap_pmd_range(pud, addr, next, mask); 446 } while (pud++, addr = next, addr != end); 447 } 448 449 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, 450 pgtbl_mod_mask *mask) 451 { 452 p4d_t *p4d; 453 unsigned long next; 454 455 p4d = p4d_offset(pgd, addr); 456 do { 457 next = p4d_addr_end(addr, end); 458 459 p4d_clear_huge(p4d); 460 if (p4d_bad(*p4d)) 461 *mask |= PGTBL_P4D_MODIFIED; 462 463 if (p4d_none_or_clear_bad(p4d)) 464 continue; 465 vunmap_pud_range(p4d, addr, next, mask); 466 } while (p4d++, addr = next, addr != end); 467 } 468 469 /* 470 * vunmap_range_noflush is similar to vunmap_range, but does not 471 * flush caches or TLBs. 472 * 473 * The caller is responsible for calling flush_cache_vmap() before calling 474 * this function, and flush_tlb_kernel_range after it has returned 475 * successfully (and before the addresses are expected to cause a page fault 476 * or be re-mapped for something else, if TLB flushes are being delayed or 477 * coalesced). 478 * 479 * This is an internal function only. Do not use outside mm/. 480 */ 481 void __vunmap_range_noflush(unsigned long start, unsigned long end) 482 { 483 unsigned long next; 484 pgd_t *pgd; 485 unsigned long addr = start; 486 pgtbl_mod_mask mask = 0; 487 488 BUG_ON(addr >= end); 489 pgd = pgd_offset_k(addr); 490 do { 491 next = pgd_addr_end(addr, end); 492 if (pgd_bad(*pgd)) 493 mask |= PGTBL_PGD_MODIFIED; 494 if (pgd_none_or_clear_bad(pgd)) 495 continue; 496 vunmap_p4d_range(pgd, addr, next, &mask); 497 } while (pgd++, addr = next, addr != end); 498 499 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 500 arch_sync_kernel_mappings(start, end); 501 } 502 503 void vunmap_range_noflush(unsigned long start, unsigned long end) 504 { 505 kmsan_vunmap_range_noflush(start, end); 506 __vunmap_range_noflush(start, end); 507 } 508 509 /** 510 * vunmap_range - unmap kernel virtual addresses 511 * @addr: start of the VM area to unmap 512 * @end: end of the VM area to unmap (non-inclusive) 513 * 514 * Clears any present PTEs in the virtual address range, flushes TLBs and 515 * caches. 
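 * (For mm/-internal callers that batch their own TLB flushes there is also
 * vunmap_range_noflush(), which leaves the flushing to the caller.)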
Any subsequent access to the address before it has been re-mapped 516 * is a kernel bug. 517 */ 518 void vunmap_range(unsigned long addr, unsigned long end) 519 { 520 flush_cache_vunmap(addr, end); 521 vunmap_range_noflush(addr, end); 522 flush_tlb_kernel_range(addr, end); 523 } 524 525 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, 526 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 527 pgtbl_mod_mask *mask) 528 { 529 int err = 0; 530 pte_t *pte; 531 532 /* 533 * nr is a running index into the array which helps higher level 534 * callers keep track of where we're up to. 535 */ 536 537 pte = pte_alloc_kernel_track(pmd, addr, mask); 538 if (!pte) 539 return -ENOMEM; 540 541 lazy_mmu_mode_enable(); 542 543 do { 544 struct page *page = pages[*nr]; 545 546 if (WARN_ON(!pte_none(ptep_get(pte)))) { 547 err = -EBUSY; 548 break; 549 } 550 if (WARN_ON(!page)) { 551 err = -ENOMEM; 552 break; 553 } 554 if (WARN_ON(!pfn_valid(page_to_pfn(page)))) { 555 err = -EINVAL; 556 break; 557 } 558 559 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); 560 (*nr)++; 561 } while (pte++, addr += PAGE_SIZE, addr != end); 562 563 lazy_mmu_mode_disable(); 564 *mask |= PGTBL_PTE_MODIFIED; 565 566 return err; 567 } 568 569 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, 570 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 571 pgtbl_mod_mask *mask) 572 { 573 pmd_t *pmd; 574 unsigned long next; 575 576 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); 577 if (!pmd) 578 return -ENOMEM; 579 do { 580 next = pmd_addr_end(addr, end); 581 if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask)) 582 return -ENOMEM; 583 } while (pmd++, addr = next, addr != end); 584 return 0; 585 } 586 587 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr, 588 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 589 pgtbl_mod_mask *mask) 590 { 591 pud_t *pud; 592 unsigned long next; 593 594 pud = pud_alloc_track(&init_mm, p4d, addr, mask); 595 if (!pud) 596 return -ENOMEM; 597 do { 598 next = pud_addr_end(addr, end); 599 if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask)) 600 return -ENOMEM; 601 } while (pud++, addr = next, addr != end); 602 return 0; 603 } 604 605 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, 606 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 607 pgtbl_mod_mask *mask) 608 { 609 p4d_t *p4d; 610 unsigned long next; 611 612 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); 613 if (!p4d) 614 return -ENOMEM; 615 do { 616 next = p4d_addr_end(addr, end); 617 if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask)) 618 return -ENOMEM; 619 } while (p4d++, addr = next, addr != end); 620 return 0; 621 } 622 623 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, 624 pgprot_t prot, struct page **pages) 625 { 626 unsigned long start = addr; 627 pgd_t *pgd; 628 unsigned long next; 629 int err = 0; 630 int nr = 0; 631 pgtbl_mod_mask mask = 0; 632 633 BUG_ON(addr >= end); 634 pgd = pgd_offset_k(addr); 635 do { 636 next = pgd_addr_end(addr, end); 637 if (pgd_bad(*pgd)) 638 mask |= PGTBL_PGD_MODIFIED; 639 err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); 640 if (err) 641 break; 642 } while (pgd++, addr = next, addr != end); 643 644 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 645 arch_sync_kernel_mappings(start, end); 646 647 return err; 648 } 649 650 /* 651 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not 652 * flush caches. 
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift,
		gfp_t gfp_mask)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift, gfp_mask);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

static int __vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift,
		gfp_t gfp_mask)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift, gfp_mask);
	flush_cache_vmap(addr, end);
	return err;
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
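 *
 * A minimal usage sketch (illustrative only; it assumes the caller has
 * already reserved a sufficiently large VM area and filled a PAGE_SIZE-page
 * array):
 *
 *	unsigned long start = (unsigned long)area->addr;
 *	int err = vmap_pages_range(start, start + (nr << PAGE_SHIFT),
 *				   PAGE_KERNEL, pages, PAGE_SHIFT);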
 */
int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return __vmap_pages_range(addr, end, prot, pages, page_shift, GFP_KERNEL);
}

static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
		unsigned long end)
{
	might_sleep();
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;
	return 0;
}

/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		unsigned long end, struct page **pages)
{
	int err;

	err = check_sparse_vm_area(area, start, end);
	if (err)
		return err;

	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}

/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
		unsigned long end)
{
	if (check_sparse_vm_area(area, start, end))
		return;

	vunmap_range(start, end);
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
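 *
 * For example, if a region is backed by one 2M PMD leaf, looking up the
 * address one small page past the region start returns pmd_page() + 1,
 * exactly as it would if the region had been mapped with base-page PTEs.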
798 */ 799 struct page *vmalloc_to_page(const void *vmalloc_addr) 800 { 801 unsigned long addr = (unsigned long) vmalloc_addr; 802 struct page *page = NULL; 803 pgd_t *pgd = pgd_offset_k(addr); 804 p4d_t *p4d; 805 pud_t *pud; 806 pmd_t *pmd; 807 pte_t *ptep, pte; 808 809 /* 810 * XXX we might need to change this if we add VIRTUAL_BUG_ON for 811 * architectures that do not vmalloc module space 812 */ 813 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); 814 815 if (pgd_none(*pgd)) 816 return NULL; 817 if (WARN_ON_ONCE(pgd_leaf(*pgd))) 818 return NULL; /* XXX: no allowance for huge pgd */ 819 if (WARN_ON_ONCE(pgd_bad(*pgd))) 820 return NULL; 821 822 p4d = p4d_offset(pgd, addr); 823 if (p4d_none(*p4d)) 824 return NULL; 825 if (p4d_leaf(*p4d)) 826 return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT); 827 if (WARN_ON_ONCE(p4d_bad(*p4d))) 828 return NULL; 829 830 pud = pud_offset(p4d, addr); 831 if (pud_none(*pud)) 832 return NULL; 833 if (pud_leaf(*pud)) 834 return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); 835 if (WARN_ON_ONCE(pud_bad(*pud))) 836 return NULL; 837 838 pmd = pmd_offset(pud, addr); 839 if (pmd_none(*pmd)) 840 return NULL; 841 if (pmd_leaf(*pmd)) 842 return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); 843 if (WARN_ON_ONCE(pmd_bad(*pmd))) 844 return NULL; 845 846 ptep = pte_offset_kernel(pmd, addr); 847 pte = ptep_get(ptep); 848 if (pte_present(pte)) 849 page = pte_page(pte); 850 851 return page; 852 } 853 EXPORT_SYMBOL(vmalloc_to_page); 854 855 /* 856 * Map a vmalloc()-space virtual address to the physical page frame number. 857 */ 858 unsigned long vmalloc_to_pfn(const void *vmalloc_addr) 859 { 860 return page_to_pfn(vmalloc_to_page(vmalloc_addr)); 861 } 862 EXPORT_SYMBOL(vmalloc_to_pfn); 863 864 865 /*** Global kva allocator ***/ 866 867 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0 868 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0 869 870 871 static DEFINE_SPINLOCK(free_vmap_area_lock); 872 static bool vmap_initialized __read_mostly; 873 874 /* 875 * This kmem_cache is used for vmap_area objects. Instead of 876 * allocating from slab we reuse an object from this cache to 877 * make things faster. Especially in "no edge" splitting of 878 * free block. 879 */ 880 static struct kmem_cache *vmap_area_cachep; 881 882 /* 883 * This linked list is used in pair with free_vmap_area_root. 884 * It gives O(1) access to prev/next to perform fast coalescing. 885 */ 886 static LIST_HEAD(free_vmap_area_list); 887 888 /* 889 * This augment red-black tree represents the free vmap space. 890 * All vmap_area objects in this tree are sorted by va->va_start 891 * address. It is used for allocation and merging when a vmap 892 * object is released. 893 * 894 * Each vmap_area node contains a maximum available free block 895 * of its sub-tree, right or left. Therefore it is possible to 896 * find a lowest match of free area. 897 */ 898 static struct rb_root free_vmap_area_root = RB_ROOT; 899 900 /* 901 * Preload a CPU with one object for "no edge" split case. The 902 * aim is to get rid of allocations from the atomic context, thus 903 * to use more permissive allocation masks. 904 */ 905 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node); 906 907 /* 908 * This structure defines a single, solid model where a list and 909 * rb-tree are part of one entity protected by the lock. Nodes are 910 * sorted in ascending order, thus for O(1) access to left/right 911 * neighbors a list is used as well as for sequential traversal. 
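 *
 * Each vmap_node below embeds two of these: one rb_list for busy areas and
 * one for lazily-freed areas that are awaiting purge.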
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/*
 * A fast size storage caches VAs up to 1M in size. Each pool is a list of
 * ready-to-go VAs of one particular size; pool index i holds VAs that are
 * (i + 1) pages long.
 */
#define MAX_VA_SIZE_PAGES 256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};

/*
 * Effective vmap-node logic. Users make use of nodes instead of a single
 * global heap, which spreads accesses and mitigates lock contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;

/*
 * The initial setup consists of one single node, i.e. balancing is
 * effectively disabled. Later on, once vmap is initialized, these
 * parameters are updated based on the system's capacity.
 */
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;

/* A simple iterator over all vmap-nodes. */
#define for_each_vmap_node(vn)	\
	for ((vn) = &vmap_nodes[0];	\
	     (vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)

static inline unsigned int
addr_to_node_id(unsigned long addr)
{
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}

static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
	return &vmap_nodes[addr_to_node_id(addr)];
}

static inline struct vmap_node *
id_to_node(unsigned int id)
{
	return &vmap_nodes[id % nr_vmap_nodes];
}

static inline unsigned int
node_to_id(struct vmap_node *node)
{
	/* Pointer arithmetic. */
	unsigned int id = node - vmap_nodes;

	if (likely(id < nr_vmap_nodes))
		return id;

	WARN_ONCE(1, "An address 0x%p is out-of-bounds.\n", node);
	return 0;
}

/*
 * We use the value 0 to represent "no node", which is why an encoded
 * value is the node-id incremented by 1. It is therefore always greater
 * than 0. A valid node_id that can be encoded lies in
 * [0:nr_vmap_nodes - 1]. If the passed node_id is not valid, 0 is
 * returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return (node_id + 1) << BITS_PER_BYTE;

	/* Warn and no node encoded. */
	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
	return 0;
}

/*
 * Returns the decoded node-id, which lies within the valid range
 * [0:nr_vmap_nodes - 1]. Otherwise, if the extracted data is wrong,
 * nr_vmap_nodes is returned.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;

	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return node_id;

	/* If it was _not_ zero, warn.
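	 * An encoded value of zero (no node) decodes to UINT_MAX after the
	 * "- 1" above, which is why that case is deliberately not warned
	 * about.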
*/ 1033 WARN_ONCE(node_id != UINT_MAX, 1034 "Decode wrong node id (%d)\n", node_id); 1035 1036 return nr_vmap_nodes; 1037 } 1038 1039 static bool 1040 is_vn_id_valid(unsigned int node_id) 1041 { 1042 if (node_id < nr_vmap_nodes) 1043 return true; 1044 1045 return false; 1046 } 1047 1048 static __always_inline unsigned long 1049 va_size(struct vmap_area *va) 1050 { 1051 return (va->va_end - va->va_start); 1052 } 1053 1054 static __always_inline unsigned long 1055 get_subtree_max_size(struct rb_node *node) 1056 { 1057 struct vmap_area *va; 1058 1059 va = rb_entry_safe(node, struct vmap_area, rb_node); 1060 return va ? va->subtree_max_size : 0; 1061 } 1062 1063 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb, 1064 struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size) 1065 1066 static void reclaim_and_purge_vmap_areas(void); 1067 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); 1068 static void drain_vmap_area_work(struct work_struct *work); 1069 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work); 1070 1071 static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages; 1072 static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr; 1073 1074 unsigned long vmalloc_nr_pages(void) 1075 { 1076 return atomic_long_read(&nr_vmalloc_pages); 1077 } 1078 1079 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root) 1080 { 1081 struct rb_node *n = root->rb_node; 1082 1083 addr = (unsigned long)kasan_reset_tag((void *)addr); 1084 1085 while (n) { 1086 struct vmap_area *va; 1087 1088 va = rb_entry(n, struct vmap_area, rb_node); 1089 if (addr < va->va_start) 1090 n = n->rb_left; 1091 else if (addr >= va->va_end) 1092 n = n->rb_right; 1093 else 1094 return va; 1095 } 1096 1097 return NULL; 1098 } 1099 1100 /* Look up the first VA which satisfies addr < va_end, NULL if none. */ 1101 static struct vmap_area * 1102 __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root) 1103 { 1104 struct vmap_area *va = NULL; 1105 struct rb_node *n = root->rb_node; 1106 1107 addr = (unsigned long)kasan_reset_tag((void *)addr); 1108 1109 while (n) { 1110 struct vmap_area *tmp; 1111 1112 tmp = rb_entry(n, struct vmap_area, rb_node); 1113 if (tmp->va_end > addr) { 1114 va = tmp; 1115 if (tmp->va_start <= addr) 1116 break; 1117 1118 n = n->rb_left; 1119 } else 1120 n = n->rb_right; 1121 } 1122 1123 return va; 1124 } 1125 1126 /* 1127 * Returns a node where a first VA, that satisfies addr < va_end, resides. 1128 * If success, a node is locked. A user is responsible to unlock it when a 1129 * VA is no longer needed to be accessed. 1130 * 1131 * Returns NULL if nothing found. 1132 */ 1133 static struct vmap_node * 1134 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va) 1135 { 1136 unsigned long va_start_lowest; 1137 struct vmap_node *vn; 1138 1139 repeat: 1140 va_start_lowest = 0; 1141 1142 for_each_vmap_node(vn) { 1143 spin_lock(&vn->busy.lock); 1144 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); 1145 1146 if (*va) 1147 if (!va_start_lowest || (*va)->va_start < va_start_lowest) 1148 va_start_lowest = (*va)->va_start; 1149 spin_unlock(&vn->busy.lock); 1150 } 1151 1152 /* 1153 * Check if found VA exists, it might have gone away. In this case we 1154 * repeat the search because a VA has been removed concurrently and we 1155 * need to proceed to the next one, which is a rare case. 
1156 */ 1157 if (va_start_lowest) { 1158 vn = addr_to_node(va_start_lowest); 1159 1160 spin_lock(&vn->busy.lock); 1161 *va = __find_vmap_area(va_start_lowest, &vn->busy.root); 1162 1163 if (*va) 1164 return vn; 1165 1166 spin_unlock(&vn->busy.lock); 1167 goto repeat; 1168 } 1169 1170 return NULL; 1171 } 1172 1173 /* 1174 * This function returns back addresses of parent node 1175 * and its left or right link for further processing. 1176 * 1177 * Otherwise NULL is returned. In that case all further 1178 * steps regarding inserting of conflicting overlap range 1179 * have to be declined and actually considered as a bug. 1180 */ 1181 static __always_inline struct rb_node ** 1182 find_va_links(struct vmap_area *va, 1183 struct rb_root *root, struct rb_node *from, 1184 struct rb_node **parent) 1185 { 1186 struct vmap_area *tmp_va; 1187 struct rb_node **link; 1188 1189 if (root) { 1190 link = &root->rb_node; 1191 if (unlikely(!*link)) { 1192 *parent = NULL; 1193 return link; 1194 } 1195 } else { 1196 link = &from; 1197 } 1198 1199 /* 1200 * Go to the bottom of the tree. When we hit the last point 1201 * we end up with parent rb_node and correct direction, i name 1202 * it link, where the new va->rb_node will be attached to. 1203 */ 1204 do { 1205 tmp_va = rb_entry(*link, struct vmap_area, rb_node); 1206 1207 /* 1208 * During the traversal we also do some sanity check. 1209 * Trigger the BUG() if there are sides(left/right) 1210 * or full overlaps. 1211 */ 1212 if (va->va_end <= tmp_va->va_start) 1213 link = &(*link)->rb_left; 1214 else if (va->va_start >= tmp_va->va_end) 1215 link = &(*link)->rb_right; 1216 else { 1217 WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n", 1218 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); 1219 1220 return NULL; 1221 } 1222 } while (*link); 1223 1224 *parent = &tmp_va->rb_node; 1225 return link; 1226 } 1227 1228 static __always_inline struct list_head * 1229 get_va_next_sibling(struct rb_node *parent, struct rb_node **link) 1230 { 1231 struct list_head *list; 1232 1233 if (unlikely(!parent)) 1234 /* 1235 * The red-black tree where we try to find VA neighbors 1236 * before merging or inserting is empty, i.e. it means 1237 * there is no free vmap space. Normally it does not 1238 * happen but we handle this case anyway. 1239 */ 1240 return NULL; 1241 1242 list = &rb_entry(parent, struct vmap_area, rb_node)->list; 1243 return (&parent->rb_right == link ? list->next : list); 1244 } 1245 1246 static __always_inline void 1247 __link_va(struct vmap_area *va, struct rb_root *root, 1248 struct rb_node *parent, struct rb_node **link, 1249 struct list_head *head, bool augment) 1250 { 1251 /* 1252 * VA is still not in the list, but we can 1253 * identify its future previous list_head node. 1254 */ 1255 if (likely(parent)) { 1256 head = &rb_entry(parent, struct vmap_area, rb_node)->list; 1257 if (&parent->rb_right != link) 1258 head = head->prev; 1259 } 1260 1261 /* Insert to the rb-tree */ 1262 rb_link_node(&va->rb_node, parent, link); 1263 if (augment) { 1264 /* 1265 * Some explanation here. Just perform simple insertion 1266 * to the tree. We do not set va->subtree_max_size to 1267 * its current size before calling rb_insert_augmented(). 1268 * It is because we populate the tree from the bottom 1269 * to parent levels when the node _is_ in the tree. 1270 * 1271 * Therefore we set subtree_max_size to zero after insertion, 1272 * to let __augment_tree_propagate_from() puts everything to 1273 * the correct order later on. 
1274 */ 1275 rb_insert_augmented(&va->rb_node, 1276 root, &free_vmap_area_rb_augment_cb); 1277 va->subtree_max_size = 0; 1278 } else { 1279 rb_insert_color(&va->rb_node, root); 1280 } 1281 1282 /* Address-sort this list */ 1283 list_add(&va->list, head); 1284 } 1285 1286 static __always_inline void 1287 link_va(struct vmap_area *va, struct rb_root *root, 1288 struct rb_node *parent, struct rb_node **link, 1289 struct list_head *head) 1290 { 1291 __link_va(va, root, parent, link, head, false); 1292 } 1293 1294 static __always_inline void 1295 link_va_augment(struct vmap_area *va, struct rb_root *root, 1296 struct rb_node *parent, struct rb_node **link, 1297 struct list_head *head) 1298 { 1299 __link_va(va, root, parent, link, head, true); 1300 } 1301 1302 static __always_inline void 1303 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) 1304 { 1305 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) 1306 return; 1307 1308 if (augment) 1309 rb_erase_augmented(&va->rb_node, 1310 root, &free_vmap_area_rb_augment_cb); 1311 else 1312 rb_erase(&va->rb_node, root); 1313 1314 list_del_init(&va->list); 1315 RB_CLEAR_NODE(&va->rb_node); 1316 } 1317 1318 static __always_inline void 1319 unlink_va(struct vmap_area *va, struct rb_root *root) 1320 { 1321 __unlink_va(va, root, false); 1322 } 1323 1324 static __always_inline void 1325 unlink_va_augment(struct vmap_area *va, struct rb_root *root) 1326 { 1327 __unlink_va(va, root, true); 1328 } 1329 1330 #if DEBUG_AUGMENT_PROPAGATE_CHECK 1331 /* 1332 * Gets called when remove the node and rotate. 1333 */ 1334 static __always_inline unsigned long 1335 compute_subtree_max_size(struct vmap_area *va) 1336 { 1337 return max3(va_size(va), 1338 get_subtree_max_size(va->rb_node.rb_left), 1339 get_subtree_max_size(va->rb_node.rb_right)); 1340 } 1341 1342 static void 1343 augment_tree_propagate_check(void) 1344 { 1345 struct vmap_area *va; 1346 unsigned long computed_size; 1347 1348 list_for_each_entry(va, &free_vmap_area_list, list) { 1349 computed_size = compute_subtree_max_size(va); 1350 if (computed_size != va->subtree_max_size) 1351 pr_emerg("tree is corrupted: %lu, %lu\n", 1352 va_size(va), va->subtree_max_size); 1353 } 1354 } 1355 #endif 1356 1357 /* 1358 * This function populates subtree_max_size from bottom to upper 1359 * levels starting from VA point. The propagation must be done 1360 * when VA size is modified by changing its va_start/va_end. Or 1361 * in case of newly inserting of VA to the tree. 1362 * 1363 * It means that __augment_tree_propagate_from() must be called: 1364 * - After VA has been inserted to the tree(free path); 1365 * - After VA has been shrunk(allocation path); 1366 * - After VA has been increased(merging path). 1367 * 1368 * Please note that, it does not mean that upper parent nodes 1369 * and their subtree_max_size are recalculated all the time up 1370 * to the root node. 1371 * 1372 * 4--8 1373 * /\ 1374 * / \ 1375 * / \ 1376 * 2--2 8--8 1377 * 1378 * For example if we modify the node 4, shrinking it to 2, then 1379 * no any modification is required. If we shrink the node 2 to 1 1380 * its subtree_max_size is updated only, and set to 1. If we shrink 1381 * the node 8 to 6, then its subtree_max_size is set to 6 and parent 1382 * node becomes 4--6. 1383 */ 1384 static __always_inline void 1385 augment_tree_propagate_from(struct vmap_area *va) 1386 { 1387 /* 1388 * Populate the tree from bottom towards the root until 1389 * the calculated maximum available size of checked node 1390 * is equal to its current one. 
1391 */ 1392 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); 1393 1394 #if DEBUG_AUGMENT_PROPAGATE_CHECK 1395 augment_tree_propagate_check(); 1396 #endif 1397 } 1398 1399 static void 1400 insert_vmap_area(struct vmap_area *va, 1401 struct rb_root *root, struct list_head *head) 1402 { 1403 struct rb_node **link; 1404 struct rb_node *parent; 1405 1406 link = find_va_links(va, root, NULL, &parent); 1407 if (link) 1408 link_va(va, root, parent, link, head); 1409 } 1410 1411 static void 1412 insert_vmap_area_augment(struct vmap_area *va, 1413 struct rb_node *from, struct rb_root *root, 1414 struct list_head *head) 1415 { 1416 struct rb_node **link; 1417 struct rb_node *parent; 1418 1419 if (from) 1420 link = find_va_links(va, NULL, from, &parent); 1421 else 1422 link = find_va_links(va, root, NULL, &parent); 1423 1424 if (link) { 1425 link_va_augment(va, root, parent, link, head); 1426 augment_tree_propagate_from(va); 1427 } 1428 } 1429 1430 /* 1431 * Merge de-allocated chunk of VA memory with previous 1432 * and next free blocks. If coalesce is not done a new 1433 * free area is inserted. If VA has been merged, it is 1434 * freed. 1435 * 1436 * Please note, it can return NULL in case of overlap 1437 * ranges, followed by WARN() report. Despite it is a 1438 * buggy behaviour, a system can be alive and keep 1439 * ongoing. 1440 */ 1441 static __always_inline struct vmap_area * 1442 __merge_or_add_vmap_area(struct vmap_area *va, 1443 struct rb_root *root, struct list_head *head, bool augment) 1444 { 1445 struct vmap_area *sibling; 1446 struct list_head *next; 1447 struct rb_node **link; 1448 struct rb_node *parent; 1449 bool merged = false; 1450 1451 /* 1452 * Find a place in the tree where VA potentially will be 1453 * inserted, unless it is merged with its sibling/siblings. 1454 */ 1455 link = find_va_links(va, root, NULL, &parent); 1456 if (!link) 1457 return NULL; 1458 1459 /* 1460 * Get next node of VA to check if merging can be done. 1461 */ 1462 next = get_va_next_sibling(parent, link); 1463 if (unlikely(next == NULL)) 1464 goto insert; 1465 1466 /* 1467 * start end 1468 * | | 1469 * |<------VA------>|<-----Next----->| 1470 * | | 1471 * start end 1472 */ 1473 if (next != head) { 1474 sibling = list_entry(next, struct vmap_area, list); 1475 if (sibling->va_start == va->va_end) { 1476 sibling->va_start = va->va_start; 1477 1478 /* Free vmap_area object. */ 1479 kmem_cache_free(vmap_area_cachep, va); 1480 1481 /* Point to the new merged area. */ 1482 va = sibling; 1483 merged = true; 1484 } 1485 } 1486 1487 /* 1488 * start end 1489 * | | 1490 * |<-----Prev----->|<------VA------>| 1491 * | | 1492 * start end 1493 */ 1494 if (next->prev != head) { 1495 sibling = list_entry(next->prev, struct vmap_area, list); 1496 if (sibling->va_end == va->va_start) { 1497 /* 1498 * If both neighbors are coalesced, it is important 1499 * to unlink the "next" node first, followed by merging 1500 * with "previous" one. Otherwise the tree might not be 1501 * fully populated if a sibling's augmented value is 1502 * "normalized" because of rotation operations. 1503 */ 1504 if (merged) 1505 __unlink_va(va, root, augment); 1506 1507 sibling->va_end = va->va_end; 1508 1509 /* Free vmap_area object. */ 1510 kmem_cache_free(vmap_area_cachep, va); 1511 1512 /* Point to the new merged area. 
*/ 1513 va = sibling; 1514 merged = true; 1515 } 1516 } 1517 1518 insert: 1519 if (!merged) 1520 __link_va(va, root, parent, link, head, augment); 1521 1522 return va; 1523 } 1524 1525 static __always_inline struct vmap_area * 1526 merge_or_add_vmap_area(struct vmap_area *va, 1527 struct rb_root *root, struct list_head *head) 1528 { 1529 return __merge_or_add_vmap_area(va, root, head, false); 1530 } 1531 1532 static __always_inline struct vmap_area * 1533 merge_or_add_vmap_area_augment(struct vmap_area *va, 1534 struct rb_root *root, struct list_head *head) 1535 { 1536 va = __merge_or_add_vmap_area(va, root, head, true); 1537 if (va) 1538 augment_tree_propagate_from(va); 1539 1540 return va; 1541 } 1542 1543 static __always_inline bool 1544 is_within_this_va(struct vmap_area *va, unsigned long size, 1545 unsigned long align, unsigned long vstart) 1546 { 1547 unsigned long nva_start_addr; 1548 1549 if (va->va_start > vstart) 1550 nva_start_addr = ALIGN(va->va_start, align); 1551 else 1552 nva_start_addr = ALIGN(vstart, align); 1553 1554 /* Can be overflowed due to big size or alignment. */ 1555 if (nva_start_addr + size < nva_start_addr || 1556 nva_start_addr < vstart) 1557 return false; 1558 1559 return (nva_start_addr + size <= va->va_end); 1560 } 1561 1562 /* 1563 * Find the first free block(lowest start address) in the tree, 1564 * that will accomplish the request corresponding to passing 1565 * parameters. Please note, with an alignment bigger than PAGE_SIZE, 1566 * a search length is adjusted to account for worst case alignment 1567 * overhead. 1568 */ 1569 static __always_inline struct vmap_area * 1570 find_vmap_lowest_match(struct rb_root *root, unsigned long size, 1571 unsigned long align, unsigned long vstart, bool adjust_search_size) 1572 { 1573 struct vmap_area *va; 1574 struct rb_node *node; 1575 unsigned long length; 1576 1577 /* Start from the root. */ 1578 node = root->rb_node; 1579 1580 /* Adjust the search size for alignment overhead. */ 1581 length = adjust_search_size ? size + align - 1 : size; 1582 1583 while (node) { 1584 va = rb_entry(node, struct vmap_area, rb_node); 1585 1586 if (get_subtree_max_size(node->rb_left) >= length && 1587 vstart < va->va_start) { 1588 node = node->rb_left; 1589 } else { 1590 if (is_within_this_va(va, size, align, vstart)) 1591 return va; 1592 1593 /* 1594 * Does not make sense to go deeper towards the right 1595 * sub-tree if it does not have a free block that is 1596 * equal or bigger to the requested search length. 1597 */ 1598 if (get_subtree_max_size(node->rb_right) >= length) { 1599 node = node->rb_right; 1600 continue; 1601 } 1602 1603 /* 1604 * OK. We roll back and find the first right sub-tree, 1605 * that will satisfy the search criteria. It can happen 1606 * due to "vstart" restriction or an alignment overhead 1607 * that is bigger then PAGE_SIZE. 1608 */ 1609 while ((node = rb_parent(node))) { 1610 va = rb_entry(node, struct vmap_area, rb_node); 1611 if (is_within_this_va(va, size, align, vstart)) 1612 return va; 1613 1614 if (get_subtree_max_size(node->rb_right) >= length && 1615 vstart <= va->va_start) { 1616 /* 1617 * Shift the vstart forward. Please note, we update it with 1618 * parent's start address adding "1" because we do not want 1619 * to enter same sub-tree after it has already been checked 1620 * and no suitable free block found there. 
1621 */ 1622 vstart = va->va_start + 1; 1623 node = node->rb_right; 1624 break; 1625 } 1626 } 1627 } 1628 } 1629 1630 return NULL; 1631 } 1632 1633 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1634 #include <linux/random.h> 1635 1636 static struct vmap_area * 1637 find_vmap_lowest_linear_match(struct list_head *head, unsigned long size, 1638 unsigned long align, unsigned long vstart) 1639 { 1640 struct vmap_area *va; 1641 1642 list_for_each_entry(va, head, list) { 1643 if (!is_within_this_va(va, size, align, vstart)) 1644 continue; 1645 1646 return va; 1647 } 1648 1649 return NULL; 1650 } 1651 1652 static void 1653 find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head, 1654 unsigned long size, unsigned long align) 1655 { 1656 struct vmap_area *va_1, *va_2; 1657 unsigned long vstart; 1658 unsigned int rnd; 1659 1660 get_random_bytes(&rnd, sizeof(rnd)); 1661 vstart = VMALLOC_START + rnd; 1662 1663 va_1 = find_vmap_lowest_match(root, size, align, vstart, false); 1664 va_2 = find_vmap_lowest_linear_match(head, size, align, vstart); 1665 1666 if (va_1 != va_2) 1667 pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", 1668 va_1, va_2, vstart); 1669 } 1670 #endif 1671 1672 enum fit_type { 1673 NOTHING_FIT = 0, 1674 FL_FIT_TYPE = 1, /* full fit */ 1675 LE_FIT_TYPE = 2, /* left edge fit */ 1676 RE_FIT_TYPE = 3, /* right edge fit */ 1677 NE_FIT_TYPE = 4 /* no edge fit */ 1678 }; 1679 1680 static __always_inline enum fit_type 1681 classify_va_fit_type(struct vmap_area *va, 1682 unsigned long nva_start_addr, unsigned long size) 1683 { 1684 enum fit_type type; 1685 1686 /* Check if it is within VA. */ 1687 if (nva_start_addr < va->va_start || 1688 nva_start_addr + size > va->va_end) 1689 return NOTHING_FIT; 1690 1691 /* Now classify. */ 1692 if (va->va_start == nva_start_addr) { 1693 if (va->va_end == nva_start_addr + size) 1694 type = FL_FIT_TYPE; 1695 else 1696 type = LE_FIT_TYPE; 1697 } else if (va->va_end == nva_start_addr + size) { 1698 type = RE_FIT_TYPE; 1699 } else { 1700 type = NE_FIT_TYPE; 1701 } 1702 1703 return type; 1704 } 1705 1706 static __always_inline int 1707 va_clip(struct rb_root *root, struct list_head *head, 1708 struct vmap_area *va, unsigned long nva_start_addr, 1709 unsigned long size) 1710 { 1711 struct vmap_area *lva = NULL; 1712 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); 1713 1714 if (type == FL_FIT_TYPE) { 1715 /* 1716 * No need to split VA, it fully fits. 1717 * 1718 * | | 1719 * V NVA V 1720 * |---------------| 1721 */ 1722 unlink_va_augment(va, root); 1723 kmem_cache_free(vmap_area_cachep, va); 1724 } else if (type == LE_FIT_TYPE) { 1725 /* 1726 * Split left edge of fit VA. 1727 * 1728 * | | 1729 * V NVA V R 1730 * |-------|-------| 1731 */ 1732 va->va_start += size; 1733 } else if (type == RE_FIT_TYPE) { 1734 /* 1735 * Split right edge of fit VA. 1736 * 1737 * | | 1738 * L V NVA V 1739 * |-------|-------| 1740 */ 1741 va->va_end = nva_start_addr; 1742 } else if (type == NE_FIT_TYPE) { 1743 /* 1744 * Split no edge of fit VA. 1745 * 1746 * | | 1747 * L V NVA V R 1748 * |---|-------|---| 1749 */ 1750 lva = __this_cpu_xchg(ne_fit_preload_node, NULL); 1751 if (unlikely(!lva)) { 1752 /* 1753 * For percpu allocator we do not do any pre-allocation 1754 * and leave it as it is. The reason is it most likely 1755 * never ends up with NE_FIT_TYPE splitting. In case of 1756 * percpu allocations offsets and sizes are aligned to 1757 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE 1758 * are its main fitting cases. 
1759 * 1760 * There are a few exceptions though, as an example it is 1761 * a first allocation (early boot up) when we have "one" 1762 * big free space that has to be split. 1763 * 1764 * Also we can hit this path in case of regular "vmap" 1765 * allocations, if "this" current CPU was not preloaded. 1766 * See the comment in alloc_vmap_area() why. If so, then 1767 * GFP_NOWAIT is used instead to get an extra object for 1768 * split purpose. That is rare and most time does not 1769 * occur. 1770 * 1771 * What happens if an allocation gets failed. Basically, 1772 * an "overflow" path is triggered to purge lazily freed 1773 * areas to free some memory, then, the "retry" path is 1774 * triggered to repeat one more time. See more details 1775 * in alloc_vmap_area() function. 1776 */ 1777 lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); 1778 if (!lva) 1779 return -ENOMEM; 1780 } 1781 1782 /* 1783 * Build the remainder. 1784 */ 1785 lva->va_start = va->va_start; 1786 lva->va_end = nva_start_addr; 1787 1788 /* 1789 * Shrink this VA to remaining size. 1790 */ 1791 va->va_start = nva_start_addr + size; 1792 } else { 1793 return -EINVAL; 1794 } 1795 1796 if (type != FL_FIT_TYPE) { 1797 augment_tree_propagate_from(va); 1798 1799 if (lva) /* type == NE_FIT_TYPE */ 1800 insert_vmap_area_augment(lva, &va->rb_node, root, head); 1801 } 1802 1803 return 0; 1804 } 1805 1806 static unsigned long 1807 va_alloc(struct vmap_area *va, 1808 struct rb_root *root, struct list_head *head, 1809 unsigned long size, unsigned long align, 1810 unsigned long vstart, unsigned long vend) 1811 { 1812 unsigned long nva_start_addr; 1813 int ret; 1814 1815 if (va->va_start > vstart) 1816 nva_start_addr = ALIGN(va->va_start, align); 1817 else 1818 nva_start_addr = ALIGN(vstart, align); 1819 1820 /* Check the "vend" restriction. */ 1821 if (nva_start_addr + size > vend) 1822 return -ERANGE; 1823 1824 /* Update the free vmap_area. */ 1825 ret = va_clip(root, head, va, nva_start_addr, size); 1826 if (WARN_ON_ONCE(ret)) 1827 return ret; 1828 1829 return nva_start_addr; 1830 } 1831 1832 /* 1833 * Returns a start address of the newly allocated area, if success. 1834 * Otherwise an error value is returned that indicates failure. 1835 */ 1836 static __always_inline unsigned long 1837 __alloc_vmap_area(struct rb_root *root, struct list_head *head, 1838 unsigned long size, unsigned long align, 1839 unsigned long vstart, unsigned long vend) 1840 { 1841 bool adjust_search_size = true; 1842 unsigned long nva_start_addr; 1843 struct vmap_area *va; 1844 1845 /* 1846 * Do not adjust when: 1847 * a) align <= PAGE_SIZE, because it does not make any sense. 1848 * All blocks(their start addresses) are at least PAGE_SIZE 1849 * aligned anyway; 1850 * b) a short range where a requested size corresponds to exactly 1851 * specified [vstart:vend] interval and an alignment > PAGE_SIZE. 1852 * With adjusted search length an allocation would not succeed. 
1853 */ 1854 if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size)) 1855 adjust_search_size = false; 1856 1857 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); 1858 if (unlikely(!va)) 1859 return -ENOENT; 1860 1861 nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); 1862 1863 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1864 if (!IS_ERR_VALUE(nva_start_addr)) 1865 find_vmap_lowest_match_check(root, head, size, align); 1866 #endif 1867 1868 return nva_start_addr; 1869 } 1870 1871 /* 1872 * Free a region of KVA allocated by alloc_vmap_area 1873 */ 1874 static void free_vmap_area(struct vmap_area *va) 1875 { 1876 struct vmap_node *vn = addr_to_node(va->va_start); 1877 1878 /* 1879 * Remove from the busy tree/list. 1880 */ 1881 spin_lock(&vn->busy.lock); 1882 unlink_va(va, &vn->busy.root); 1883 spin_unlock(&vn->busy.lock); 1884 1885 /* 1886 * Insert/Merge it back to the free tree/list. 1887 */ 1888 spin_lock(&free_vmap_area_lock); 1889 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); 1890 spin_unlock(&free_vmap_area_lock); 1891 } 1892 1893 static inline void 1894 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) 1895 { 1896 struct vmap_area *va = NULL, *tmp; 1897 1898 /* 1899 * Preload this CPU with one extra vmap_area object. It is used 1900 * when fit type of free area is NE_FIT_TYPE. It guarantees that 1901 * a CPU that does an allocation is preloaded. 1902 * 1903 * We do it in non-atomic context, thus it allows us to use more 1904 * permissive allocation masks to be more stable under low memory 1905 * condition and high memory pressure. 1906 */ 1907 if (!this_cpu_read(ne_fit_preload_node)) 1908 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1909 1910 spin_lock(lock); 1911 1912 tmp = NULL; 1913 if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va)) 1914 kmem_cache_free(vmap_area_cachep, va); 1915 } 1916 1917 static struct vmap_pool * 1918 size_to_va_pool(struct vmap_node *vn, unsigned long size) 1919 { 1920 unsigned int idx = (size - 1) / PAGE_SIZE; 1921 1922 if (idx < MAX_VA_SIZE_PAGES) 1923 return &vn->pool[idx]; 1924 1925 return NULL; 1926 } 1927 1928 static bool 1929 node_pool_add_va(struct vmap_node *n, struct vmap_area *va) 1930 { 1931 struct vmap_pool *vp; 1932 1933 vp = size_to_va_pool(n, va_size(va)); 1934 if (!vp) 1935 return false; 1936 1937 spin_lock(&n->pool_lock); 1938 list_add(&va->list, &vp->head); 1939 WRITE_ONCE(vp->len, vp->len + 1); 1940 spin_unlock(&n->pool_lock); 1941 1942 return true; 1943 } 1944 1945 static struct vmap_area * 1946 node_pool_del_va(struct vmap_node *vn, unsigned long size, 1947 unsigned long align, unsigned long vstart, 1948 unsigned long vend) 1949 { 1950 struct vmap_area *va = NULL; 1951 struct vmap_pool *vp; 1952 int err = 0; 1953 1954 vp = size_to_va_pool(vn, size); 1955 if (!vp || list_empty(&vp->head)) 1956 return NULL; 1957 1958 spin_lock(&vn->pool_lock); 1959 if (!list_empty(&vp->head)) { 1960 va = list_first_entry(&vp->head, struct vmap_area, list); 1961 1962 if (IS_ALIGNED(va->va_start, align)) { 1963 /* 1964 * Do some sanity check and emit a warning 1965 * if one of below checks detects an error. 
1966 */ 1967 err |= (va_size(va) != size); 1968 err |= (va->va_start < vstart); 1969 err |= (va->va_end > vend); 1970 1971 if (!WARN_ON_ONCE(err)) { 1972 list_del_init(&va->list); 1973 WRITE_ONCE(vp->len, vp->len - 1); 1974 } else { 1975 va = NULL; 1976 } 1977 } else { 1978 list_move_tail(&va->list, &vp->head); 1979 va = NULL; 1980 } 1981 } 1982 spin_unlock(&vn->pool_lock); 1983 1984 return va; 1985 } 1986 1987 static struct vmap_area * 1988 node_alloc(unsigned long size, unsigned long align, 1989 unsigned long vstart, unsigned long vend, 1990 unsigned long *addr, unsigned int *vn_id) 1991 { 1992 struct vmap_area *va; 1993 1994 *vn_id = 0; 1995 *addr = -EINVAL; 1996 1997 /* 1998 * Fallback to a global heap if not vmalloc or there 1999 * is only one node. 2000 */ 2001 if (vstart != VMALLOC_START || vend != VMALLOC_END || 2002 nr_vmap_nodes == 1) 2003 return NULL; 2004 2005 *vn_id = raw_smp_processor_id() % nr_vmap_nodes; 2006 va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend); 2007 *vn_id = encode_vn_id(*vn_id); 2008 2009 if (va) 2010 *addr = va->va_start; 2011 2012 return va; 2013 } 2014 2015 static inline void setup_vmalloc_vm(struct vm_struct *vm, 2016 struct vmap_area *va, unsigned long flags, const void *caller) 2017 { 2018 vm->flags = flags; 2019 vm->addr = (void *)va->va_start; 2020 vm->size = vm->requested_size = va_size(va); 2021 vm->caller = caller; 2022 va->vm = vm; 2023 } 2024 2025 /* 2026 * Allocate a region of KVA of the specified size and alignment, within the 2027 * vstart and vend. If vm is passed in, the two will also be bound. 2028 */ 2029 static struct vmap_area *alloc_vmap_area(unsigned long size, 2030 unsigned long align, 2031 unsigned long vstart, unsigned long vend, 2032 int node, gfp_t gfp_mask, 2033 unsigned long va_flags, struct vm_struct *vm) 2034 { 2035 struct vmap_node *vn; 2036 struct vmap_area *va; 2037 unsigned long freed; 2038 unsigned long addr; 2039 unsigned int vn_id; 2040 bool allow_block; 2041 int purged = 0; 2042 int ret; 2043 2044 if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align))) 2045 return ERR_PTR(-EINVAL); 2046 2047 if (unlikely(!vmap_initialized)) 2048 return ERR_PTR(-EBUSY); 2049 2050 /* Only reclaim behaviour flags are relevant. */ 2051 gfp_mask = gfp_mask & GFP_RECLAIM_MASK; 2052 allow_block = gfpflags_allow_blocking(gfp_mask); 2053 might_sleep_if(allow_block); 2054 2055 /* 2056 * If a VA is obtained from a global heap(if it fails here) 2057 * it is anyway marked with this "vn_id" so it is returned 2058 * to this pool's node later. Such way gives a possibility 2059 * to populate pools based on users demand. 2060 * 2061 * On success a ready to go VA is returned. 2062 */ 2063 va = node_alloc(size, align, vstart, vend, &addr, &vn_id); 2064 if (!va) { 2065 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 2066 if (unlikely(!va)) 2067 return ERR_PTR(-ENOMEM); 2068 2069 /* 2070 * Only scan the relevant parts containing pointers to other objects 2071 * to avoid false negatives. 2072 */ 2073 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); 2074 } 2075 2076 retry: 2077 if (IS_ERR_VALUE(addr)) { 2078 preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); 2079 addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, 2080 size, align, vstart, vend); 2081 spin_unlock(&free_vmap_area_lock); 2082 2083 /* 2084 * This is not a fast path. Check if yielding is needed. This 2085 * is the only reschedule point in the vmalloc() path. 
2086 */ 2087 if (allow_block) 2088 cond_resched(); 2089 } 2090 2091 trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); 2092 2093 /* 2094 * If an allocation fails, the error value is 2095 * returned. Therefore trigger the overflow path. 2096 */ 2097 if (IS_ERR_VALUE(addr)) { 2098 if (allow_block) 2099 goto overflow; 2100 2101 /* 2102 * We can not trigger any reclaim logic because 2103 * sleeping is not allowed, thus fail an allocation. 2104 */ 2105 goto out_free_va; 2106 } 2107 2108 va->va_start = addr; 2109 va->va_end = addr + size; 2110 va->vm = NULL; 2111 va->flags = (va_flags | vn_id); 2112 2113 if (vm) { 2114 vm->addr = (void *)va->va_start; 2115 vm->size = va_size(va); 2116 va->vm = vm; 2117 } 2118 2119 vn = addr_to_node(va->va_start); 2120 2121 spin_lock(&vn->busy.lock); 2122 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 2123 spin_unlock(&vn->busy.lock); 2124 2125 BUG_ON(!IS_ALIGNED(va->va_start, align)); 2126 BUG_ON(va->va_start < vstart); 2127 BUG_ON(va->va_end > vend); 2128 2129 ret = kasan_populate_vmalloc(addr, size, gfp_mask); 2130 if (ret) { 2131 free_vmap_area(va); 2132 return ERR_PTR(ret); 2133 } 2134 2135 return va; 2136 2137 overflow: 2138 if (!purged) { 2139 reclaim_and_purge_vmap_areas(); 2140 purged = 1; 2141 goto retry; 2142 } 2143 2144 freed = 0; 2145 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 2146 2147 if (freed > 0) { 2148 purged = 0; 2149 goto retry; 2150 } 2151 2152 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 2153 pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n", 2154 size, vstart, vend); 2155 2156 out_free_va: 2157 kmem_cache_free(vmap_area_cachep, va); 2158 return ERR_PTR(-EBUSY); 2159 } 2160 2161 int register_vmap_purge_notifier(struct notifier_block *nb) 2162 { 2163 return blocking_notifier_chain_register(&vmap_notify_list, nb); 2164 } 2165 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 2166 2167 int unregister_vmap_purge_notifier(struct notifier_block *nb) 2168 { 2169 return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 2170 } 2171 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 2172 2173 /* 2174 * lazy_max_pages is the maximum amount of virtual address space we gather up 2175 * before attempting to purge with a TLB flush. 2176 * 2177 * There is a tradeoff here: a larger number will cover more kernel page tables 2178 * and take slightly longer to purge, but it will linearly reduce the number of 2179 * global TLB flushes that must be performed. It would seem natural to scale 2180 * this number up linearly with the number of CPUs (because vmapping activity 2181 * could also scale linearly with the number of CPUs), however it is likely 2182 * that in practice, workloads might be constrained in other ways that mean 2183 * vmap activity will not scale linearly with CPUs. Also, I want to be 2184 * conservative and not introduce a big latency on huge systems, so go with 2185 * a less aggressive log scale. It will still be an improvement over the old 2186 * code, and it will be simple to change the scale factor if we find that it 2187 * becomes a problem on bigger systems. 2188 */ 2189 static unsigned long lazy_max_pages(void) 2190 { 2191 unsigned int log; 2192 2193 log = fls(num_online_cpus()); 2194 2195 return log * (32UL * 1024 * 1024 / PAGE_SIZE); 2196 } 2197 2198 /* 2199 * Serialize vmap purging. 
There is no actual critical section protected
2200 * by this lock, but we want to avoid concurrent calls for performance
2201 * reasons and to make the pcpu_get_vm_areas more deterministic.
2202 */
2203 static DEFINE_MUTEX(vmap_purge_lock);
2204
2205 /* for per-CPU blocks */
2206 static void purge_fragmented_blocks_allcpus(void);
2207
2208 static void
2209 reclaim_list_global(struct list_head *head)
2210 {
2211 struct vmap_area *va, *n;
2212
2213 if (list_empty(head))
2214 return;
2215
2216 spin_lock(&free_vmap_area_lock);
2217 list_for_each_entry_safe(va, n, head, list)
2218 merge_or_add_vmap_area_augment(va,
2219 &free_vmap_area_root, &free_vmap_area_list);
2220 spin_unlock(&free_vmap_area_lock);
2221 }
2222
2223 static void
2224 decay_va_pool_node(struct vmap_node *vn, bool full_decay)
2225 {
2226 LIST_HEAD(decay_list);
2227 struct rb_root decay_root = RB_ROOT;
2228 struct vmap_area *va, *nva;
2229 unsigned long n_decay, pool_len;
2230 int i;
2231
2232 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
2233 LIST_HEAD(tmp_list);
2234
2235 if (list_empty(&vn->pool[i].head))
2236 continue;
2237
2238 /* Detach the pool, so no one can access it. */
2239 spin_lock(&vn->pool_lock);
2240 list_replace_init(&vn->pool[i].head, &tmp_list);
2241 spin_unlock(&vn->pool_lock);
2242
2243 pool_len = n_decay = vn->pool[i].len;
2244 WRITE_ONCE(vn->pool[i].len, 0);
2245
2246 /* Decay the pool by ~25% of the objects left in it, unless a full decay is requested. */
2247 if (!full_decay)
2248 n_decay >>= 2;
2249 pool_len -= n_decay;
2250
2251 list_for_each_entry_safe(va, nva, &tmp_list, list) {
2252 if (!n_decay--)
2253 break;
2254
2255 list_del_init(&va->list);
2256 merge_or_add_vmap_area(va, &decay_root, &decay_list);
2257 }
2258
2259 /*
2260 * Attach the pool back if it has only been partly decayed.
2261 * Note, it is assumed that no other context can populate
2262 * the pool in the meantime, therefore a simple list replace
2263 * operation is sufficient here.
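 * If the pool has been fully decayed, tmp_list is empty at
 * this point and nothing is attached back.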
2264 */ 2265 if (!list_empty(&tmp_list)) { 2266 spin_lock(&vn->pool_lock); 2267 list_replace_init(&tmp_list, &vn->pool[i].head); 2268 WRITE_ONCE(vn->pool[i].len, pool_len); 2269 spin_unlock(&vn->pool_lock); 2270 } 2271 } 2272 2273 reclaim_list_global(&decay_list); 2274 } 2275 2276 static void 2277 kasan_release_vmalloc_node(struct vmap_node *vn) 2278 { 2279 struct vmap_area *va; 2280 unsigned long start, end; 2281 2282 start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start; 2283 end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end; 2284 2285 list_for_each_entry(va, &vn->purge_list, list) { 2286 if (is_vmalloc_or_module_addr((void *) va->va_start)) 2287 kasan_release_vmalloc(va->va_start, va->va_end, 2288 va->va_start, va->va_end, 2289 KASAN_VMALLOC_PAGE_RANGE); 2290 } 2291 2292 kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH); 2293 } 2294 2295 static void purge_vmap_node(struct work_struct *work) 2296 { 2297 struct vmap_node *vn = container_of(work, 2298 struct vmap_node, purge_work); 2299 unsigned long nr_purged_pages = 0; 2300 struct vmap_area *va, *n_va; 2301 LIST_HEAD(local_list); 2302 2303 if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) 2304 kasan_release_vmalloc_node(vn); 2305 2306 vn->nr_purged = 0; 2307 2308 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { 2309 unsigned long nr = va_size(va) >> PAGE_SHIFT; 2310 unsigned int vn_id = decode_vn_id(va->flags); 2311 2312 list_del_init(&va->list); 2313 2314 nr_purged_pages += nr; 2315 vn->nr_purged++; 2316 2317 if (is_vn_id_valid(vn_id) && !vn->skip_populate) 2318 if (node_pool_add_va(vn, va)) 2319 continue; 2320 2321 /* Go back to global. */ 2322 list_add(&va->list, &local_list); 2323 } 2324 2325 atomic_long_sub(nr_purged_pages, &vmap_lazy_nr); 2326 2327 reclaim_list_global(&local_list); 2328 } 2329 2330 /* 2331 * Purges all lazily-freed vmap areas. 2332 */ 2333 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end, 2334 bool full_pool_decay) 2335 { 2336 unsigned long nr_purged_areas = 0; 2337 unsigned int nr_purge_helpers; 2338 static cpumask_t purge_nodes; 2339 unsigned int nr_purge_nodes; 2340 struct vmap_node *vn; 2341 int i; 2342 2343 lockdep_assert_held(&vmap_purge_lock); 2344 2345 /* 2346 * Use cpumask to mark which node has to be processed. 2347 */ 2348 purge_nodes = CPU_MASK_NONE; 2349 2350 for_each_vmap_node(vn) { 2351 INIT_LIST_HEAD(&vn->purge_list); 2352 vn->skip_populate = full_pool_decay; 2353 decay_va_pool_node(vn, full_pool_decay); 2354 2355 if (RB_EMPTY_ROOT(&vn->lazy.root)) 2356 continue; 2357 2358 spin_lock(&vn->lazy.lock); 2359 WRITE_ONCE(vn->lazy.root.rb_node, NULL); 2360 list_replace_init(&vn->lazy.head, &vn->purge_list); 2361 spin_unlock(&vn->lazy.lock); 2362 2363 start = min(start, list_first_entry(&vn->purge_list, 2364 struct vmap_area, list)->va_start); 2365 2366 end = max(end, list_last_entry(&vn->purge_list, 2367 struct vmap_area, list)->va_end); 2368 2369 cpumask_set_cpu(node_to_id(vn), &purge_nodes); 2370 } 2371 2372 nr_purge_nodes = cpumask_weight(&purge_nodes); 2373 if (nr_purge_nodes > 0) { 2374 flush_tlb_kernel_range(start, end); 2375 2376 /* One extra worker is per a lazy_max_pages() full set minus one. 
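 * For example, three full lazy_max_pages() sets pending across
 * four purge nodes gives clamp(3, 1, 4) - 1 = 2 extra helpers;
 * the remaining nodes are purged directly in this context.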
*/ 2377 nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages(); 2378 nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1; 2379 2380 for_each_cpu(i, &purge_nodes) { 2381 vn = &vmap_nodes[i]; 2382 2383 if (nr_purge_helpers > 0) { 2384 INIT_WORK(&vn->purge_work, purge_vmap_node); 2385 2386 if (cpumask_test_cpu(i, cpu_online_mask)) 2387 schedule_work_on(i, &vn->purge_work); 2388 else 2389 schedule_work(&vn->purge_work); 2390 2391 nr_purge_helpers--; 2392 } else { 2393 vn->purge_work.func = NULL; 2394 purge_vmap_node(&vn->purge_work); 2395 nr_purged_areas += vn->nr_purged; 2396 } 2397 } 2398 2399 for_each_cpu(i, &purge_nodes) { 2400 vn = &vmap_nodes[i]; 2401 2402 if (vn->purge_work.func) { 2403 flush_work(&vn->purge_work); 2404 nr_purged_areas += vn->nr_purged; 2405 } 2406 } 2407 } 2408 2409 trace_purge_vmap_area_lazy(start, end, nr_purged_areas); 2410 return nr_purged_areas > 0; 2411 } 2412 2413 /* 2414 * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list. 2415 */ 2416 static void reclaim_and_purge_vmap_areas(void) 2417 2418 { 2419 mutex_lock(&vmap_purge_lock); 2420 purge_fragmented_blocks_allcpus(); 2421 __purge_vmap_area_lazy(ULONG_MAX, 0, true); 2422 mutex_unlock(&vmap_purge_lock); 2423 } 2424 2425 static void drain_vmap_area_work(struct work_struct *work) 2426 { 2427 mutex_lock(&vmap_purge_lock); 2428 __purge_vmap_area_lazy(ULONG_MAX, 0, false); 2429 mutex_unlock(&vmap_purge_lock); 2430 } 2431 2432 /* 2433 * Free a vmap area, caller ensuring that the area has been unmapped, 2434 * unlinked and flush_cache_vunmap had been called for the correct 2435 * range previously. 2436 */ 2437 static void free_vmap_area_noflush(struct vmap_area *va) 2438 { 2439 unsigned long nr_lazy_max = lazy_max_pages(); 2440 unsigned long va_start = va->va_start; 2441 unsigned int vn_id = decode_vn_id(va->flags); 2442 struct vmap_node *vn; 2443 unsigned long nr_lazy; 2444 2445 if (WARN_ON_ONCE(!list_empty(&va->list))) 2446 return; 2447 2448 nr_lazy = atomic_long_add_return_relaxed(va_size(va) >> PAGE_SHIFT, 2449 &vmap_lazy_nr); 2450 2451 /* 2452 * If it was request by a certain node we would like to 2453 * return it to that node, i.e. its pool for later reuse. 2454 */ 2455 vn = is_vn_id_valid(vn_id) ? 2456 id_to_node(vn_id):addr_to_node(va->va_start); 2457 2458 spin_lock(&vn->lazy.lock); 2459 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); 2460 spin_unlock(&vn->lazy.lock); 2461 2462 trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max); 2463 2464 /* After this point, we may free va at any time */ 2465 if (unlikely(nr_lazy > nr_lazy_max)) 2466 schedule_work(&drain_vmap_work); 2467 } 2468 2469 /* 2470 * Free and unmap a vmap area 2471 */ 2472 static void free_unmap_vmap_area(struct vmap_area *va) 2473 { 2474 flush_cache_vunmap(va->va_start, va->va_end); 2475 vunmap_range_noflush(va->va_start, va->va_end); 2476 if (debug_pagealloc_enabled_static()) 2477 flush_tlb_kernel_range(va->va_start, va->va_end); 2478 2479 free_vmap_area_noflush(va); 2480 } 2481 2482 struct vmap_area *find_vmap_area(unsigned long addr) 2483 { 2484 struct vmap_node *vn; 2485 struct vmap_area *va; 2486 int i, j; 2487 2488 if (unlikely(!vmap_initialized)) 2489 return NULL; 2490 2491 /* 2492 * An addr_to_node_id(addr) converts an address to a node index 2493 * where a VA is located. If VA spans several zones and passed 2494 * addr is not the same as va->va_start, what is not common, we 2495 * may need to scan extra nodes. 
See an example: 2496 * 2497 * <----va----> 2498 * -|-----|-----|-----|-----|- 2499 * 1 2 0 1 2500 * 2501 * VA resides in node 1 whereas it spans 1, 2 an 0. If passed 2502 * addr is within 2 or 0 nodes we should do extra work. 2503 */ 2504 i = j = addr_to_node_id(addr); 2505 do { 2506 vn = &vmap_nodes[i]; 2507 2508 spin_lock(&vn->busy.lock); 2509 va = __find_vmap_area(addr, &vn->busy.root); 2510 spin_unlock(&vn->busy.lock); 2511 2512 if (va) 2513 return va; 2514 } while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j); 2515 2516 return NULL; 2517 } 2518 2519 static struct vmap_area *find_unlink_vmap_area(unsigned long addr) 2520 { 2521 struct vmap_node *vn; 2522 struct vmap_area *va; 2523 int i, j; 2524 2525 /* 2526 * Check the comment in the find_vmap_area() about the loop. 2527 */ 2528 i = j = addr_to_node_id(addr); 2529 do { 2530 vn = &vmap_nodes[i]; 2531 2532 spin_lock(&vn->busy.lock); 2533 va = __find_vmap_area(addr, &vn->busy.root); 2534 if (va) 2535 unlink_va(va, &vn->busy.root); 2536 spin_unlock(&vn->busy.lock); 2537 2538 if (va) 2539 return va; 2540 } while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j); 2541 2542 return NULL; 2543 } 2544 2545 /*** Per cpu kva allocator ***/ 2546 2547 /* 2548 * vmap space is limited especially on 32 bit architectures. Ensure there is 2549 * room for at least 16 percpu vmap blocks per CPU. 2550 */ 2551 /* 2552 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 2553 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 2554 * instead (we just need a rough idea) 2555 */ 2556 #if BITS_PER_LONG == 32 2557 #define VMALLOC_SPACE (128UL*1024*1024) 2558 #else 2559 #define VMALLOC_SPACE (128UL*1024*1024*1024) 2560 #endif 2561 2562 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 2563 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 2564 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 2565 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 2566 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 2567 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 2568 #define VMAP_BBMAP_BITS \ 2569 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 2570 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 2571 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 2572 2573 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 2574 2575 /* 2576 * Purge threshold to prevent overeager purging of fragmented blocks for 2577 * regular operations: Purge if vb->free is less than 1/4 of the capacity. 2578 */ 2579 #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) 2580 2581 #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/ 2582 #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/ 2583 #define VMAP_FLAGS_MASK 0x3 2584 2585 struct vmap_block_queue { 2586 spinlock_t lock; 2587 struct list_head free; 2588 2589 /* 2590 * An xarray requires an extra memory dynamically to 2591 * be allocated. If it is an issue, we can use rb-tree 2592 * instead. 
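 * The xarray is indexed by addr_to_vb_idx() of a block's start
 * address; vb_free() relies on it to look the block up again.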
2593 */ 2594 struct xarray vmap_blocks; 2595 }; 2596 2597 struct vmap_block { 2598 spinlock_t lock; 2599 struct vmap_area *va; 2600 unsigned long free, dirty; 2601 DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS); 2602 unsigned long dirty_min, dirty_max; /*< dirty range */ 2603 struct list_head free_list; 2604 struct rcu_head rcu_head; 2605 struct list_head purge; 2606 unsigned int cpu; 2607 }; 2608 2609 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 2610 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 2611 2612 /* 2613 * In order to fast access to any "vmap_block" associated with a 2614 * specific address, we use a hash. 2615 * 2616 * A per-cpu vmap_block_queue is used in both ways, to serialize 2617 * an access to free block chains among CPUs(alloc path) and it 2618 * also acts as a vmap_block hash(alloc/free paths). It means we 2619 * overload it, since we already have the per-cpu array which is 2620 * used as a hash table. When used as a hash a 'cpu' passed to 2621 * per_cpu() is not actually a CPU but rather a hash index. 2622 * 2623 * A hash function is addr_to_vb_xa() which hashes any address 2624 * to a specific index(in a hash) it belongs to. This then uses a 2625 * per_cpu() macro to access an array with generated index. 2626 * 2627 * An example: 2628 * 2629 * CPU_1 CPU_2 CPU_0 2630 * | | | 2631 * V V V 2632 * 0 10 20 30 40 50 60 2633 * |------|------|------|------|------|------|...<vmap address space> 2634 * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2 2635 * 2636 * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus 2637 * it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock; 2638 * 2639 * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus 2640 * it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock; 2641 * 2642 * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus 2643 * it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock. 2644 * 2645 * This technique almost always avoids lock contention on insert/remove, 2646 * however xarray spinlocks protect against any contention that remains. 2647 */ 2648 static struct xarray * 2649 addr_to_vb_xa(unsigned long addr) 2650 { 2651 int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids; 2652 2653 /* 2654 * Please note, nr_cpu_ids points on a highest set 2655 * possible bit, i.e. we never invoke cpumask_next() 2656 * if an index points on it which is nr_cpu_ids - 1. 2657 */ 2658 if (!cpu_possible(index)) 2659 index = cpumask_next(index, cpu_possible_mask); 2660 2661 return &per_cpu(vmap_block_queue, index).vmap_blocks; 2662 } 2663 2664 /* 2665 * We should probably have a fallback mechanism to allocate virtual memory 2666 * out of partially filled vmap blocks. However vmap block sizing should be 2667 * fairly reasonable according to the vmalloc size, so it shouldn't be a 2668 * big problem. 2669 */ 2670 2671 static unsigned long addr_to_vb_idx(unsigned long addr) 2672 { 2673 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 2674 addr /= VMAP_BLOCK_SIZE; 2675 return addr; 2676 } 2677 2678 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 2679 { 2680 unsigned long addr; 2681 2682 addr = va_start + (pages_off << PAGE_SHIFT); 2683 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 2684 return (void *)addr; 2685 } 2686 2687 /** 2688 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 2689 * block. 
The number of occupied pages cannot exceed VMAP_BBMAP_BITS, of course.
2690 * @order: how many 2^order pages should be occupied in newly allocated block
2691 * @gfp_mask: flags for the page level allocator
2692 *
2693 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2694 */
2695 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2696 {
2697 struct vmap_block_queue *vbq;
2698 struct vmap_block *vb;
2699 struct vmap_area *va;
2700 struct xarray *xa;
2701 unsigned long vb_idx;
2702 int node, err;
2703 void *vaddr;
2704
2705 node = numa_node_id();
2706
2707 vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask, node);
2708 if (unlikely(!vb))
2709 return ERR_PTR(-ENOMEM);
2710
2711 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2712 VMALLOC_START, VMALLOC_END,
2713 node, gfp_mask,
2714 VMAP_RAM|VMAP_BLOCK, NULL);
2715 if (IS_ERR(va)) {
2716 kfree(vb);
2717 return ERR_CAST(va);
2718 }
2719
2720 vaddr = vmap_block_vaddr(va->va_start, 0);
2721 spin_lock_init(&vb->lock);
2722 vb->va = va;
2723 /* At least something should be left free */
2724 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2725 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2726 vb->free = VMAP_BBMAP_BITS - (1UL << order);
2727 vb->dirty = 0;
2728 vb->dirty_min = VMAP_BBMAP_BITS;
2729 vb->dirty_max = 0;
2730 bitmap_set(vb->used_map, 0, (1UL << order));
2731 INIT_LIST_HEAD(&vb->free_list);
2732 vb->cpu = raw_smp_processor_id();
2733
2734 xa = addr_to_vb_xa(va->va_start);
2735 vb_idx = addr_to_vb_idx(va->va_start);
2736 err = xa_insert(xa, vb_idx, vb, gfp_mask);
2737 if (err) {
2738 kfree(vb);
2739 free_vmap_area(va);
2740 return ERR_PTR(err);
2741 }
2742 /*
2743 * The list_add_tail_rcu() below may happen on a core other
2744 * than vb->cpu due to task migration, which is safe:
2745 * list_add_tail_rcu() preserves the list's integrity together
2746 * with list_for_each_entry_rcu() on the read
2747 * side.
2748 */ 2749 vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu); 2750 spin_lock(&vbq->lock); 2751 list_add_tail_rcu(&vb->free_list, &vbq->free); 2752 spin_unlock(&vbq->lock); 2753 2754 return vaddr; 2755 } 2756 2757 static void free_vmap_block(struct vmap_block *vb) 2758 { 2759 struct vmap_node *vn; 2760 struct vmap_block *tmp; 2761 struct xarray *xa; 2762 2763 xa = addr_to_vb_xa(vb->va->va_start); 2764 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); 2765 BUG_ON(tmp != vb); 2766 2767 vn = addr_to_node(vb->va->va_start); 2768 spin_lock(&vn->busy.lock); 2769 unlink_va(vb->va, &vn->busy.root); 2770 spin_unlock(&vn->busy.lock); 2771 2772 free_vmap_area_noflush(vb->va); 2773 kfree_rcu(vb, rcu_head); 2774 } 2775 2776 static bool purge_fragmented_block(struct vmap_block *vb, 2777 struct list_head *purge_list, bool force_purge) 2778 { 2779 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu); 2780 2781 if (vb->free + vb->dirty != VMAP_BBMAP_BITS || 2782 vb->dirty == VMAP_BBMAP_BITS) 2783 return false; 2784 2785 /* Don't overeagerly purge usable blocks unless requested */ 2786 if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) 2787 return false; 2788 2789 /* prevent further allocs after releasing lock */ 2790 WRITE_ONCE(vb->free, 0); 2791 /* prevent purging it again */ 2792 WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); 2793 vb->dirty_min = 0; 2794 vb->dirty_max = VMAP_BBMAP_BITS; 2795 spin_lock(&vbq->lock); 2796 list_del_rcu(&vb->free_list); 2797 spin_unlock(&vbq->lock); 2798 list_add_tail(&vb->purge, purge_list); 2799 return true; 2800 } 2801 2802 static void free_purged_blocks(struct list_head *purge_list) 2803 { 2804 struct vmap_block *vb, *n_vb; 2805 2806 list_for_each_entry_safe(vb, n_vb, purge_list, purge) { 2807 list_del(&vb->purge); 2808 free_vmap_block(vb); 2809 } 2810 } 2811 2812 static void purge_fragmented_blocks(int cpu) 2813 { 2814 LIST_HEAD(purge); 2815 struct vmap_block *vb; 2816 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2817 2818 rcu_read_lock(); 2819 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2820 unsigned long free = READ_ONCE(vb->free); 2821 unsigned long dirty = READ_ONCE(vb->dirty); 2822 2823 if (free + dirty != VMAP_BBMAP_BITS || 2824 dirty == VMAP_BBMAP_BITS) 2825 continue; 2826 2827 spin_lock(&vb->lock); 2828 purge_fragmented_block(vb, &purge, true); 2829 spin_unlock(&vb->lock); 2830 } 2831 rcu_read_unlock(); 2832 free_purged_blocks(&purge); 2833 } 2834 2835 static void purge_fragmented_blocks_allcpus(void) 2836 { 2837 int cpu; 2838 2839 for_each_possible_cpu(cpu) 2840 purge_fragmented_blocks(cpu); 2841 } 2842 2843 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 2844 { 2845 struct vmap_block_queue *vbq; 2846 struct vmap_block *vb; 2847 void *vaddr = NULL; 2848 unsigned int order; 2849 2850 BUG_ON(offset_in_page(size)); 2851 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2852 if (WARN_ON(size == 0)) { 2853 /* 2854 * Allocating 0 bytes isn't what caller wants since 2855 * get_order(0) returns funny result. Just warn and terminate 2856 * early. 
2857 */ 2858 return ERR_PTR(-EINVAL); 2859 } 2860 order = get_order(size); 2861 2862 rcu_read_lock(); 2863 vbq = raw_cpu_ptr(&vmap_block_queue); 2864 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2865 unsigned long pages_off; 2866 2867 if (READ_ONCE(vb->free) < (1UL << order)) 2868 continue; 2869 2870 spin_lock(&vb->lock); 2871 if (vb->free < (1UL << order)) { 2872 spin_unlock(&vb->lock); 2873 continue; 2874 } 2875 2876 pages_off = VMAP_BBMAP_BITS - vb->free; 2877 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 2878 WRITE_ONCE(vb->free, vb->free - (1UL << order)); 2879 bitmap_set(vb->used_map, pages_off, (1UL << order)); 2880 if (vb->free == 0) { 2881 spin_lock(&vbq->lock); 2882 list_del_rcu(&vb->free_list); 2883 spin_unlock(&vbq->lock); 2884 } 2885 2886 spin_unlock(&vb->lock); 2887 break; 2888 } 2889 2890 rcu_read_unlock(); 2891 2892 /* Allocate new block if nothing was found */ 2893 if (!vaddr) 2894 vaddr = new_vmap_block(order, gfp_mask); 2895 2896 return vaddr; 2897 } 2898 2899 static void vb_free(unsigned long addr, unsigned long size) 2900 { 2901 unsigned long offset; 2902 unsigned int order; 2903 struct vmap_block *vb; 2904 struct xarray *xa; 2905 2906 BUG_ON(offset_in_page(size)); 2907 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2908 2909 flush_cache_vunmap(addr, addr + size); 2910 2911 order = get_order(size); 2912 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 2913 2914 xa = addr_to_vb_xa(addr); 2915 vb = xa_load(xa, addr_to_vb_idx(addr)); 2916 2917 spin_lock(&vb->lock); 2918 bitmap_clear(vb->used_map, offset, (1UL << order)); 2919 spin_unlock(&vb->lock); 2920 2921 vunmap_range_noflush(addr, addr + size); 2922 2923 if (debug_pagealloc_enabled_static()) 2924 flush_tlb_kernel_range(addr, addr + size); 2925 2926 spin_lock(&vb->lock); 2927 2928 /* Expand the not yet TLB flushed dirty range */ 2929 vb->dirty_min = min(vb->dirty_min, offset); 2930 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2931 2932 WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); 2933 if (vb->dirty == VMAP_BBMAP_BITS) { 2934 BUG_ON(vb->free); 2935 spin_unlock(&vb->lock); 2936 free_vmap_block(vb); 2937 } else 2938 spin_unlock(&vb->lock); 2939 } 2940 2941 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2942 { 2943 LIST_HEAD(purge_list); 2944 int cpu; 2945 2946 if (unlikely(!vmap_initialized)) 2947 return; 2948 2949 mutex_lock(&vmap_purge_lock); 2950 2951 for_each_possible_cpu(cpu) { 2952 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2953 struct vmap_block *vb; 2954 unsigned long idx; 2955 2956 rcu_read_lock(); 2957 xa_for_each(&vbq->vmap_blocks, idx, vb) { 2958 spin_lock(&vb->lock); 2959 2960 /* 2961 * Try to purge a fragmented block first. If it's 2962 * not purgeable, check whether there is dirty 2963 * space to be flushed. 
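 * A block's [dirty_min:dirty_max] page range is converted to
 * addresses and folded into the global [start:end) span, so a
 * single flush_tlb_kernel_range() below can cover all of them.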
2964 */ 2965 if (!purge_fragmented_block(vb, &purge_list, false) && 2966 vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { 2967 unsigned long va_start = vb->va->va_start; 2968 unsigned long s, e; 2969 2970 s = va_start + (vb->dirty_min << PAGE_SHIFT); 2971 e = va_start + (vb->dirty_max << PAGE_SHIFT); 2972 2973 start = min(s, start); 2974 end = max(e, end); 2975 2976 /* Prevent that this is flushed again */ 2977 vb->dirty_min = VMAP_BBMAP_BITS; 2978 vb->dirty_max = 0; 2979 2980 flush = 1; 2981 } 2982 spin_unlock(&vb->lock); 2983 } 2984 rcu_read_unlock(); 2985 } 2986 free_purged_blocks(&purge_list); 2987 2988 if (!__purge_vmap_area_lazy(start, end, false) && flush) 2989 flush_tlb_kernel_range(start, end); 2990 mutex_unlock(&vmap_purge_lock); 2991 } 2992 2993 /** 2994 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 2995 * 2996 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 2997 * to amortize TLB flushing overheads. What this means is that any page you 2998 * have now, may, in a former life, have been mapped into kernel virtual 2999 * address by the vmap layer and so there might be some CPUs with TLB entries 3000 * still referencing that page (additional to the regular 1:1 kernel mapping). 3001 * 3002 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 3003 * be sure that none of the pages we have control over will have any aliases 3004 * from the vmap layer. 3005 */ 3006 void vm_unmap_aliases(void) 3007 { 3008 _vm_unmap_aliases(ULONG_MAX, 0, 0); 3009 } 3010 EXPORT_SYMBOL_GPL(vm_unmap_aliases); 3011 3012 /** 3013 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 3014 * @mem: the pointer returned by vm_map_ram 3015 * @count: the count passed to that vm_map_ram call (cannot unmap partial) 3016 */ 3017 void vm_unmap_ram(const void *mem, unsigned int count) 3018 { 3019 unsigned long size = (unsigned long)count << PAGE_SHIFT; 3020 unsigned long addr = (unsigned long)kasan_reset_tag(mem); 3021 struct vmap_area *va; 3022 3023 might_sleep(); 3024 BUG_ON(!addr); 3025 BUG_ON(addr < VMALLOC_START); 3026 BUG_ON(addr > VMALLOC_END); 3027 BUG_ON(!PAGE_ALIGNED(addr)); 3028 3029 kasan_poison_vmalloc(mem, size); 3030 3031 if (likely(count <= VMAP_MAX_ALLOC)) { 3032 debug_check_no_locks_freed(mem, size); 3033 vb_free(addr, size); 3034 return; 3035 } 3036 3037 va = find_unlink_vmap_area(addr); 3038 if (WARN_ON_ONCE(!va)) 3039 return; 3040 3041 debug_check_no_locks_freed((void *)va->va_start, va_size(va)); 3042 free_unmap_vmap_area(va); 3043 } 3044 EXPORT_SYMBOL(vm_unmap_ram); 3045 3046 /** 3047 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 3048 * @pages: an array of pointers to the pages to be mapped 3049 * @count: number of pages 3050 * @node: prefer to allocate data structures on this node 3051 * 3052 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 3053 * faster than vmap so it's good. But if you mix long-life and short-life 3054 * objects with vm_map_ram(), it could consume lots of address space through 3055 * fragmentation (especially on a 32bit machine). You could see failures in 3056 * the end. Please use this function for short-lived objects. 
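 *
 * A minimal usage sketch (the page array and count are the caller's):
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *
 *	if (va) {
 *		... access the pages through va ...
 *		vm_unmap_ram(va, nr_pages);
 *	}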
3057 * 3058 * Returns: a pointer to the address that has been mapped, or %NULL on failure 3059 */ 3060 void *vm_map_ram(struct page **pages, unsigned int count, int node) 3061 { 3062 unsigned long size = (unsigned long)count << PAGE_SHIFT; 3063 unsigned long addr; 3064 void *mem; 3065 3066 if (likely(count <= VMAP_MAX_ALLOC)) { 3067 mem = vb_alloc(size, GFP_KERNEL); 3068 if (IS_ERR(mem)) 3069 return NULL; 3070 addr = (unsigned long)mem; 3071 } else { 3072 struct vmap_area *va; 3073 va = alloc_vmap_area(size, PAGE_SIZE, 3074 VMALLOC_START, VMALLOC_END, 3075 node, GFP_KERNEL, VMAP_RAM, 3076 NULL); 3077 if (IS_ERR(va)) 3078 return NULL; 3079 3080 addr = va->va_start; 3081 mem = (void *)addr; 3082 } 3083 3084 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 3085 pages, PAGE_SHIFT) < 0) { 3086 vm_unmap_ram(mem, count); 3087 return NULL; 3088 } 3089 3090 /* 3091 * Mark the pages as accessible, now that they are mapped. 3092 * With hardware tag-based KASAN, marking is skipped for 3093 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 3094 */ 3095 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); 3096 3097 return mem; 3098 } 3099 EXPORT_SYMBOL(vm_map_ram); 3100 3101 static struct vm_struct *vmlist __initdata; 3102 3103 static inline unsigned int vm_area_page_order(struct vm_struct *vm) 3104 { 3105 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 3106 return vm->page_order; 3107 #else 3108 return 0; 3109 #endif 3110 } 3111 3112 unsigned int get_vm_area_page_order(struct vm_struct *vm) 3113 { 3114 return vm_area_page_order(vm); 3115 } 3116 3117 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 3118 { 3119 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 3120 vm->page_order = order; 3121 #else 3122 BUG_ON(order != 0); 3123 #endif 3124 } 3125 3126 /** 3127 * vm_area_add_early - add vmap area early during boot 3128 * @vm: vm_struct to add 3129 * 3130 * This function is used to add fixed kernel vm area to vmlist before 3131 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 3132 * should contain proper values and the other fields should be zero. 3133 * 3134 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 3135 */ 3136 void __init vm_area_add_early(struct vm_struct *vm) 3137 { 3138 struct vm_struct *tmp, **p; 3139 3140 BUG_ON(vmap_initialized); 3141 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 3142 if (tmp->addr >= vm->addr) { 3143 BUG_ON(tmp->addr < vm->addr + vm->size); 3144 break; 3145 } else 3146 BUG_ON(tmp->addr + tmp->size > vm->addr); 3147 } 3148 vm->next = *p; 3149 *p = vm; 3150 } 3151 3152 /** 3153 * vm_area_register_early - register vmap area early during boot 3154 * @vm: vm_struct to register 3155 * @align: requested alignment 3156 * 3157 * This function is used to register kernel vm area before 3158 * vmalloc_init() is called. @vm->size and @vm->flags should contain 3159 * proper values on entry and other fields should be zero. On return, 3160 * vm->addr contains the allocated address. 3161 * 3162 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
3163 */ 3164 void __init vm_area_register_early(struct vm_struct *vm, size_t align) 3165 { 3166 unsigned long addr = ALIGN(VMALLOC_START, align); 3167 struct vm_struct *cur, **p; 3168 3169 BUG_ON(vmap_initialized); 3170 3171 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { 3172 if ((unsigned long)cur->addr - addr >= vm->size) 3173 break; 3174 addr = ALIGN((unsigned long)cur->addr + cur->size, align); 3175 } 3176 3177 BUG_ON(addr > VMALLOC_END - vm->size); 3178 vm->addr = (void *)addr; 3179 vm->next = *p; 3180 *p = vm; 3181 kasan_populate_early_vm_area_shadow(vm->addr, vm->size); 3182 } 3183 3184 static void clear_vm_uninitialized_flag(struct vm_struct *vm) 3185 { 3186 /* 3187 * Before removing VM_UNINITIALIZED, 3188 * we should make sure that vm has proper values. 3189 * Pair with smp_rmb() in vread_iter() and vmalloc_info_show(). 3190 */ 3191 smp_wmb(); 3192 vm->flags &= ~VM_UNINITIALIZED; 3193 } 3194 3195 struct vm_struct *__get_vm_area_node(unsigned long size, 3196 unsigned long align, unsigned long shift, unsigned long flags, 3197 unsigned long start, unsigned long end, int node, 3198 gfp_t gfp_mask, const void *caller) 3199 { 3200 struct vmap_area *va; 3201 struct vm_struct *area; 3202 unsigned long requested_size = size; 3203 3204 BUG_ON(in_interrupt()); 3205 size = ALIGN(size, 1ul << shift); 3206 if (unlikely(!size)) 3207 return NULL; 3208 3209 if (flags & VM_IOREMAP) 3210 align = 1ul << clamp_t(int, get_count_order_long(size), 3211 PAGE_SHIFT, IOREMAP_MAX_ORDER); 3212 3213 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 3214 if (unlikely(!area)) 3215 return NULL; 3216 3217 if (!(flags & VM_NO_GUARD)) 3218 size += PAGE_SIZE; 3219 3220 area->flags = flags; 3221 area->caller = caller; 3222 area->requested_size = requested_size; 3223 3224 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); 3225 if (IS_ERR(va)) { 3226 kfree(area); 3227 return NULL; 3228 } 3229 3230 /* 3231 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a 3232 * best-effort approach, as they can be mapped outside of vmalloc code. 3233 * For VM_ALLOC mappings, the pages are marked as accessible after 3234 * getting mapped in __vmalloc_node_range(). 3235 * With hardware tag-based KASAN, marking is skipped for 3236 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 3237 */ 3238 if (!(flags & VM_ALLOC)) 3239 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, 3240 KASAN_VMALLOC_PROT_NORMAL); 3241 3242 return area; 3243 } 3244 3245 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 3246 unsigned long start, unsigned long end, 3247 const void *caller) 3248 { 3249 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, 3250 NUMA_NO_NODE, GFP_KERNEL, caller); 3251 } 3252 3253 /** 3254 * get_vm_area - reserve a contiguous kernel virtual area 3255 * @size: size of the area 3256 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 3257 * 3258 * Search an area of @size in the kernel virtual mapping area, 3259 * and reserved it for out purposes. Returns the area descriptor 3260 * on success or %NULL on failure. 3261 * 3262 * Return: the area descriptor on success or %NULL on failure. 
3263 */ 3264 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 3265 { 3266 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 3267 VMALLOC_START, VMALLOC_END, 3268 NUMA_NO_NODE, GFP_KERNEL, 3269 __builtin_return_address(0)); 3270 } 3271 3272 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 3273 const void *caller) 3274 { 3275 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 3276 VMALLOC_START, VMALLOC_END, 3277 NUMA_NO_NODE, GFP_KERNEL, caller); 3278 } 3279 3280 /** 3281 * find_vm_area - find a continuous kernel virtual area 3282 * @addr: base address 3283 * 3284 * Search for the kernel VM area starting at @addr, and return it. 3285 * It is up to the caller to do all required locking to keep the returned 3286 * pointer valid. 3287 * 3288 * Return: the area descriptor on success or %NULL on failure. 3289 */ 3290 struct vm_struct *find_vm_area(const void *addr) 3291 { 3292 struct vmap_area *va; 3293 3294 va = find_vmap_area((unsigned long)addr); 3295 if (!va) 3296 return NULL; 3297 3298 return va->vm; 3299 } 3300 3301 /** 3302 * remove_vm_area - find and remove a continuous kernel virtual area 3303 * @addr: base address 3304 * 3305 * Search for the kernel VM area starting at @addr, and remove it. 3306 * This function returns the found VM area, but using it is NOT safe 3307 * on SMP machines, except for its size or flags. 3308 * 3309 * Return: the area descriptor on success or %NULL on failure. 3310 */ 3311 struct vm_struct *remove_vm_area(const void *addr) 3312 { 3313 struct vmap_area *va; 3314 struct vm_struct *vm; 3315 3316 might_sleep(); 3317 3318 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 3319 addr)) 3320 return NULL; 3321 3322 va = find_unlink_vmap_area((unsigned long)addr); 3323 if (!va || !va->vm) 3324 return NULL; 3325 vm = va->vm; 3326 3327 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm)); 3328 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm)); 3329 kasan_free_module_shadow(vm); 3330 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm)); 3331 3332 free_unmap_vmap_area(va); 3333 return vm; 3334 } 3335 3336 static inline void set_area_direct_map(const struct vm_struct *area, 3337 int (*set_direct_map)(struct page *page)) 3338 { 3339 int i; 3340 3341 /* HUGE_VMALLOC passes small pages to set_direct_map */ 3342 for (i = 0; i < area->nr_pages; i++) 3343 if (page_address(area->pages[i])) 3344 set_direct_map(area->pages[i]); 3345 } 3346 3347 /* 3348 * Flush the vm mapping and reset the direct map. 3349 */ 3350 static void vm_reset_perms(struct vm_struct *area) 3351 { 3352 unsigned long start = ULONG_MAX, end = 0; 3353 unsigned int page_order = vm_area_page_order(area); 3354 int flush_dmap = 0; 3355 int i; 3356 3357 /* 3358 * Find the start and end range of the direct mappings to make sure that 3359 * the vm_unmap_aliases() flush includes the direct map. 3360 */ 3361 for (i = 0; i < area->nr_pages; i += 1U << page_order) { 3362 unsigned long addr = (unsigned long)page_address(area->pages[i]); 3363 3364 if (addr) { 3365 unsigned long page_size; 3366 3367 page_size = PAGE_SIZE << page_order; 3368 start = min(addr, start); 3369 end = max(addr + page_size, end); 3370 flush_dmap = 1; 3371 } 3372 } 3373 3374 /* 3375 * Set direct map to something invalid so that it won't be cached if 3376 * there are any accesses after the TLB flush, then flush the TLB and 3377 * reset the direct map permissions to the default. 
3378 */ 3379 set_area_direct_map(area, set_direct_map_invalid_noflush); 3380 _vm_unmap_aliases(start, end, flush_dmap); 3381 set_area_direct_map(area, set_direct_map_default_noflush); 3382 } 3383 3384 static void delayed_vfree_work(struct work_struct *w) 3385 { 3386 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); 3387 struct llist_node *t, *llnode; 3388 3389 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) 3390 vfree(llnode); 3391 } 3392 3393 /** 3394 * vfree_atomic - release memory allocated by vmalloc() 3395 * @addr: memory base address 3396 * 3397 * This one is just like vfree() but can be called in any atomic context 3398 * except NMIs. 3399 */ 3400 void vfree_atomic(const void *addr) 3401 { 3402 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 3403 3404 BUG_ON(in_nmi()); 3405 kmemleak_free(addr); 3406 3407 /* 3408 * Use raw_cpu_ptr() because this can be called from preemptible 3409 * context. Preemption is absolutely fine here, because the llist_add() 3410 * implementation is lockless, so it works even if we are adding to 3411 * another cpu's list. schedule_work() should be fine with this too. 3412 */ 3413 if (addr && llist_add((struct llist_node *)addr, &p->list)) 3414 schedule_work(&p->wq); 3415 } 3416 3417 /** 3418 * vfree - Release memory allocated by vmalloc() 3419 * @addr: Memory base address 3420 * 3421 * Free the virtually continuous memory area starting at @addr, as obtained 3422 * from one of the vmalloc() family of APIs. This will usually also free the 3423 * physical memory underlying the virtual allocation, but that memory is 3424 * reference counted, so it will not be freed until the last user goes away. 3425 * 3426 * If @addr is NULL, no operation is performed. 3427 * 3428 * Context: 3429 * May sleep if called *not* from interrupt context. 3430 * Must not be called in NMI context (strictly speaking, it could be 3431 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 3432 * conventions for vfree() arch-dependent would be a really bad idea). 3433 */ 3434 void vfree(const void *addr) 3435 { 3436 struct vm_struct *vm; 3437 int i; 3438 3439 if (unlikely(in_interrupt())) { 3440 vfree_atomic(addr); 3441 return; 3442 } 3443 3444 BUG_ON(in_nmi()); 3445 kmemleak_free(addr); 3446 might_sleep(); 3447 3448 if (!addr) 3449 return; 3450 3451 vm = remove_vm_area(addr); 3452 if (unlikely(!vm)) { 3453 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 3454 addr); 3455 return; 3456 } 3457 3458 if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS)) 3459 vm_reset_perms(vm); 3460 /* All pages of vm should be charged to same memcg, so use first one. */ 3461 if (vm->nr_pages && !(vm->flags & VM_MAP_PUT_PAGES)) 3462 mod_memcg_page_state(vm->pages[0], MEMCG_VMALLOC, -vm->nr_pages); 3463 for (i = 0; i < vm->nr_pages; i++) { 3464 struct page *page = vm->pages[i]; 3465 3466 BUG_ON(!page); 3467 /* 3468 * High-order allocs for huge vmallocs are split, so 3469 * can be freed as an array of order-0 allocations 3470 */ 3471 __free_page(page); 3472 cond_resched(); 3473 } 3474 if (!(vm->flags & VM_MAP_PUT_PAGES)) 3475 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages); 3476 kvfree(vm->pages); 3477 kfree(vm); 3478 } 3479 EXPORT_SYMBOL(vfree); 3480 3481 /** 3482 * vunmap - release virtual mapping obtained by vmap() 3483 * @addr: memory base address 3484 * 3485 * Free the virtually contiguous memory area starting at @addr, 3486 * which was created from the page array passed to vmap(). 3487 * 3488 * Must not be called in interrupt context. 
3489 */ 3490 void vunmap(const void *addr) 3491 { 3492 struct vm_struct *vm; 3493 3494 BUG_ON(in_interrupt()); 3495 might_sleep(); 3496 3497 if (!addr) 3498 return; 3499 vm = remove_vm_area(addr); 3500 if (unlikely(!vm)) { 3501 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", 3502 addr); 3503 return; 3504 } 3505 kfree(vm); 3506 } 3507 EXPORT_SYMBOL(vunmap); 3508 3509 /** 3510 * vmap - map an array of pages into virtually contiguous space 3511 * @pages: array of page pointers 3512 * @count: number of pages to map 3513 * @flags: vm_area->flags 3514 * @prot: page protection for the mapping 3515 * 3516 * Maps @count pages from @pages into contiguous kernel virtual space. 3517 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself 3518 * (which must be kmalloc or vmalloc memory) and one reference per pages in it 3519 * are transferred from the caller to vmap(), and will be freed / dropped when 3520 * vfree() is called on the return value. 3521 * 3522 * Return: the address of the area or %NULL on failure 3523 */ 3524 void *vmap(struct page **pages, unsigned int count, 3525 unsigned long flags, pgprot_t prot) 3526 { 3527 struct vm_struct *area; 3528 unsigned long addr; 3529 unsigned long size; /* In bytes */ 3530 3531 might_sleep(); 3532 3533 if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS)) 3534 return NULL; 3535 3536 /* 3537 * Your top guard is someone else's bottom guard. Not having a top 3538 * guard compromises someone else's mappings too. 3539 */ 3540 if (WARN_ON_ONCE(flags & VM_NO_GUARD)) 3541 flags &= ~VM_NO_GUARD; 3542 3543 if (count > totalram_pages()) 3544 return NULL; 3545 3546 size = (unsigned long)count << PAGE_SHIFT; 3547 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 3548 if (!area) 3549 return NULL; 3550 3551 addr = (unsigned long)area->addr; 3552 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 3553 pages, PAGE_SHIFT) < 0) { 3554 vunmap(area->addr); 3555 return NULL; 3556 } 3557 3558 if (flags & VM_MAP_PUT_PAGES) { 3559 area->pages = pages; 3560 area->nr_pages = count; 3561 } 3562 return area->addr; 3563 } 3564 EXPORT_SYMBOL(vmap); 3565 3566 #ifdef CONFIG_VMAP_PFN 3567 struct vmap_pfn_data { 3568 unsigned long *pfns; 3569 pgprot_t prot; 3570 unsigned int idx; 3571 }; 3572 3573 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 3574 { 3575 struct vmap_pfn_data *data = private; 3576 unsigned long pfn = data->pfns[data->idx]; 3577 pte_t ptent; 3578 3579 if (WARN_ON_ONCE(pfn_valid(pfn))) 3580 return -EINVAL; 3581 3582 ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); 3583 set_pte_at(&init_mm, addr, pte, ptent); 3584 3585 data->idx++; 3586 return 0; 3587 } 3588 3589 /** 3590 * vmap_pfn - map an array of PFNs into virtually contiguous space 3591 * @pfns: array of PFNs 3592 * @count: number of pages to map 3593 * @prot: page protection for the mapping 3594 * 3595 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 3596 * the start address of the mapping. 
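 *
 * The PFNs must not be backed by struct pages: vmap_pfn_apply()
 * rejects any pfn_valid() PFN, so this is meant for PFNs that have
 * no struct page behind them.
 *
 * Return: the start address of the mapping, or %NULL on failure.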
3597 */ 3598 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 3599 { 3600 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 3601 struct vm_struct *area; 3602 3603 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 3604 __builtin_return_address(0)); 3605 if (!area) 3606 return NULL; 3607 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 3608 count * PAGE_SIZE, vmap_pfn_apply, &data)) { 3609 free_vm_area(area); 3610 return NULL; 3611 } 3612 3613 flush_cache_vmap((unsigned long)area->addr, 3614 (unsigned long)area->addr + count * PAGE_SIZE); 3615 3616 return area->addr; 3617 } 3618 EXPORT_SYMBOL_GPL(vmap_pfn); 3619 #endif /* CONFIG_VMAP_PFN */ 3620 3621 /* 3622 * Helper for vmalloc to adjust the gfp flags for certain allocations. 3623 */ 3624 static inline gfp_t vmalloc_gfp_adjust(gfp_t flags, const bool large) 3625 { 3626 flags |= __GFP_NOWARN; 3627 if (large) 3628 flags &= ~__GFP_NOFAIL; 3629 return flags; 3630 } 3631 3632 static inline unsigned int 3633 vm_area_alloc_pages(gfp_t gfp, int nid, 3634 unsigned int order, unsigned int nr_pages, struct page **pages) 3635 { 3636 unsigned int nr_allocated = 0; 3637 unsigned int nr_remaining = nr_pages; 3638 unsigned int max_attempt_order = MAX_PAGE_ORDER; 3639 struct page *page; 3640 int i; 3641 unsigned int large_order = ilog2(nr_remaining); 3642 gfp_t large_gfp = vmalloc_gfp_adjust(gfp, large_order) & ~__GFP_DIRECT_RECLAIM; 3643 3644 large_order = min(max_attempt_order, large_order); 3645 3646 /* 3647 * Initially, attempt to have the page allocator give us large order 3648 * pages. Do not attempt allocating smaller than order chunks since 3649 * __vmap_pages_range() expects physically contigous pages of exactly 3650 * order long chunks. 3651 */ 3652 while (large_order > order && nr_remaining) { 3653 if (nid == NUMA_NO_NODE) 3654 page = alloc_pages_noprof(large_gfp, large_order); 3655 else 3656 page = alloc_pages_node_noprof(nid, large_gfp, large_order); 3657 3658 if (unlikely(!page)) { 3659 max_attempt_order = --large_order; 3660 continue; 3661 } 3662 3663 split_page(page, large_order); 3664 for (i = 0; i < (1U << large_order); i++) 3665 pages[nr_allocated + i] = page + i; 3666 3667 nr_allocated += 1U << large_order; 3668 nr_remaining = nr_pages - nr_allocated; 3669 3670 large_order = ilog2(nr_remaining); 3671 large_order = min(max_attempt_order, large_order); 3672 } 3673 3674 /* 3675 * For order-0 pages we make use of bulk allocator, if 3676 * the page array is partly or not at all populated due 3677 * to fails, fallback to a single page allocator that is 3678 * more permissive. 3679 */ 3680 if (!order) { 3681 while (nr_allocated < nr_pages) { 3682 unsigned int nr, nr_pages_request; 3683 3684 /* 3685 * A maximum allowed request is hard-coded and is 100 3686 * pages per call. That is done in order to prevent a 3687 * long preemption off scenario in the bulk-allocator 3688 * so the range is [1:100]. 3689 */ 3690 nr_pages_request = min(100U, nr_pages - nr_allocated); 3691 3692 /* memory allocation should consider mempolicy, we can't 3693 * wrongly use nearest node when nid == NUMA_NO_NODE, 3694 * otherwise memory may be allocated in only one node, 3695 * but mempolicy wants to alloc memory by interleaving. 
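 * Hence, with NUMA enabled and no node specified, go through the
 * mempolicy-aware bulk allocator; otherwise use the node-bound one.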
3696 */ 3697 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) 3698 nr = alloc_pages_bulk_mempolicy_noprof(gfp, 3699 nr_pages_request, 3700 pages + nr_allocated); 3701 else 3702 nr = alloc_pages_bulk_node_noprof(gfp, nid, 3703 nr_pages_request, 3704 pages + nr_allocated); 3705 3706 nr_allocated += nr; 3707 3708 /* 3709 * If zero or pages were obtained partly, 3710 * fallback to a single page allocator. 3711 */ 3712 if (nr != nr_pages_request) 3713 break; 3714 } 3715 } 3716 3717 /* High-order pages or fallback path if "bulk" fails. */ 3718 while (nr_allocated < nr_pages) { 3719 if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current)) 3720 break; 3721 3722 if (nid == NUMA_NO_NODE) 3723 page = alloc_pages_noprof(gfp, order); 3724 else 3725 page = alloc_pages_node_noprof(nid, gfp, order); 3726 3727 if (unlikely(!page)) 3728 break; 3729 3730 /* 3731 * High-order allocations must be able to be treated as 3732 * independent small pages by callers (as they can with 3733 * small-page vmallocs). Some drivers do their own refcounting 3734 * on vmalloc_to_page() pages, some use page->mapping, 3735 * page->lru, etc. 3736 */ 3737 if (order) 3738 split_page(page, order); 3739 3740 /* 3741 * Careful, we allocate and map page-order pages, but 3742 * tracking is done per PAGE_SIZE page so as to keep the 3743 * vm_struct APIs independent of the physical/mapped size. 3744 */ 3745 for (i = 0; i < (1U << order); i++) 3746 pages[nr_allocated + i] = page + i; 3747 3748 nr_allocated += 1U << order; 3749 } 3750 3751 return nr_allocated; 3752 } 3753 3754 static LLIST_HEAD(pending_vm_area_cleanup); 3755 static void cleanup_vm_area_work(struct work_struct *work) 3756 { 3757 struct vm_struct *area, *tmp; 3758 struct llist_node *head; 3759 3760 head = llist_del_all(&pending_vm_area_cleanup); 3761 if (!head) 3762 return; 3763 3764 llist_for_each_entry_safe(area, tmp, head, llnode) { 3765 if (!area->pages) 3766 free_vm_area(area); 3767 else 3768 vfree(area->addr); 3769 } 3770 } 3771 3772 /* 3773 * Helper for __vmalloc_area_node() to defer cleanup 3774 * of partially initialized vm_struct in error paths. 3775 */ 3776 static DECLARE_WORK(cleanup_vm_area, cleanup_vm_area_work); 3777 static void defer_vm_area_cleanup(struct vm_struct *area) 3778 { 3779 if (llist_add(&area->llnode, &pending_vm_area_cleanup)) 3780 schedule_work(&cleanup_vm_area); 3781 } 3782 3783 /* 3784 * Page tables allocations ignore external GFP. Enforces it by 3785 * the memalloc scope API. It is used by vmalloc internals and 3786 * KASAN shadow population only. 3787 * 3788 * GFP to scope mapping: 3789 * 3790 * non-blocking (no __GFP_DIRECT_RECLAIM) - memalloc_noreclaim_save() 3791 * GFP_NOFS - memalloc_nofs_save() 3792 * GFP_NOIO - memalloc_noio_save() 3793 * 3794 * Returns a flag cookie to pair with restore. 3795 */ 3796 unsigned int 3797 memalloc_apply_gfp_scope(gfp_t gfp_mask) 3798 { 3799 unsigned int flags = 0; 3800 3801 if (!gfpflags_allow_blocking(gfp_mask)) 3802 flags = memalloc_noreclaim_save(); 3803 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3804 flags = memalloc_nofs_save(); 3805 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3806 flags = memalloc_noio_save(); 3807 3808 /* 0 - no scope applied. 
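 * A zero cookie is a no-op for memalloc_restore_scope().
 *
 * Typical pairing, as done in __vmalloc_area_node() below:
 *
 *	flags = memalloc_apply_gfp_scope(gfp_mask);
 *	... allocate and map the pages / page tables ...
 *	memalloc_restore_scope(flags);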
*/
3809 return flags;
3810 }
3811
3812 void
3813 memalloc_restore_scope(unsigned int flags)
3814 {
3815 if (flags)
3816 memalloc_flags_restore(flags);
3817 }
3818
3819 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3820 pgprot_t prot, unsigned int page_shift,
3821 int node)
3822 {
3823 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
3824 bool nofail = gfp_mask & __GFP_NOFAIL;
3825 unsigned long addr = (unsigned long)area->addr;
3826 unsigned long size = get_vm_area_size(area);
3827 unsigned long array_size;
3828 unsigned int nr_small_pages = size >> PAGE_SHIFT;
3829 unsigned int page_order;
3830 unsigned int flags;
3831 int ret;
3832
3833 array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3834
3835 /* __GFP_NOFAIL and "noblock" flags are mutually exclusive. */
3836 if (!gfpflags_allow_blocking(gfp_mask))
3837 nofail = false;
3838
3839 if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3840 gfp_mask |= __GFP_HIGHMEM;
3841
3842 /* Please note that the recursion is strictly bounded. */
3843 if (array_size > PAGE_SIZE) {
3844 area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
3845 area->caller);
3846 } else {
3847 area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
3848 }
3849
3850 if (!area->pages) {
3851 warn_alloc(gfp_mask, NULL,
3852 "vmalloc error: size %lu, failed to allocate page array size %lu",
3853 nr_small_pages * PAGE_SIZE, array_size);
3854 goto fail;
3855 }
3856
3857 set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3858 page_order = vm_area_page_order(area);
3859
3860 /*
3861 * High-order nofail allocations are really expensive and
3862 * potentially dangerous (premature OOM, disruptive reclaim,
3863 * compaction etc.).
3864 *
3865 * Please note, __vmalloc_node_range_noprof() falls back
3866 * to order-0 pages if the high-order attempt is unsuccessful.
3867 */
3868 area->nr_pages = vm_area_alloc_pages(
3869 vmalloc_gfp_adjust(gfp_mask, page_order), node,
3870 page_order, nr_small_pages, area->pages);
3871
3872 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
3873 /* All pages of vm should be charged to the same memcg, so use the first one. */
3874 if (gfp_mask & __GFP_ACCOUNT && area->nr_pages)
3875 mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC,
3876 area->nr_pages);
3877
3878 /*
3879 * If not enough pages were obtained to satisfy the
3880 * allocation request, free those that were obtained (if any) via vfree().
3881 */
3882 if (area->nr_pages != nr_small_pages) {
3883 /*
3884 * vm_area_alloc_pages() can fail due to insufficient memory, but
3885 * also due to:
3886 *
3887 * - a pending fatal signal
3888 * - insufficient huge page-order pages
3889 *
3890 * Since we always retry allocations at order-0 in the huge page
3891 * case, a warning for either is spurious.
3892 */ 3893 if (!fatal_signal_pending(current) && page_order == 0) 3894 warn_alloc(gfp_mask, NULL, 3895 "vmalloc error: size %lu, failed to allocate pages", 3896 area->nr_pages * PAGE_SIZE); 3897 goto fail; 3898 } 3899 3900 /* 3901 * page tables allocations ignore external gfp mask, enforce it 3902 * by the scope API 3903 */ 3904 flags = memalloc_apply_gfp_scope(gfp_mask); 3905 do { 3906 ret = __vmap_pages_range(addr, addr + size, prot, area->pages, 3907 page_shift, nested_gfp); 3908 if (nofail && (ret < 0)) 3909 schedule_timeout_uninterruptible(1); 3910 } while (nofail && (ret < 0)); 3911 memalloc_restore_scope(flags); 3912 3913 if (ret < 0) { 3914 warn_alloc(gfp_mask, NULL, 3915 "vmalloc error: size %lu, failed to map pages", 3916 area->nr_pages * PAGE_SIZE); 3917 goto fail; 3918 } 3919 3920 return area->addr; 3921 3922 fail: 3923 defer_vm_area_cleanup(area); 3924 return NULL; 3925 } 3926 3927 /* 3928 * See __vmalloc_node_range() for a clear list of supported vmalloc flags. 3929 * This gfp lists all flags currently passed through vmalloc. Currently, 3930 * __GFP_ZERO is used by BPF and __GFP_NORETRY is used by percpu. Both drm 3931 * and BPF also use GFP_USER. Additionally, various users pass 3932 * GFP_KERNEL_ACCOUNT. Xfs uses __GFP_NOLOCKDEP. 3933 */ 3934 #define GFP_VMALLOC_SUPPORTED (GFP_KERNEL | GFP_ATOMIC | GFP_NOWAIT |\ 3935 __GFP_NOFAIL | __GFP_ZERO | __GFP_NORETRY |\ 3936 GFP_NOFS | GFP_NOIO | GFP_KERNEL_ACCOUNT |\ 3937 GFP_USER | __GFP_NOLOCKDEP) 3938 3939 static gfp_t vmalloc_fix_flags(gfp_t flags) 3940 { 3941 gfp_t invalid_mask = flags & ~GFP_VMALLOC_SUPPORTED; 3942 3943 flags &= GFP_VMALLOC_SUPPORTED; 3944 WARN_ONCE(1, "Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", 3945 invalid_mask, &invalid_mask, flags, &flags); 3946 return flags; 3947 } 3948 3949 /** 3950 * __vmalloc_node_range - allocate virtually contiguous memory 3951 * @size: allocation size 3952 * @align: desired alignment 3953 * @start: vm area range start 3954 * @end: vm area range end 3955 * @gfp_mask: flags for the page level allocator 3956 * @prot: protection mask for the allocated pages 3957 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 3958 * @node: node to use for allocation or NUMA_NO_NODE 3959 * @caller: caller's return address 3960 * 3961 * Allocate enough pages to cover @size from the page level 3962 * allocator with @gfp_mask flags and map them into contiguous 3963 * virtual range with protection @prot. 3964 * 3965 * Supported GFP classes: %GFP_KERNEL, %GFP_ATOMIC, %GFP_NOWAIT, 3966 * %GFP_NOFS and %GFP_NOIO. Zone modifiers are not supported. 3967 * Please note %GFP_ATOMIC and %GFP_NOWAIT are supported only 3968 * by __vmalloc(). 3969 * 3970 * Retry modifiers: only %__GFP_NOFAIL is supported; %__GFP_NORETRY 3971 * and %__GFP_RETRY_MAYFAIL are not supported. 3972 * 3973 * %__GFP_NOWARN can be used to suppress failure messages. 3974 * 3975 * Can not be called from interrupt nor NMI contexts. 
3976 * Return: the address of the area or %NULL on failure 3977 */ 3978 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, 3979 unsigned long start, unsigned long end, gfp_t gfp_mask, 3980 pgprot_t prot, unsigned long vm_flags, int node, 3981 const void *caller) 3982 { 3983 struct vm_struct *area; 3984 void *ret; 3985 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; 3986 unsigned long original_align = align; 3987 unsigned int shift = PAGE_SHIFT; 3988 3989 if (WARN_ON_ONCE(!size)) 3990 return NULL; 3991 3992 if ((size >> PAGE_SHIFT) > totalram_pages()) { 3993 warn_alloc(gfp_mask, NULL, 3994 "vmalloc error: size %lu, exceeds total pages", 3995 size); 3996 return NULL; 3997 } 3998 3999 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { 4000 /* 4001 * Try huge pages. Only try for PAGE_KERNEL allocations, 4002 * others like modules don't yet expect huge pages in 4003 * their allocations due to apply_to_page_range not 4004 * supporting them. 4005 */ 4006 4007 if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE) 4008 shift = PMD_SHIFT; 4009 else 4010 shift = arch_vmap_pte_supported_shift(size); 4011 4012 align = max(original_align, 1UL << shift); 4013 } 4014 4015 again: 4016 area = __get_vm_area_node(size, align, shift, VM_ALLOC | 4017 VM_UNINITIALIZED | vm_flags, start, end, node, 4018 gfp_mask, caller); 4019 if (!area) { 4020 bool nofail = gfp_mask & __GFP_NOFAIL; 4021 warn_alloc(gfp_mask, NULL, 4022 "vmalloc error: size %lu, vm_struct allocation failed%s", 4023 size, (nofail) ? ". Retrying." : ""); 4024 if (nofail) { 4025 schedule_timeout_uninterruptible(1); 4026 goto again; 4027 } 4028 goto fail; 4029 } 4030 4031 /* 4032 * Prepare arguments for __vmalloc_area_node() and 4033 * kasan_unpoison_vmalloc(). 4034 */ 4035 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { 4036 if (kasan_hw_tags_enabled()) { 4037 /* 4038 * Modify protection bits to allow tagging. 4039 * This must be done before mapping. 4040 */ 4041 prot = arch_vmap_pgprot_tagged(prot); 4042 4043 /* 4044 * Skip page_alloc poisoning and zeroing for physical 4045 * pages backing VM_ALLOC mapping. Memory is instead 4046 * poisoned and zeroed by kasan_unpoison_vmalloc(). 4047 */ 4048 gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO; 4049 } 4050 4051 /* Take note that the mapping is PAGE_KERNEL. */ 4052 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; 4053 } 4054 4055 /* Allocate physical pages and map them into vmalloc space. */ 4056 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 4057 if (!ret) 4058 goto fail; 4059 4060 /* 4061 * Mark the pages as accessible, now that they are mapped. 4062 * The condition for setting KASAN_VMALLOC_INIT should complement the 4063 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check 4064 * to make sure that memory is initialized under the same conditions. 4065 * Tag-based KASAN modes only assign tags to normal non-executable 4066 * allocations, see __kasan_unpoison_vmalloc(). 4067 */ 4068 kasan_flags |= KASAN_VMALLOC_VM_ALLOC; 4069 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && 4070 (gfp_mask & __GFP_SKIP_ZERO)) 4071 kasan_flags |= KASAN_VMALLOC_INIT; 4072 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ 4073 area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags); 4074 4075 /* 4076 * In this function, newly allocated vm_struct has VM_UNINITIALIZED 4077 * flag. It means that vm_struct is not fully initialized. 4078 * Now, it is fully initialized, so remove this flag here. 
4079 */ 4080 clear_vm_uninitialized_flag(area); 4081 4082 if (!(vm_flags & VM_DEFER_KMEMLEAK)) 4083 kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask); 4084 4085 return area->addr; 4086 4087 fail: 4088 if (shift > PAGE_SHIFT) { 4089 shift = PAGE_SHIFT; 4090 align = original_align; 4091 goto again; 4092 } 4093 4094 return NULL; 4095 } 4096 4097 /** 4098 * __vmalloc_node - allocate virtually contiguous memory 4099 * @size: allocation size 4100 * @align: desired alignment 4101 * @gfp_mask: flags for the page level allocator 4102 * @node: node to use for allocation or NUMA_NO_NODE 4103 * @caller: caller's return address 4104 * 4105 * Allocate enough pages to cover @size from the page level allocator with 4106 * @gfp_mask flags. Map them into contiguous kernel virtual space. 4107 * 4108 * Semantics of @gfp_mask (including reclaim/retry modifiers such as 4109 * __GFP_NOFAIL) are the same as in __vmalloc_node_range_noprof(). 4110 * 4111 * Return: pointer to the allocated memory or %NULL on error 4112 */ 4113 void *__vmalloc_node_noprof(unsigned long size, unsigned long align, 4114 gfp_t gfp_mask, int node, const void *caller) 4115 { 4116 return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END, 4117 gfp_mask, PAGE_KERNEL, 0, node, caller); 4118 } 4119 /* 4120 * This is only for performance analysis of vmalloc and stress purpose. 4121 * It is required by vmalloc test module, therefore do not use it other 4122 * than that. 4123 */ 4124 #ifdef CONFIG_TEST_VMALLOC_MODULE 4125 EXPORT_SYMBOL_GPL(__vmalloc_node_noprof); 4126 #endif 4127 4128 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) 4129 { 4130 if (unlikely(gfp_mask & ~GFP_VMALLOC_SUPPORTED)) 4131 gfp_mask = vmalloc_fix_flags(gfp_mask); 4132 return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE, 4133 __builtin_return_address(0)); 4134 } 4135 EXPORT_SYMBOL(__vmalloc_noprof); 4136 4137 /** 4138 * vmalloc - allocate virtually contiguous memory 4139 * @size: allocation size 4140 * 4141 * Allocate enough pages to cover @size from the page level 4142 * allocator and map them into contiguous kernel virtual space. 4143 * 4144 * For tight control over page level allocator and protection flags 4145 * use __vmalloc() instead. 4146 * 4147 * Return: pointer to the allocated memory or %NULL on error 4148 */ 4149 void *vmalloc_noprof(unsigned long size) 4150 { 4151 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE, 4152 __builtin_return_address(0)); 4153 } 4154 EXPORT_SYMBOL(vmalloc_noprof); 4155 4156 /** 4157 * vmalloc_huge_node - allocate virtually contiguous memory, allow huge pages 4158 * @size: allocation size 4159 * @gfp_mask: flags for the page level allocator 4160 * @node: node to use for allocation or NUMA_NO_NODE 4161 * 4162 * Allocate enough pages to cover @size from the page level 4163 * allocator and map them into contiguous kernel virtual space. 
4164 * If @size is greater than or equal to PMD_SIZE, allow using 4165 * huge pages for the memory 4166 * 4167 * Return: pointer to the allocated memory or %NULL on error 4168 */ 4169 void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) 4170 { 4171 if (unlikely(gfp_mask & ~GFP_VMALLOC_SUPPORTED)) 4172 gfp_mask = vmalloc_fix_flags(gfp_mask); 4173 return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, 4174 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, 4175 node, __builtin_return_address(0)); 4176 } 4177 EXPORT_SYMBOL_GPL(vmalloc_huge_node_noprof); 4178 4179 /** 4180 * vzalloc - allocate virtually contiguous memory with zero fill 4181 * @size: allocation size 4182 * 4183 * Allocate enough pages to cover @size from the page level 4184 * allocator and map them into contiguous kernel virtual space. 4185 * The memory allocated is set to zero. 4186 * 4187 * For tight control over page level allocator and protection flags 4188 * use __vmalloc() instead. 4189 * 4190 * Return: pointer to the allocated memory or %NULL on error 4191 */ 4192 void *vzalloc_noprof(unsigned long size) 4193 { 4194 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 4195 __builtin_return_address(0)); 4196 } 4197 EXPORT_SYMBOL(vzalloc_noprof); 4198 4199 /** 4200 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 4201 * @size: allocation size 4202 * 4203 * The resulting memory area is zeroed so it can be mapped to userspace 4204 * without leaking data. 4205 * 4206 * Return: pointer to the allocated memory or %NULL on error 4207 */ 4208 void *vmalloc_user_noprof(unsigned long size) 4209 { 4210 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, 4211 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 4212 VM_USERMAP, NUMA_NO_NODE, 4213 __builtin_return_address(0)); 4214 } 4215 EXPORT_SYMBOL(vmalloc_user_noprof); 4216 4217 /** 4218 * vmalloc_node - allocate memory on a specific node 4219 * @size: allocation size 4220 * @node: numa node 4221 * 4222 * Allocate enough pages to cover @size from the page level 4223 * allocator and map them into contiguous kernel virtual space. 4224 * 4225 * For tight control over page level allocator and protection flags 4226 * use __vmalloc() instead. 4227 * 4228 * Return: pointer to the allocated memory or %NULL on error 4229 */ 4230 void *vmalloc_node_noprof(unsigned long size, int node) 4231 { 4232 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node, 4233 __builtin_return_address(0)); 4234 } 4235 EXPORT_SYMBOL(vmalloc_node_noprof); 4236 4237 /** 4238 * vzalloc_node - allocate memory on a specific node with zero fill 4239 * @size: allocation size 4240 * @node: numa node 4241 * 4242 * Allocate enough pages to cover @size from the page level 4243 * allocator and map them into contiguous kernel virtual space. 4244 * The memory allocated is set to zero. 
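 *
 * Illustrative use (a sketch; "nid" is assumed to be a valid node id and
 * "nr" a caller-provided element count):
 *
 *	struct foo *table = vzalloc_node(array_size(nr, sizeof(*table)), nid);
 *
 *	if (!table)
 *		return -ENOMEM;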
4245 * 4246 * Return: pointer to the allocated memory or %NULL on error 4247 */ 4248 void *vzalloc_node_noprof(unsigned long size, int node) 4249 { 4250 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node, 4251 __builtin_return_address(0)); 4252 } 4253 EXPORT_SYMBOL(vzalloc_node_noprof); 4254 4255 /** 4256 * vrealloc_node_align - reallocate virtually contiguous memory; contents 4257 * remain unchanged 4258 * @p: object to reallocate memory for 4259 * @size: the size to reallocate 4260 * @align: requested alignment 4261 * @flags: the flags for the page level allocator 4262 * @nid: node number of the target node 4263 * 4264 * If @p is %NULL, vrealloc_XXX() behaves exactly like vmalloc_XXX(). If @size 4265 * is 0 and @p is not a %NULL pointer, the object pointed to is freed. 4266 * 4267 * If the caller wants the new memory to be on specific node *only*, 4268 * __GFP_THISNODE flag should be set, otherwise the function will try to avoid 4269 * reallocation and possibly disregard the specified @nid. 4270 * 4271 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the 4272 * initial memory allocation, every subsequent call to this API for the same 4273 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that 4274 * __GFP_ZERO is not fully honored by this API. 4275 * 4276 * Requesting an alignment that is bigger than the alignment of the existing 4277 * allocation will fail. 4278 * 4279 * In any case, the contents of the object pointed to are preserved up to the 4280 * lesser of the new and old sizes. 4281 * 4282 * This function must not be called concurrently with itself or vfree() for the 4283 * same memory allocation. 4284 * 4285 * Return: pointer to the allocated memory; %NULL if @size is zero or in case of 4286 * failure 4287 */ 4288 void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align, 4289 gfp_t flags, int nid) 4290 { 4291 struct vm_struct *vm = NULL; 4292 size_t alloced_size = 0; 4293 size_t old_size = 0; 4294 void *n; 4295 4296 if (!size) { 4297 vfree(p); 4298 return NULL; 4299 } 4300 4301 if (p) { 4302 vm = find_vm_area(p); 4303 if (unlikely(!vm)) { 4304 WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p); 4305 return NULL; 4306 } 4307 4308 alloced_size = get_vm_area_size(vm); 4309 old_size = vm->requested_size; 4310 if (WARN(alloced_size < old_size, 4311 "vrealloc() has mismatched area vs requested sizes (%p)\n", p)) 4312 return NULL; 4313 if (WARN(!IS_ALIGNED((unsigned long)p, align), 4314 "will not reallocate with a bigger alignment (0x%lx)\n", align)) 4315 return NULL; 4316 if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE && 4317 nid != page_to_nid(vmalloc_to_page(p))) 4318 goto need_realloc; 4319 } 4320 4321 /* 4322 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What 4323 * would be a good heuristic for when to shrink the vm_area? 4324 */ 4325 if (size <= old_size) { 4326 /* Zero out "freed" memory, potentially for future realloc. */ 4327 if (want_init_on_free() || want_init_on_alloc(flags)) 4328 memset((void *)p + size, 0, old_size - size); 4329 vm->requested_size = size; 4330 kasan_poison_vmalloc(p + size, old_size - size); 4331 return (void *)p; 4332 } 4333 4334 /* 4335 * We already have the bytes available in the allocation; use them. 
4336 */ 4337 if (size <= alloced_size) { 4338 kasan_unpoison_vmalloc(p + old_size, size - old_size, 4339 KASAN_VMALLOC_PROT_NORMAL | 4340 KASAN_VMALLOC_VM_ALLOC | 4341 KASAN_VMALLOC_KEEP_TAG); 4342 /* 4343 * No need to zero memory here, as unused memory will have 4344 * already been zeroed at initial allocation time or during 4345 * realloc shrink time. 4346 */ 4347 vm->requested_size = size; 4348 return (void *)p; 4349 } 4350 4351 need_realloc: 4352 /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */ 4353 n = __vmalloc_node_noprof(size, align, flags, nid, __builtin_return_address(0)); 4354 4355 if (!n) 4356 return NULL; 4357 4358 if (p) { 4359 memcpy(n, p, old_size); 4360 vfree(p); 4361 } 4362 4363 return n; 4364 } 4365 4366 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 4367 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 4368 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 4369 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 4370 #else 4371 /* 4372 * 64b systems should always have either DMA or DMA32 zones. For others 4373 * GFP_DMA32 should do the right thing and use the normal zone. 4374 */ 4375 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 4376 #endif 4377 4378 /** 4379 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 4380 * @size: allocation size 4381 * 4382 * Allocate enough 32bit PA addressable pages to cover @size from the 4383 * page level allocator and map them into contiguous kernel virtual space. 4384 * 4385 * Return: pointer to the allocated memory or %NULL on error 4386 */ 4387 void *vmalloc_32_noprof(unsigned long size) 4388 { 4389 return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 4390 __builtin_return_address(0)); 4391 } 4392 EXPORT_SYMBOL(vmalloc_32_noprof); 4393 4394 /** 4395 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 4396 * @size: allocation size 4397 * 4398 * The resulting memory area is 32bit addressable and zeroed so it can be 4399 * mapped to userspace without leaking data. 4400 * 4401 * Return: pointer to the allocated memory or %NULL on error 4402 */ 4403 void *vmalloc_32_user_noprof(unsigned long size) 4404 { 4405 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, 4406 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 4407 VM_USERMAP, NUMA_NO_NODE, 4408 __builtin_return_address(0)); 4409 } 4410 EXPORT_SYMBOL(vmalloc_32_user_noprof); 4411 4412 /* 4413 * Atomically zero bytes in the iterator. 4414 * 4415 * Returns the number of zeroed bytes. 4416 */ 4417 static size_t zero_iter(struct iov_iter *iter, size_t count) 4418 { 4419 size_t remains = count; 4420 4421 while (remains > 0) { 4422 size_t num, copied; 4423 4424 num = min_t(size_t, remains, PAGE_SIZE); 4425 copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); 4426 remains -= copied; 4427 4428 if (copied < num) 4429 break; 4430 } 4431 4432 return count - remains; 4433 } 4434 4435 /* 4436 * small helper routine, copy contents to iter from addr. 4437 * If the page is not present, fill zero. 4438 * 4439 * Returns the number of copied bytes. 
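 *
 * For example (illustrative): if @addr points 0x100 bytes into a page and
 * @count spans two pages, the first iteration copies PAGE_SIZE - 0x100
 * bytes, and later iterations continue page by page until @count bytes
 * have been copied or a partial copy stops the loop early.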
4440 */ 4441 static size_t aligned_vread_iter(struct iov_iter *iter, 4442 const char *addr, size_t count) 4443 { 4444 size_t remains = count; 4445 struct page *page; 4446 4447 while (remains > 0) { 4448 unsigned long offset, length; 4449 size_t copied = 0; 4450 4451 offset = offset_in_page(addr); 4452 length = PAGE_SIZE - offset; 4453 if (length > remains) 4454 length = remains; 4455 page = vmalloc_to_page(addr); 4456 /* 4457 * To do safe access to this _mapped_ area, we need lock. But 4458 * adding lock here means that we need to add overhead of 4459 * vmalloc()/vfree() calls for this _debug_ interface, rarely 4460 * used. Instead of that, we'll use an local mapping via 4461 * copy_page_to_iter_nofault() and accept a small overhead in 4462 * this access function. 4463 */ 4464 if (page) 4465 copied = copy_page_to_iter_nofault(page, offset, 4466 length, iter); 4467 else 4468 copied = zero_iter(iter, length); 4469 4470 addr += copied; 4471 remains -= copied; 4472 4473 if (copied != length) 4474 break; 4475 } 4476 4477 return count - remains; 4478 } 4479 4480 /* 4481 * Read from a vm_map_ram region of memory. 4482 * 4483 * Returns the number of copied bytes. 4484 */ 4485 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr, 4486 size_t count, unsigned long flags) 4487 { 4488 char *start; 4489 struct vmap_block *vb; 4490 struct xarray *xa; 4491 unsigned long offset; 4492 unsigned int rs, re; 4493 size_t remains, n; 4494 4495 /* 4496 * If it's area created by vm_map_ram() interface directly, but 4497 * not further subdividing and delegating management to vmap_block, 4498 * handle it here. 4499 */ 4500 if (!(flags & VMAP_BLOCK)) 4501 return aligned_vread_iter(iter, addr, count); 4502 4503 remains = count; 4504 4505 /* 4506 * Area is split into regions and tracked with vmap_block, read out 4507 * each region and zero fill the hole between regions. 4508 */ 4509 xa = addr_to_vb_xa((unsigned long) addr); 4510 vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr)); 4511 if (!vb) 4512 goto finished_zero; 4513 4514 spin_lock(&vb->lock); 4515 if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) { 4516 spin_unlock(&vb->lock); 4517 goto finished_zero; 4518 } 4519 4520 for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) { 4521 size_t copied; 4522 4523 if (remains == 0) 4524 goto finished; 4525 4526 start = vmap_block_vaddr(vb->va->va_start, rs); 4527 4528 if (addr < start) { 4529 size_t to_zero = min_t(size_t, start - addr, remains); 4530 size_t zeroed = zero_iter(iter, to_zero); 4531 4532 addr += zeroed; 4533 remains -= zeroed; 4534 4535 if (remains == 0 || zeroed != to_zero) 4536 goto finished; 4537 } 4538 4539 /*it could start reading from the middle of used region*/ 4540 offset = offset_in_page(addr); 4541 n = ((re - rs + 1) << PAGE_SHIFT) - offset; 4542 if (n > remains) 4543 n = remains; 4544 4545 copied = aligned_vread_iter(iter, start + offset, n); 4546 4547 addr += copied; 4548 remains -= copied; 4549 4550 if (copied != n) 4551 goto finished; 4552 } 4553 4554 spin_unlock(&vb->lock); 4555 4556 finished_zero: 4557 /* zero-fill the left dirty or free regions */ 4558 return count - remains + zero_iter(iter, remains); 4559 finished: 4560 /* We couldn't copy/zero everything */ 4561 spin_unlock(&vb->lock); 4562 return count - remains; 4563 } 4564 4565 /** 4566 * vread_iter() - read vmalloc area in a safe way to an iterator. 4567 * @iter: the iterator to which data should be written. 4568 * @addr: vm address. 4569 * @count: number of bytes to be read. 
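 *
 * Minimal illustrative use (a sketch; "kbuf", "len" and "vaddr" are
 * placeholders and error handling is elided):
 *
 *	struct kvec kvec = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, len);
 *	copied = vread_iter(&iter, vaddr, len);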
4570  *
4571  * This function checks that @addr is a valid vmalloc'ed area and
4572  * copies data from that area to the given iterator. If the given memory
4573  * range of [addr...addr+count) includes some valid address, data is
4574  * copied to @iter. If there are memory holes, they'll be zero-filled.
4575  * IOREMAP areas are treated as memory holes and no copy is done.
4576  *
4577  * If [addr...addr+count) doesn't include any intersection with a live
4578  * vm_struct area, 0 is returned.
4579  *
4580  * Note: In usual ops, vread_iter() is never necessary because the caller
4581  * should know the vmalloc() area is valid and can use memcpy().
4582  * This is for routines which have to access the vmalloc area without
4583  * any prior information, such as /proc/kcore.
4584  *
4585  * Return: number of bytes for which @addr and @iter should be advanced
4586  * (same number as @count) or %0 if [addr...addr+count) doesn't
4587  * include any intersection with a valid vmalloc area
4588  */
4589 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
4590 {
4591 	struct vmap_node *vn;
4592 	struct vmap_area *va;
4593 	struct vm_struct *vm;
4594 	char *vaddr;
4595 	size_t n, size, flags, remains;
4596 	unsigned long next;
4597 
4598 	addr = kasan_reset_tag(addr);
4599 
4600 	/* Don't allow overflow */
4601 	if ((unsigned long) addr + count < count)
4602 		count = -(unsigned long) addr;
4603 
4604 	remains = count;
4605 
4606 	vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
4607 	if (!vn)
4608 		goto finished_zero;
4609 
4610 	/* no intersection with any live vmap_area */
4611 	if ((unsigned long)addr + remains <= va->va_start)
4612 		goto finished_zero;
4613 
4614 	do {
4615 		size_t copied;
4616 
4617 		if (remains == 0)
4618 			goto finished;
4619 
4620 		vm = va->vm;
4621 		flags = va->flags & VMAP_FLAGS_MASK;
4622 		/*
4623 		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area and needs
4624 		 * to be set together with VMAP_RAM.
4625 		 */
4626 		WARN_ON(flags == VMAP_BLOCK);
4627 
4628 		if (!vm && !flags)
4629 			goto next_va;
4630 
4631 		if (vm && (vm->flags & VM_UNINITIALIZED))
4632 			goto next_va;
4633 
4634 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4635 		smp_rmb();
4636 
4637 		vaddr = (char *) va->va_start;
4638 		size = vm ? get_vm_area_size(vm) : va_size(va);
4639 
4640 		if (addr >= vaddr + size)
4641 			goto next_va;
4642 
4643 		if (addr < vaddr) {
4644 			size_t to_zero = min_t(size_t, vaddr - addr, remains);
4645 			size_t zeroed = zero_iter(iter, to_zero);
4646 
4647 			addr += zeroed;
4648 			remains -= zeroed;
4649 
4650 			if (remains == 0 || zeroed != to_zero)
4651 				goto finished;
4652 		}
4653 
4654 		n = vaddr + size - addr;
4655 		if (n > remains)
4656 			n = remains;
4657 
4658 		if (flags & VMAP_RAM)
4659 			copied = vmap_ram_vread_iter(iter, addr, n, flags);
4660 		else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
4661 			copied = aligned_vread_iter(iter, addr, n);
4662 		else /* IOREMAP | SPARSE area is treated as memory hole */
4663 			copied = zero_iter(iter, n);
4664 
4665 		addr += copied;
4666 		remains -= copied;
4667 
4668 		if (copied != n)
4669 			goto finished;
4670 
4671 	next_va:
4672 		next = va->va_end;
4673 		spin_unlock(&vn->busy.lock);
4674 	} while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
4675 
4676 finished_zero:
4677 	if (vn)
4678 		spin_unlock(&vn->busy.lock);
4679 
4680 	/* zero-fill memory holes */
4681 	return count - remains + zero_iter(iter, remains);
4682 finished:
4683 	/* Nothing remains, or we couldn't copy/zero everything.
 */
4684 	if (vn)
4685 		spin_unlock(&vn->busy.lock);
4686 
4687 	return count - remains;
4688 }
4689 
4690 /**
4691  * remap_vmalloc_range_partial - map vmalloc pages to userspace
4692  * @vma: vma to cover
4693  * @uaddr: target user address to start at
4694  * @kaddr: virtual address of vmalloc kernel memory
4695  * @pgoff: offset from @kaddr to start at
4696  * @size: size of map area
4697  *
4698  * Returns: 0 for success, -Exxx on failure
4699  *
4700  * This function checks that @kaddr is a valid vmalloc'ed area,
4701  * and that it is big enough to cover the range starting at
4702  * @uaddr in @vma. Will return failure if these criteria aren't
4703  * met.
4704  *
4705  * Similar to remap_pfn_range() (see mm/memory.c)
4706  */
4707 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
4708 				void *kaddr, unsigned long pgoff,
4709 				unsigned long size)
4710 {
4711 	struct vm_struct *area;
4712 	unsigned long off;
4713 	unsigned long end_index;
4714 
4715 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
4716 		return -EINVAL;
4717 
4718 	size = PAGE_ALIGN(size);
4719 
4720 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
4721 		return -EINVAL;
4722 
4723 	area = find_vm_area(kaddr);
4724 	if (!area)
4725 		return -EINVAL;
4726 
4727 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
4728 		return -EINVAL;
4729 
4730 	if (check_add_overflow(size, off, &end_index) ||
4731 	    end_index > get_vm_area_size(area))
4732 		return -EINVAL;
4733 	kaddr += off;
4734 
4735 	do {
4736 		struct page *page = vmalloc_to_page(kaddr);
4737 		int ret;
4738 
4739 		ret = vm_insert_page(vma, uaddr, page);
4740 		if (ret)
4741 			return ret;
4742 
4743 		uaddr += PAGE_SIZE;
4744 		kaddr += PAGE_SIZE;
4745 		size -= PAGE_SIZE;
4746 	} while (size > 0);
4747 
4748 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
4749 
4750 	return 0;
4751 }
4752 
4753 /**
4754  * remap_vmalloc_range - map vmalloc pages to userspace
4755  * @vma: vma to cover (map full range of vma)
4756  * @addr: vmalloc memory
4757  * @pgoff: number of pages into addr before first page to map
4758  *
4759  * Returns: 0 for success, -Exxx on failure
4760  *
4761  * This function checks that @addr is a valid vmalloc'ed area, and
4762  * that it is big enough to cover the vma. Will return failure if
4763  * these criteria aren't met.
4764  *
4765  * Similar to remap_pfn_range() (see mm/memory.c)
4766  */
4767 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
4768 			unsigned long pgoff)
4769 {
4770 	return remap_vmalloc_range_partial(vma, vma->vm_start,
4771 					   addr, pgoff,
4772 					   vma->vm_end - vma->vm_start);
4773 }
4774 EXPORT_SYMBOL(remap_vmalloc_range);
4775 
4776 void free_vm_area(struct vm_struct *area)
4777 {
4778 	struct vm_struct *ret;
4779 	ret = remove_vm_area(area->addr);
4780 	BUG_ON(ret != area);
4781 	kfree(area);
4782 }
4783 EXPORT_SYMBOL_GPL(free_vm_area);
4784 
4785 #ifdef CONFIG_SMP
4786 static struct vmap_area *node_to_va(struct rb_node *n)
4787 {
4788 	return rb_entry_safe(n, struct vmap_area, rb_node);
4789 }
4790 
4791 /**
4792  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
4793  * @addr: target address
4794  *
4795  * Returns: the vmap_area that encloses @addr, if one is found. Otherwise
4796  * the closest area below @addr (the first one in reverse order), i.e. one
4797  * with va->va_start < addr && va->va_end < addr, is returned, or NULL
4798  * if there are no areas at all before @addr.
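 *
 * For example (illustrative): given free areas [10..20] and [30..40],
 * an @addr of 35 returns the [30..40] area, an @addr of 25 returns
 * [10..20] (the closest area below), and an @addr of 5 returns NULL.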
4799 */ 4800 static struct vmap_area * 4801 pvm_find_va_enclose_addr(unsigned long addr) 4802 { 4803 struct vmap_area *va, *tmp; 4804 struct rb_node *n; 4805 4806 n = free_vmap_area_root.rb_node; 4807 va = NULL; 4808 4809 while (n) { 4810 tmp = rb_entry(n, struct vmap_area, rb_node); 4811 if (tmp->va_start <= addr) { 4812 va = tmp; 4813 if (tmp->va_end >= addr) 4814 break; 4815 4816 n = n->rb_right; 4817 } else { 4818 n = n->rb_left; 4819 } 4820 } 4821 4822 return va; 4823 } 4824 4825 /** 4826 * pvm_determine_end_from_reverse - find the highest aligned address 4827 * of free block below VMALLOC_END 4828 * @va: 4829 * in - the VA we start the search(reverse order); 4830 * out - the VA with the highest aligned end address. 4831 * @align: alignment for required highest address 4832 * 4833 * Returns: determined end address within vmap_area 4834 */ 4835 static unsigned long 4836 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 4837 { 4838 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 4839 unsigned long addr; 4840 4841 if (likely(*va)) { 4842 list_for_each_entry_from_reverse((*va), 4843 &free_vmap_area_list, list) { 4844 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 4845 if ((*va)->va_start < addr) 4846 return addr; 4847 } 4848 } 4849 4850 return 0; 4851 } 4852 4853 /** 4854 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 4855 * @offsets: array containing offset of each area 4856 * @sizes: array containing size of each area 4857 * @nr_vms: the number of areas to allocate 4858 * @align: alignment, all entries in @offsets and @sizes must be aligned to this 4859 * 4860 * Returns: kmalloc'd vm_struct pointer array pointing to allocated 4861 * vm_structs on success, %NULL on failure 4862 * 4863 * Percpu allocator wants to use congruent vm areas so that it can 4864 * maintain the offsets among percpu areas. This function allocates 4865 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 4866 * be scattered pretty far, distance between two areas easily going up 4867 * to gigabytes. To avoid interacting with regular vmallocs, these 4868 * areas are allocated from top. 4869 * 4870 * Despite its complicated look, this allocator is rather simple. It 4871 * does everything top-down and scans free blocks from the end looking 4872 * for matching base. While scanning, if any of the areas do not fit the 4873 * base address is pulled down to fit the area. Scanning is repeated till 4874 * all the areas fit and then all necessary data structures are inserted 4875 * and the result is returned. 4876 */ 4877 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 4878 const size_t *sizes, int nr_vms, 4879 size_t align) 4880 { 4881 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 4882 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 4883 struct vmap_area **vas, *va; 4884 struct vm_struct **vms; 4885 int area, area2, last_area, term_area; 4886 unsigned long base, start, size, end, last_end, orig_start, orig_end; 4887 bool purged = false; 4888 4889 /* verify parameters and allocate data structures */ 4890 BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 4891 for (last_area = 0, area = 0; area < nr_vms; area++) { 4892 start = offsets[area]; 4893 end = start + sizes[area]; 4894 4895 /* is everything aligned properly? 
*/ 4896 BUG_ON(!IS_ALIGNED(offsets[area], align)); 4897 BUG_ON(!IS_ALIGNED(sizes[area], align)); 4898 4899 /* detect the area with the highest address */ 4900 if (start > offsets[last_area]) 4901 last_area = area; 4902 4903 for (area2 = area + 1; area2 < nr_vms; area2++) { 4904 unsigned long start2 = offsets[area2]; 4905 unsigned long end2 = start2 + sizes[area2]; 4906 4907 BUG_ON(start2 < end && start < end2); 4908 } 4909 } 4910 last_end = offsets[last_area] + sizes[last_area]; 4911 4912 if (vmalloc_end - vmalloc_start < last_end) { 4913 WARN_ON(true); 4914 return NULL; 4915 } 4916 4917 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 4918 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 4919 if (!vas || !vms) 4920 goto err_free2; 4921 4922 for (area = 0; area < nr_vms; area++) { 4923 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 4924 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 4925 if (!vas[area] || !vms[area]) 4926 goto err_free; 4927 } 4928 retry: 4929 spin_lock(&free_vmap_area_lock); 4930 4931 /* start scanning - we scan from the top, begin with the last area */ 4932 area = term_area = last_area; 4933 start = offsets[area]; 4934 end = start + sizes[area]; 4935 4936 va = pvm_find_va_enclose_addr(vmalloc_end); 4937 base = pvm_determine_end_from_reverse(&va, align) - end; 4938 4939 while (true) { 4940 /* 4941 * base might have underflowed, add last_end before 4942 * comparing. 4943 */ 4944 if (base + last_end < vmalloc_start + last_end) 4945 goto overflow; 4946 4947 /* 4948 * Fitting base has not been found. 4949 */ 4950 if (va == NULL) 4951 goto overflow; 4952 4953 /* 4954 * If required width exceeds current VA block, move 4955 * base downwards and then recheck. 4956 */ 4957 if (base + end > va->va_end) { 4958 base = pvm_determine_end_from_reverse(&va, align) - end; 4959 term_area = area; 4960 continue; 4961 } 4962 4963 /* 4964 * If this VA does not fit, move base downwards and recheck. 4965 */ 4966 if (base + start < va->va_start) { 4967 va = node_to_va(rb_prev(&va->rb_node)); 4968 base = pvm_determine_end_from_reverse(&va, align) - end; 4969 term_area = area; 4970 continue; 4971 } 4972 4973 /* 4974 * This area fits, move on to the previous one. If 4975 * the previous one is the terminal one, we're done. 4976 */ 4977 area = (area + nr_vms - 1) % nr_vms; 4978 if (area == term_area) 4979 break; 4980 4981 start = offsets[area]; 4982 end = start + sizes[area]; 4983 va = pvm_find_va_enclose_addr(base + end); 4984 } 4985 4986 /* we've found a fitting base, insert all va's */ 4987 for (area = 0; area < nr_vms; area++) { 4988 int ret; 4989 4990 start = base + offsets[area]; 4991 size = sizes[area]; 4992 4993 va = pvm_find_va_enclose_addr(start); 4994 if (WARN_ON_ONCE(va == NULL)) 4995 /* It is a BUG(), but trigger recovery instead. */ 4996 goto recovery; 4997 4998 ret = va_clip(&free_vmap_area_root, 4999 &free_vmap_area_list, va, start, size); 5000 if (WARN_ON_ONCE(unlikely(ret))) 5001 /* It is a BUG(), but trigger recovery instead. */ 5002 goto recovery; 5003 5004 /* Allocated area. 
*/ 5005 va = vas[area]; 5006 va->va_start = start; 5007 va->va_end = start + size; 5008 } 5009 5010 spin_unlock(&free_vmap_area_lock); 5011 5012 /* populate the kasan shadow space */ 5013 for (area = 0; area < nr_vms; area++) { 5014 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL)) 5015 goto err_free_shadow; 5016 } 5017 5018 /* insert all vm's */ 5019 for (area = 0; area < nr_vms; area++) { 5020 struct vmap_node *vn = addr_to_node(vas[area]->va_start); 5021 5022 spin_lock(&vn->busy.lock); 5023 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); 5024 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 5025 pcpu_get_vm_areas); 5026 spin_unlock(&vn->busy.lock); 5027 } 5028 5029 /* 5030 * Mark allocated areas as accessible. Do it now as a best-effort 5031 * approach, as they can be mapped outside of vmalloc code. 5032 * With hardware tag-based KASAN, marking is skipped for 5033 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 5034 */ 5035 kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL); 5036 5037 kfree(vas); 5038 return vms; 5039 5040 recovery: 5041 /* 5042 * Remove previously allocated areas. There is no 5043 * need in removing these areas from the busy tree, 5044 * because they are inserted only on the final step 5045 * and when pcpu_get_vm_areas() is success. 5046 */ 5047 while (area--) { 5048 orig_start = vas[area]->va_start; 5049 orig_end = vas[area]->va_end; 5050 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 5051 &free_vmap_area_list); 5052 if (va) 5053 kasan_release_vmalloc(orig_start, orig_end, 5054 va->va_start, va->va_end, 5055 KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH); 5056 vas[area] = NULL; 5057 } 5058 5059 overflow: 5060 spin_unlock(&free_vmap_area_lock); 5061 if (!purged) { 5062 reclaim_and_purge_vmap_areas(); 5063 purged = true; 5064 5065 /* Before "retry", check if we recover. */ 5066 for (area = 0; area < nr_vms; area++) { 5067 if (vas[area]) 5068 continue; 5069 5070 vas[area] = kmem_cache_zalloc( 5071 vmap_area_cachep, GFP_KERNEL); 5072 if (!vas[area]) 5073 goto err_free; 5074 } 5075 5076 goto retry; 5077 } 5078 5079 err_free: 5080 for (area = 0; area < nr_vms; area++) { 5081 if (vas[area]) 5082 kmem_cache_free(vmap_area_cachep, vas[area]); 5083 5084 kfree(vms[area]); 5085 } 5086 err_free2: 5087 kfree(vas); 5088 kfree(vms); 5089 return NULL; 5090 5091 err_free_shadow: 5092 spin_lock(&free_vmap_area_lock); 5093 /* 5094 * We release all the vmalloc shadows, even the ones for regions that 5095 * hadn't been successfully added. This relies on kasan_release_vmalloc 5096 * being able to tolerate this case. 5097 */ 5098 for (area = 0; area < nr_vms; area++) { 5099 orig_start = vas[area]->va_start; 5100 orig_end = vas[area]->va_end; 5101 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 5102 &free_vmap_area_list); 5103 if (va) 5104 kasan_release_vmalloc(orig_start, orig_end, 5105 va->va_start, va->va_end, 5106 KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH); 5107 vas[area] = NULL; 5108 kfree(vms[area]); 5109 } 5110 spin_unlock(&free_vmap_area_lock); 5111 kfree(vas); 5112 kfree(vms); 5113 return NULL; 5114 } 5115 5116 /** 5117 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 5118 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 5119 * @nr_vms: the number of allocated areas 5120 * 5121 * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
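 *
 * Illustrative pairing with the allocation side (a sketch; @offsets,
 * @sizes, @nr_vms and @align come from the percpu allocator in practice):
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, nr_vms);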
5122 */ 5123 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 5124 { 5125 int i; 5126 5127 for (i = 0; i < nr_vms; i++) 5128 free_vm_area(vms[i]); 5129 kfree(vms); 5130 } 5131 #endif /* CONFIG_SMP */ 5132 5133 #ifdef CONFIG_PRINTK 5134 bool vmalloc_dump_obj(void *object) 5135 { 5136 const void *caller; 5137 struct vm_struct *vm; 5138 struct vmap_area *va; 5139 struct vmap_node *vn; 5140 unsigned long addr; 5141 unsigned int nr_pages; 5142 5143 addr = PAGE_ALIGN((unsigned long) object); 5144 vn = addr_to_node(addr); 5145 5146 if (!spin_trylock(&vn->busy.lock)) 5147 return false; 5148 5149 va = __find_vmap_area(addr, &vn->busy.root); 5150 if (!va || !va->vm) { 5151 spin_unlock(&vn->busy.lock); 5152 return false; 5153 } 5154 5155 vm = va->vm; 5156 addr = (unsigned long) vm->addr; 5157 caller = vm->caller; 5158 nr_pages = vm->nr_pages; 5159 spin_unlock(&vn->busy.lock); 5160 5161 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", 5162 nr_pages, addr, caller); 5163 5164 return true; 5165 } 5166 #endif 5167 5168 #ifdef CONFIG_PROC_FS 5169 5170 /* 5171 * Print number of pages allocated on each memory node. 5172 * 5173 * This function can only be called if CONFIG_NUMA is enabled 5174 * and VM_UNINITIALIZED bit in v->flags is disabled. 5175 */ 5176 static void show_numa_info(struct seq_file *m, struct vm_struct *v, 5177 unsigned int *counters) 5178 { 5179 unsigned int nr; 5180 unsigned int step = 1U << vm_area_page_order(v); 5181 5182 if (!counters) 5183 return; 5184 5185 memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 5186 5187 for (nr = 0; nr < v->nr_pages; nr += step) 5188 counters[page_to_nid(v->pages[nr])] += step; 5189 for_each_node_state(nr, N_HIGH_MEMORY) 5190 if (counters[nr]) 5191 seq_printf(m, " N%u=%u", nr, counters[nr]); 5192 } 5193 5194 static void show_purge_info(struct seq_file *m) 5195 { 5196 struct vmap_node *vn; 5197 struct vmap_area *va; 5198 5199 for_each_vmap_node(vn) { 5200 spin_lock(&vn->lazy.lock); 5201 list_for_each_entry(va, &vn->lazy.head, list) { 5202 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 5203 (void *)va->va_start, (void *)va->va_end, 5204 va_size(va)); 5205 } 5206 spin_unlock(&vn->lazy.lock); 5207 } 5208 } 5209 5210 static int vmalloc_info_show(struct seq_file *m, void *p) 5211 { 5212 struct vmap_node *vn; 5213 struct vmap_area *va; 5214 struct vm_struct *v; 5215 unsigned int *counters; 5216 5217 if (IS_ENABLED(CONFIG_NUMA)) 5218 counters = kmalloc_array(nr_node_ids, sizeof(unsigned int), GFP_KERNEL); 5219 5220 for_each_vmap_node(vn) { 5221 spin_lock(&vn->busy.lock); 5222 list_for_each_entry(va, &vn->busy.head, list) { 5223 if (!va->vm) { 5224 if (va->flags & VMAP_RAM) 5225 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 5226 (void *)va->va_start, (void *)va->va_end, 5227 va_size(va)); 5228 5229 continue; 5230 } 5231 5232 v = va->vm; 5233 if (v->flags & VM_UNINITIALIZED) 5234 continue; 5235 5236 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 5237 smp_rmb(); 5238 5239 seq_printf(m, "0x%pK-0x%pK %7ld", 5240 v->addr, v->addr + v->size, v->size); 5241 5242 if (v->caller) 5243 seq_printf(m, " %pS", v->caller); 5244 5245 if (v->nr_pages) 5246 seq_printf(m, " pages=%d", v->nr_pages); 5247 5248 if (v->phys_addr) 5249 seq_printf(m, " phys=%pa", &v->phys_addr); 5250 5251 if (v->flags & VM_IOREMAP) 5252 seq_puts(m, " ioremap"); 5253 5254 if (v->flags & VM_SPARSE) 5255 seq_puts(m, " sparse"); 5256 5257 if (v->flags & VM_ALLOC) 5258 seq_puts(m, " vmalloc"); 5259 5260 if (v->flags & VM_MAP) 5261 seq_puts(m, " vmap"); 
5262 5263 if (v->flags & VM_USERMAP) 5264 seq_puts(m, " user"); 5265 5266 if (v->flags & VM_DMA_COHERENT) 5267 seq_puts(m, " dma-coherent"); 5268 5269 if (is_vmalloc_addr(v->pages)) 5270 seq_puts(m, " vpages"); 5271 5272 if (IS_ENABLED(CONFIG_NUMA)) 5273 show_numa_info(m, v, counters); 5274 5275 seq_putc(m, '\n'); 5276 } 5277 spin_unlock(&vn->busy.lock); 5278 } 5279 5280 /* 5281 * As a final step, dump "unpurged" areas. 5282 */ 5283 show_purge_info(m); 5284 if (IS_ENABLED(CONFIG_NUMA)) 5285 kfree(counters); 5286 return 0; 5287 } 5288 5289 static int __init proc_vmalloc_init(void) 5290 { 5291 proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show); 5292 return 0; 5293 } 5294 module_init(proc_vmalloc_init); 5295 5296 #endif 5297 5298 static void __init vmap_init_free_space(void) 5299 { 5300 unsigned long vmap_start = 1; 5301 const unsigned long vmap_end = ULONG_MAX; 5302 struct vmap_area *free; 5303 struct vm_struct *busy; 5304 5305 /* 5306 * B F B B B F 5307 * -|-----|.....|-----|-----|-----|.....|- 5308 * | The KVA space | 5309 * |<--------------------------------->| 5310 */ 5311 for (busy = vmlist; busy; busy = busy->next) { 5312 if ((unsigned long) busy->addr - vmap_start > 0) { 5313 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5314 if (!WARN_ON_ONCE(!free)) { 5315 free->va_start = vmap_start; 5316 free->va_end = (unsigned long) busy->addr; 5317 5318 insert_vmap_area_augment(free, NULL, 5319 &free_vmap_area_root, 5320 &free_vmap_area_list); 5321 } 5322 } 5323 5324 vmap_start = (unsigned long) busy->addr + busy->size; 5325 } 5326 5327 if (vmap_end - vmap_start > 0) { 5328 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5329 if (!WARN_ON_ONCE(!free)) { 5330 free->va_start = vmap_start; 5331 free->va_end = vmap_end; 5332 5333 insert_vmap_area_augment(free, NULL, 5334 &free_vmap_area_root, 5335 &free_vmap_area_list); 5336 } 5337 } 5338 } 5339 5340 static void vmap_init_nodes(void) 5341 { 5342 struct vmap_node *vn; 5343 int i; 5344 5345 #if BITS_PER_LONG == 64 5346 /* 5347 * A high threshold of max nodes is fixed and bound to 128, 5348 * thus a scale factor is 1 for systems where number of cores 5349 * are less or equal to specified threshold. 5350 * 5351 * As for NUMA-aware notes. For bigger systems, for example 5352 * NUMA with multi-sockets, where we can end-up with thousands 5353 * of cores in total, a "sub-numa-clustering" should be added. 5354 * 5355 * In this case a NUMA domain is considered as a single entity 5356 * with dedicated sub-nodes in it which describe one group or 5357 * set of cores. Therefore a per-domain purging is supposed to 5358 * be added as well as a per-domain balancing. 5359 */ 5360 int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); 5361 5362 if (n > 1) { 5363 vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT); 5364 if (vn) { 5365 /* Node partition is 16 pages. */ 5366 vmap_zone_size = (1 << 4) * PAGE_SIZE; 5367 nr_vmap_nodes = n; 5368 vmap_nodes = vn; 5369 } else { 5370 pr_err("Failed to allocate an array. 
Disable a node layer\n"); 5371 } 5372 } 5373 #endif 5374 5375 for_each_vmap_node(vn) { 5376 vn->busy.root = RB_ROOT; 5377 INIT_LIST_HEAD(&vn->busy.head); 5378 spin_lock_init(&vn->busy.lock); 5379 5380 vn->lazy.root = RB_ROOT; 5381 INIT_LIST_HEAD(&vn->lazy.head); 5382 spin_lock_init(&vn->lazy.lock); 5383 5384 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { 5385 INIT_LIST_HEAD(&vn->pool[i].head); 5386 WRITE_ONCE(vn->pool[i].len, 0); 5387 } 5388 5389 spin_lock_init(&vn->pool_lock); 5390 } 5391 } 5392 5393 static unsigned long 5394 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 5395 { 5396 unsigned long count = 0; 5397 struct vmap_node *vn; 5398 int i; 5399 5400 for_each_vmap_node(vn) { 5401 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) 5402 count += READ_ONCE(vn->pool[i].len); 5403 } 5404 5405 return count ? count : SHRINK_EMPTY; 5406 } 5407 5408 static unsigned long 5409 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 5410 { 5411 struct vmap_node *vn; 5412 5413 for_each_vmap_node(vn) 5414 decay_va_pool_node(vn, true); 5415 5416 return SHRINK_STOP; 5417 } 5418 5419 void __init vmalloc_init(void) 5420 { 5421 struct shrinker *vmap_node_shrinker; 5422 struct vmap_area *va; 5423 struct vmap_node *vn; 5424 struct vm_struct *tmp; 5425 int i; 5426 5427 /* 5428 * Create the cache for vmap_area objects. 5429 */ 5430 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 5431 5432 for_each_possible_cpu(i) { 5433 struct vmap_block_queue *vbq; 5434 struct vfree_deferred *p; 5435 5436 vbq = &per_cpu(vmap_block_queue, i); 5437 spin_lock_init(&vbq->lock); 5438 INIT_LIST_HEAD(&vbq->free); 5439 p = &per_cpu(vfree_deferred, i); 5440 init_llist_head(&p->list); 5441 INIT_WORK(&p->wq, delayed_vfree_work); 5442 xa_init(&vbq->vmap_blocks); 5443 } 5444 5445 /* 5446 * Setup nodes before importing vmlist. 5447 */ 5448 vmap_init_nodes(); 5449 5450 /* Import existing vmlist entries. */ 5451 for (tmp = vmlist; tmp; tmp = tmp->next) { 5452 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5453 if (WARN_ON_ONCE(!va)) 5454 continue; 5455 5456 va->va_start = (unsigned long)tmp->addr; 5457 va->va_end = va->va_start + tmp->size; 5458 va->vm = tmp; 5459 5460 vn = addr_to_node(va->va_start); 5461 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 5462 } 5463 5464 /* 5465 * Now we can initialize a free vmap space. 5466 */ 5467 vmap_init_free_space(); 5468 vmap_initialized = true; 5469 5470 vmap_node_shrinker = shrinker_alloc(0, "vmap-node"); 5471 if (!vmap_node_shrinker) { 5472 pr_err("Failed to allocate vmap-node shrinker!\n"); 5473 return; 5474 } 5475 5476 vmap_node_shrinker->count_objects = vmap_node_shrink_count; 5477 vmap_node_shrinker->scan_objects = vmap_node_shrink_scan; 5478 shrinker_register(vmap_node_shrinker); 5479 } 5480