// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
#include <linux/page_owner.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	struct page *page;
	unsigned long size = PAGE_SIZE;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(end - addr)))
		return -EINVAL;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	lazy_mmu_mode_enable();

	do {
		if (unlikely(!pte_none(ptep_get(pte)))) {
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				dump_page(page, "remapping already mapped page");
			}
			BUG();
		}

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);

	lazy_mmu_mode_disable();
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		err = vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask);
		if (err)
			break;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return err;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		err = vmap_pmd_range(pud, addr, next, phys_addr, prot, max_page_shift, mask);
		if (err)
			break;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return err;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;
	int err = 0;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		err = vmap_pud_range(p4d, addr, next, phys_addr, prot, max_page_shift, mask);
		if (err)
			break;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return err;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	/*
	 * Might allocate pagetables (for most archs a more precise annotation
	 * would be might_alloc(GFP_PGTABLE_KERNEL)). Also might shootdown TLB
	 * (requires IRQs enabled on x86).
	 */
	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int vmap_page_range(unsigned long addr, unsigned long end,
		    phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	struct vm_struct *area;

	area = find_vm_area((void *)addr);
	if (!area || !(area->flags & VM_IOREMAP)) {
		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
		return -EINVAL;
	}
	if (addr != (unsigned long)area->addr ||
	    (void *)end != area->addr + get_vm_area_size(area)) {
		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
			  addr, end, (long)area->addr,
			  (long)area->addr + get_vm_area_size(area));
		return -ERANGE;
	}
	return vmap_page_range(addr, end, phys_addr, prot);
}

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;
	pte_t ptent;
	unsigned long size = PAGE_SIZE;

	pte = pte_offset_kernel(pmd, addr);
	lazy_mmu_mode_enable();

	do {
#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_unmap_size(addr, pte);
		if (size != PAGE_SIZE) {
			if (WARN_ON(!IS_ALIGNED(addr, size))) {
				addr = ALIGN_DOWN(addr, size);
				pte = PTR_ALIGN_DOWN(pte, sizeof(*pte) * (size >> PAGE_SHIFT));
			}
			ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size);
			if (WARN_ON(end - addr < size))
				size = end - addr;
		} else
#endif
			ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);

	lazy_mmu_mode_disable();
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared) {
			WARN_ON(next - addr < PMD_SIZE);
			continue;
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared) {
			WARN_ON(next - addr < PUD_SIZE);
			continue;
		}
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	int err = 0;
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	lazy_mmu_mode_enable();

	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte)))) {
			err = -EBUSY;
			break;
		}
		if (WARN_ON(!page)) {
			err = -ENOMEM;
			break;
		}
		if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
			err = -EINVAL;
			break;
		}

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	lazy_mmu_mode_disable();
	*mask |= PGTBL_PTE_MODIFIED;

	return err;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift,
		gfp_t gfp_mask)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift, gfp_mask);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

static int __vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift,
		gfp_t gfp_mask)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift, gfp_mask);
	flush_cache_vmap(addr, end);
	return err;
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
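 *
 * A minimal usage sketch; the names "area", "pages" and "nr" are
 * hypothetical and assumed to be set up by the caller (a reserved vm area
 * and an array of nr order-0 pages):
 *
 *	err = vmap_pages_range((unsigned long)area->addr,
 *			       (unsigned long)area->addr + (nr << PAGE_SHIFT),
 *			       PAGE_KERNEL, pages, PAGE_SHIFT);
 *	if (err)
 *		return err;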
 */
int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return __vmap_pages_range(addr, end, prot, pages, page_shift, GFP_KERNEL);
}

static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
				unsigned long end)
{
	might_sleep();
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;
	return 0;
}

/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages)
{
	int err;

	err = check_sparse_vm_area(area, start, end);
	if (err)
		return err;

	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}

/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end)
{
	if (check_sparse_vm_area(area, start, end))
		return;

	vunmap_range(start, end);
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
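 *
 * A minimal usage sketch (the pointer "buf" is hypothetical and assumed
 * to come from vmalloc()):
 *
 *	struct page *page = vmalloc_to_page(buf);
 *	unsigned long pfn = page ? page_to_pfn(page) : 0;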
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(free_vmap_area_lock);
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

/*
 * This structure defines a single, solid model where a list and
 * rb-tree are part of one entity protected by the lock. Nodes are
 * sorted in ascending order, thus for O(1) access to left/right
 * neighbors a list is used as well as for sequential traversal.
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/*
 * A fast size storage contains VAs up to 1M size. A pool consists
 * of ready-to-go VAs of certain sizes linked to each other.
 * An index in the pool-array plus one corresponds to the number of pages.
 */
#define MAX_VA_SIZE_PAGES 256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};

/*
 * An effective vmap-node logic. Users make use of nodes instead
 * of a global heap. It allows balancing access and mitigating
 * contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;

/*
 * Initial setup consists of one single node, i.e. balancing
 * is fully disabled. Later on, after vmap is initialized, these
 * parameters are updated based on the system capacity.
 */
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;

/* A simple iterator over all vmap-nodes. */
#define for_each_vmap_node(vn)	\
	for ((vn) = &vmap_nodes[0];	\
	     (vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)

static inline unsigned int
addr_to_node_id(unsigned long addr)
{
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}

static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
	return &vmap_nodes[addr_to_node_id(addr)];
}

static inline struct vmap_node *
id_to_node(unsigned int id)
{
	return &vmap_nodes[id % nr_vmap_nodes];
}

static inline unsigned int
node_to_id(struct vmap_node *node)
{
	/* Pointer arithmetic. */
	unsigned int id = node - vmap_nodes;

	if (likely(id < nr_vmap_nodes))
		return id;

	WARN_ONCE(1, "An address 0x%p is out-of-bounds.\n", node);
	return 0;
}

/*
 * We use the value 0 to represent "no node", that is why
 * an encoded value will be the node-id incremented by 1.
 * It is always greater than 0. A valid node_id which can
 * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
 * is not valid, 0 is returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return (node_id + 1) << BITS_PER_BYTE;

	/* Warn and no node encoded. */
	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
	return 0;
}

/*
 * Returns a decoded node-id; the valid range is within
 * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is
 * returned if the extracted data is wrong.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;

	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return node_id;

	/* If it was _not_ zero, warn. */
	WARN_ONCE(node_id != UINT_MAX,
		"Decode wrong node id (%d)\n", node_id);

	return nr_vmap_nodes;
}

static bool
is_vn_id_valid(unsigned int node_id)
{
	if (node_id < nr_vmap_nodes)
		return true;

	return false;
}

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages;
static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

/*
 * Returns the node where the first VA that satisfies addr < va_end resides.
 * On success, the node is locked. The user is responsible for unlocking it
 * when the VA no longer needs to be accessed.
 *
 * Returns NULL if nothing is found.
 */
static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
	unsigned long va_start_lowest;
	struct vmap_node *vn;

repeat:
	va_start_lowest = 0;

	for_each_vmap_node(vn) {
		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);

		if (*va)
			if (!va_start_lowest || (*va)->va_start < va_start_lowest)
				va_start_lowest = (*va)->va_start;
		spin_unlock(&vn->busy.lock);
	}

	/*
	 * Check if the found VA still exists, it might have gone away.
	 * In that case we repeat the search because the VA has been
	 * removed concurrently and we need to proceed to the next one,
	 * which is a rare case.
	 */
	if (va_start_lowest) {
		vn = addr_to_node(va_start_lowest);

		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area(va_start_lowest, &vn->busy.root);

		if (*va)
			return vn;

		spin_unlock(&vn->busy.lock);
		goto repeat;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of a conflicting overlap range
 * have to be declined and actually considered as a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * which we name "link"; this is where the new va->rb_node will
	 * be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger the WARN() if there are partial (left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform a simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything to
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when the node is removed or rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from the bottom to the upper
 * levels starting from the VA point. The propagation must be done
 * when the VA size is modified by changing its va_start/va_end, or
 * when a new VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from the bottom towards the root until
	 * the calculated maximum available size of the checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is done, a new
 * free area is inserted. If the VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Despite this being
 * buggy behaviour, the system can stay alive and keep going.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request given the passed parameters.
 * Please note, with an alignment bigger than PAGE_SIZE, the
 * search length is adjusted to account for the worst case
 * alignment overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
	unsigned long align, unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = root->rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * due to the "vstart" restriction or an alignment
			 * overhead that is bigger than PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift the vstart forward. Please note, we update it with
					 * the parent's start address adding "1" because we do not want
					 * to enter the same sub-tree after it has already been checked
					 * and no suitable free block was found there.
					 */
					vstart = va->va_start + 1;
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, head, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
			     unsigned long size, unsigned long align)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

static __always_inline int
va_clip(struct rb_root *root, struct list_head *head,
		struct vmap_area *va, unsigned long nva_start_addr,
		unsigned long size)
{
	struct vmap_area *lva = NULL;
	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va_augment(va, root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In case of
			 * percpu allocations offsets and sizes are aligned to
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though, as an example it is
			 * a first allocation (early boot up) when we have "one"
			 * big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purpose. That is rare and most of the time does
			 * not occur.
			 *
			 * What happens if an allocation fails? Basically, an
			 * "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then, the "retry" path is
			 * triggered to repeat one more time. See more details
			 * in alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -ENOMEM;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -EINVAL;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node, root, head);
	}

	return 0;
}

static unsigned long
va_alloc(struct vmap_area *va,
		struct rb_root *root, struct list_head *head,
		unsigned long size, unsigned long align,
		unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	int ret;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return -ERANGE;

	/* Update the free vmap_area. */
	ret = va_clip(root, head, va, nva_start_addr, size);
	if (WARN_ON_ONCE(ret))
		return ret;

	return nva_start_addr;
}

/*
 * Returns a start address of the newly allocated area, if success.
 * Otherwise an error value is returned that indicates failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(struct rb_root *root, struct list_head *head,
	unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	bool adjust_search_size = true;
	unsigned long nva_start_addr;
	struct vmap_area *va;

	/*
	 * Do not adjust when:
	 *   a) align <= PAGE_SIZE, because it does not make any sense.
	 *      All blocks (their start addresses) are at least PAGE_SIZE
	 *      aligned anyway;
	 *   b) a short range where the requested size corresponds exactly to
	 *      the specified [vstart:vend] interval and the alignment is
	 *      bigger than PAGE_SIZE. With an adjusted search length the
	 *      allocation would not succeed.
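	 *
	 *      A worked example of case b), with hypothetical numbers and a
	 *      4K PAGE_SIZE: vstart = 0x4000, vend = 0x8000, size = 0x4000,
	 *      align = 0x4000. The adjusted length would be
	 *      0x4000 + 0x4000 - 1 = 0x7fff, so the exact-fit free block of
	 *      0x4000 bytes would be skipped even though it satisfies the
	 *      request.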
	 */
	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
		adjust_search_size = false;

	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
	if (unlikely(!va))
		return -ENOENT;

	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	if (!IS_ERR_VALUE(nva_start_addr))
		find_vmap_lowest_match_check(root, head, size, align);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	struct vmap_node *vn = addr_to_node(va->va_start);

	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vn->busy.lock);
	unlink_va(va, &vn->busy.root);
	spin_unlock(&vn->busy.lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL, *tmp;

	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
	 * a CPU that does an allocation is preloaded.
	 *
	 * We do it in non-atomic context, thus it allows us to use more
	 * permissive allocation masks to be more stable under low memory
	 * condition and high memory pressure.
	 */
	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	tmp = NULL;
	if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va))
		kmem_cache_free(vmap_area_cachep, va);
}

static struct vmap_pool *
size_to_va_pool(struct vmap_node *vn, unsigned long size)
{
	unsigned int idx = (size - 1) / PAGE_SIZE;

	if (idx < MAX_VA_SIZE_PAGES)
		return &vn->pool[idx];

	return NULL;
}

static bool
node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
{
	struct vmap_pool *vp;

	vp = size_to_va_pool(n, va_size(va));
	if (!vp)
		return false;

	spin_lock(&n->pool_lock);
	list_add(&va->list, &vp->head);
	WRITE_ONCE(vp->len, vp->len + 1);
	spin_unlock(&n->pool_lock);

	return true;
}

static struct vmap_area *
node_pool_del_va(struct vmap_node *vn, unsigned long size,
		unsigned long align, unsigned long vstart,
		unsigned long vend)
{
	struct vmap_area *va = NULL;
	struct vmap_pool *vp;
	int err = 0;

	vp = size_to_va_pool(vn, size);
	if (!vp || list_empty(&vp->head))
		return NULL;

	spin_lock(&vn->pool_lock);
	if (!list_empty(&vp->head)) {
		va = list_first_entry(&vp->head, struct vmap_area, list);

		if (IS_ALIGNED(va->va_start, align)) {
			/*
			 * Do some sanity check and emit a warning
			 * if one of below checks detects an error.
			 */
			err |= (va_size(va) != size);
			err |= (va->va_start < vstart);
			err |= (va->va_end > vend);

			if (!WARN_ON_ONCE(err)) {
				list_del_init(&va->list);
				WRITE_ONCE(vp->len, vp->len - 1);
			} else {
				va = NULL;
			}
		} else {
			list_move_tail(&va->list, &vp->head);
			va = NULL;
		}
	}
	spin_unlock(&vn->pool_lock);

	return va;
}

static struct vmap_area *
node_alloc(unsigned long size, unsigned long align,
		unsigned long vstart, unsigned long vend,
		unsigned long *addr, unsigned int *vn_id)
{
	struct vmap_area *va;

	*vn_id = 0;
	*addr = -EINVAL;

	/*
	 * Fallback to a global heap if not vmalloc or there
	 * is only one node.
	 */
	if (vstart != VMALLOC_START || vend != VMALLOC_END ||
			nr_vmap_nodes == 1)
		return NULL;

	*vn_id = raw_smp_processor_id() % nr_vmap_nodes;
	va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
	*vn_id = encode_vn_id(*vn_id);

	if (va)
		*addr = va->va_start;

	return va;
}

static inline void setup_vmalloc_vm(struct vm_struct *vm,
	struct vmap_area *va, unsigned long flags, const void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = vm->requested_size = va_size(va);
	vm->caller = caller;
	va->vm = vm;
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend. If vm is passed in, the two will also be bound.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask,
				unsigned long va_flags, struct vm_struct *vm)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	unsigned long freed;
	unsigned long addr;
	unsigned int vn_id;
	bool allow_block;
	int purged = 0;
	int ret;

	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
		return ERR_PTR(-EINVAL);

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	/* Only reclaim behaviour flags are relevant. */
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
	allow_block = gfpflags_allow_blocking(gfp_mask);
	might_sleep_if(allow_block);

	/*
	 * If a VA is obtained from the global heap (i.e. if it fails here),
	 * it is anyway marked with this "vn_id" so it is returned
	 * to this pool's node later. Such a way gives a possibility
	 * to populate pools based on users demand.
	 *
	 * On success a ready to go VA is returned.
	 */
	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
	if (!va) {
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
		if (unlikely(!va))
			return ERR_PTR(-ENOMEM);

		/*
		 * Only scan the relevant parts containing pointers to other objects
		 * to avoid false negatives.
		 */
		kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
	}

retry:
	if (IS_ERR_VALUE(addr)) {
		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
			size, align, vstart, vend);
		spin_unlock(&free_vmap_area_lock);

		/*
		 * This is not a fast path. Check if yielding is needed. This
		 * is the only reschedule point in the vmalloc() path.
2086 */ 2087 if (allow_block) 2088 cond_resched(); 2089 } 2090 2091 trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); 2092 2093 /* 2094 * If an allocation fails, the error value is 2095 * returned. Therefore trigger the overflow path. 2096 */ 2097 if (IS_ERR_VALUE(addr)) { 2098 if (allow_block) 2099 goto overflow; 2100 2101 /* 2102 * We can not trigger any reclaim logic because 2103 * sleeping is not allowed, thus fail an allocation. 2104 */ 2105 goto out_free_va; 2106 } 2107 2108 va->va_start = addr; 2109 va->va_end = addr + size; 2110 va->vm = NULL; 2111 va->flags = (va_flags | vn_id); 2112 2113 if (vm) { 2114 vm->addr = (void *)va->va_start; 2115 vm->size = va_size(va); 2116 va->vm = vm; 2117 } 2118 2119 vn = addr_to_node(va->va_start); 2120 2121 spin_lock(&vn->busy.lock); 2122 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 2123 spin_unlock(&vn->busy.lock); 2124 2125 BUG_ON(!IS_ALIGNED(va->va_start, align)); 2126 BUG_ON(va->va_start < vstart); 2127 BUG_ON(va->va_end > vend); 2128 2129 ret = kasan_populate_vmalloc(addr, size, gfp_mask); 2130 if (ret) { 2131 free_vmap_area(va); 2132 return ERR_PTR(ret); 2133 } 2134 2135 return va; 2136 2137 overflow: 2138 if (!purged) { 2139 reclaim_and_purge_vmap_areas(); 2140 purged = 1; 2141 goto retry; 2142 } 2143 2144 freed = 0; 2145 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 2146 2147 if (freed > 0) { 2148 purged = 0; 2149 goto retry; 2150 } 2151 2152 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 2153 pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n", 2154 size, vstart, vend); 2155 2156 out_free_va: 2157 kmem_cache_free(vmap_area_cachep, va); 2158 return ERR_PTR(-EBUSY); 2159 } 2160 2161 int register_vmap_purge_notifier(struct notifier_block *nb) 2162 { 2163 return blocking_notifier_chain_register(&vmap_notify_list, nb); 2164 } 2165 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 2166 2167 int unregister_vmap_purge_notifier(struct notifier_block *nb) 2168 { 2169 return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 2170 } 2171 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 2172 2173 /* 2174 * lazy_max_pages is the maximum amount of virtual address space we gather up 2175 * before attempting to purge with a TLB flush. 2176 * 2177 * There is a tradeoff here: a larger number will cover more kernel page tables 2178 * and take slightly longer to purge, but it will linearly reduce the number of 2179 * global TLB flushes that must be performed. It would seem natural to scale 2180 * this number up linearly with the number of CPUs (because vmapping activity 2181 * could also scale linearly with the number of CPUs), however it is likely 2182 * that in practice, workloads might be constrained in other ways that mean 2183 * vmap activity will not scale linearly with CPUs. Also, I want to be 2184 * conservative and not introduce a big latency on huge systems, so go with 2185 * a less aggressive log scale. It will still be an improvement over the old 2186 * code, and it will be simple to change the scale factor if we find that it 2187 * becomes a problem on bigger systems. 2188 */ 2189 static unsigned long lazy_max_pages(void) 2190 { 2191 unsigned int log; 2192 2193 log = fls(num_online_cpus()); 2194 2195 return log * (32UL * 1024 * 1024 / PAGE_SIZE); 2196 } 2197 2198 /* 2199 * Serialize vmap purging. 
There is no actual critical section protected 2200 * by this lock, but we want to avoid concurrent calls for performance 2201 * reasons and to make the pcpu_get_vm_areas more deterministic. 2202 */ 2203 static DEFINE_MUTEX(vmap_purge_lock); 2204 2205 /* for per-CPU blocks */ 2206 static void purge_fragmented_blocks_allcpus(void); 2207 2208 static void 2209 reclaim_list_global(struct list_head *head) 2210 { 2211 struct vmap_area *va, *n; 2212 2213 if (list_empty(head)) 2214 return; 2215 2216 spin_lock(&free_vmap_area_lock); 2217 list_for_each_entry_safe(va, n, head, list) 2218 merge_or_add_vmap_area_augment(va, 2219 &free_vmap_area_root, &free_vmap_area_list); 2220 spin_unlock(&free_vmap_area_lock); 2221 } 2222 2223 static void 2224 decay_va_pool_node(struct vmap_node *vn, bool full_decay) 2225 { 2226 LIST_HEAD(decay_list); 2227 struct rb_root decay_root = RB_ROOT; 2228 struct vmap_area *va, *nva; 2229 unsigned long n_decay, pool_len; 2230 int i; 2231 2232 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { 2233 LIST_HEAD(tmp_list); 2234 2235 if (list_empty(&vn->pool[i].head)) 2236 continue; 2237 2238 /* Detach the pool, so no-one can access it. */ 2239 spin_lock(&vn->pool_lock); 2240 list_replace_init(&vn->pool[i].head, &tmp_list); 2241 spin_unlock(&vn->pool_lock); 2242 2243 pool_len = n_decay = vn->pool[i].len; 2244 WRITE_ONCE(vn->pool[i].len, 0); 2245 2246 /* Decay a pool by ~25% out of left objects. */ 2247 if (!full_decay) 2248 n_decay >>= 2; 2249 pool_len -= n_decay; 2250 2251 list_for_each_entry_safe(va, nva, &tmp_list, list) { 2252 if (!n_decay--) 2253 break; 2254 2255 list_del_init(&va->list); 2256 merge_or_add_vmap_area(va, &decay_root, &decay_list); 2257 } 2258 2259 /* 2260 * Attach the pool back if it has been partly decayed. 2261 * Please note, it is supposed that nobody(other contexts) 2262 * can populate the pool therefore a simple list replace 2263 * operation takes place here. 
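 * For example, a pool that still holds 8 areas and is not fully decayed
 * releases 8 >> 2 == 2 of them back to the global free space and is
 * re-attached here with the remaining 6, with vp->len set to 6 again.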
2264 */ 2265 if (!list_empty(&tmp_list)) { 2266 spin_lock(&vn->pool_lock); 2267 list_replace_init(&tmp_list, &vn->pool[i].head); 2268 WRITE_ONCE(vn->pool[i].len, pool_len); 2269 spin_unlock(&vn->pool_lock); 2270 } 2271 } 2272 2273 reclaim_list_global(&decay_list); 2274 } 2275 2276 #define KASAN_RELEASE_BATCH_SIZE 32 2277 2278 static void 2279 kasan_release_vmalloc_node(struct vmap_node *vn) 2280 { 2281 struct vmap_area *va; 2282 unsigned long start, end; 2283 unsigned int batch_count = 0; 2284 2285 start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start; 2286 end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end; 2287 2288 list_for_each_entry(va, &vn->purge_list, list) { 2289 if (is_vmalloc_or_module_addr((void *) va->va_start)) 2290 kasan_release_vmalloc(va->va_start, va->va_end, 2291 va->va_start, va->va_end, 2292 KASAN_VMALLOC_PAGE_RANGE); 2293 2294 if (need_resched() || (++batch_count >= KASAN_RELEASE_BATCH_SIZE)) { 2295 cond_resched(); 2296 batch_count = 0; 2297 } 2298 } 2299 2300 kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH); 2301 } 2302 2303 static void purge_vmap_node(struct work_struct *work) 2304 { 2305 struct vmap_node *vn = container_of(work, 2306 struct vmap_node, purge_work); 2307 unsigned long nr_purged_pages = 0; 2308 struct vmap_area *va, *n_va; 2309 LIST_HEAD(local_list); 2310 2311 if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) 2312 kasan_release_vmalloc_node(vn); 2313 2314 vn->nr_purged = 0; 2315 2316 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { 2317 unsigned long nr = va_size(va) >> PAGE_SHIFT; 2318 unsigned int vn_id = decode_vn_id(va->flags); 2319 2320 list_del_init(&va->list); 2321 2322 nr_purged_pages += nr; 2323 vn->nr_purged++; 2324 2325 if (is_vn_id_valid(vn_id) && !vn->skip_populate) 2326 if (node_pool_add_va(vn, va)) 2327 continue; 2328 2329 /* Go back to global. */ 2330 list_add(&va->list, &local_list); 2331 } 2332 2333 atomic_long_sub(nr_purged_pages, &vmap_lazy_nr); 2334 2335 reclaim_list_global(&local_list); 2336 } 2337 2338 /* 2339 * Purges all lazily-freed vmap areas. 2340 */ 2341 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end, 2342 bool full_pool_decay) 2343 { 2344 unsigned long nr_purged_areas = 0; 2345 unsigned int nr_purge_helpers; 2346 static cpumask_t purge_nodes; 2347 unsigned int nr_purge_nodes; 2348 struct vmap_node *vn; 2349 int i; 2350 2351 lockdep_assert_held(&vmap_purge_lock); 2352 2353 /* 2354 * Use cpumask to mark which node has to be processed. 2355 */ 2356 purge_nodes = CPU_MASK_NONE; 2357 2358 for_each_vmap_node(vn) { 2359 INIT_LIST_HEAD(&vn->purge_list); 2360 vn->skip_populate = full_pool_decay; 2361 decay_va_pool_node(vn, full_pool_decay); 2362 2363 if (RB_EMPTY_ROOT(&vn->lazy.root)) 2364 continue; 2365 2366 spin_lock(&vn->lazy.lock); 2367 WRITE_ONCE(vn->lazy.root.rb_node, NULL); 2368 list_replace_init(&vn->lazy.head, &vn->purge_list); 2369 spin_unlock(&vn->lazy.lock); 2370 2371 start = min(start, list_first_entry(&vn->purge_list, 2372 struct vmap_area, list)->va_start); 2373 2374 end = max(end, list_last_entry(&vn->purge_list, 2375 struct vmap_area, list)->va_end); 2376 2377 cpumask_set_cpu(node_to_id(vn), &purge_nodes); 2378 } 2379 2380 nr_purge_nodes = cpumask_weight(&purge_nodes); 2381 if (nr_purge_nodes > 0) { 2382 flush_tlb_kernel_range(start, end); 2383 2384 /* One extra worker is per a lazy_max_pages() full set minus one. 
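 * A worked example (illustrative numbers, assuming 4K pages): with 16
 * online CPUs lazy_max_pages() is fls(16) * 8192 == 40960 pages. If
 * vmap_lazy_nr is 100000 pages and four nodes need purging, then
 * 100000 / 40960 == 2 and clamp(2, 1, 4) - 1 == 1, so one node is
 * offloaded to a kworker while the remaining ones are purged directly
 * in this context.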
*/ 2385 nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages(); 2386 nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1; 2387 2388 for_each_cpu(i, &purge_nodes) { 2389 vn = &vmap_nodes[i]; 2390 2391 if (nr_purge_helpers > 0) { 2392 INIT_WORK(&vn->purge_work, purge_vmap_node); 2393 2394 if (cpumask_test_cpu(i, cpu_online_mask)) 2395 schedule_work_on(i, &vn->purge_work); 2396 else 2397 schedule_work(&vn->purge_work); 2398 2399 nr_purge_helpers--; 2400 } else { 2401 vn->purge_work.func = NULL; 2402 purge_vmap_node(&vn->purge_work); 2403 nr_purged_areas += vn->nr_purged; 2404 } 2405 } 2406 2407 for_each_cpu(i, &purge_nodes) { 2408 vn = &vmap_nodes[i]; 2409 2410 if (vn->purge_work.func) { 2411 flush_work(&vn->purge_work); 2412 nr_purged_areas += vn->nr_purged; 2413 } 2414 } 2415 } 2416 2417 trace_purge_vmap_area_lazy(start, end, nr_purged_areas); 2418 return nr_purged_areas > 0; 2419 } 2420 2421 /* 2422 * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list. 2423 */ 2424 static void reclaim_and_purge_vmap_areas(void) 2425 2426 { 2427 mutex_lock(&vmap_purge_lock); 2428 purge_fragmented_blocks_allcpus(); 2429 __purge_vmap_area_lazy(ULONG_MAX, 0, true); 2430 mutex_unlock(&vmap_purge_lock); 2431 } 2432 2433 static void drain_vmap_area_work(struct work_struct *work) 2434 { 2435 mutex_lock(&vmap_purge_lock); 2436 __purge_vmap_area_lazy(ULONG_MAX, 0, false); 2437 mutex_unlock(&vmap_purge_lock); 2438 } 2439 2440 /* 2441 * Free a vmap area, caller ensuring that the area has been unmapped, 2442 * unlinked and flush_cache_vunmap had been called for the correct 2443 * range previously. 2444 */ 2445 static void free_vmap_area_noflush(struct vmap_area *va) 2446 { 2447 unsigned long nr_lazy_max = lazy_max_pages(); 2448 unsigned long va_start = va->va_start; 2449 unsigned int vn_id = decode_vn_id(va->flags); 2450 struct vmap_node *vn; 2451 unsigned long nr_lazy; 2452 2453 if (WARN_ON_ONCE(!list_empty(&va->list))) 2454 return; 2455 2456 nr_lazy = atomic_long_add_return_relaxed(va_size(va) >> PAGE_SHIFT, 2457 &vmap_lazy_nr); 2458 2459 /* 2460 * If it was request by a certain node we would like to 2461 * return it to that node, i.e. its pool for later reuse. 2462 */ 2463 vn = is_vn_id_valid(vn_id) ? 2464 id_to_node(vn_id):addr_to_node(va->va_start); 2465 2466 spin_lock(&vn->lazy.lock); 2467 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); 2468 spin_unlock(&vn->lazy.lock); 2469 2470 trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max); 2471 2472 /* After this point, we may free va at any time */ 2473 if (unlikely(nr_lazy > nr_lazy_max)) 2474 schedule_work(&drain_vmap_work); 2475 } 2476 2477 /* 2478 * Free and unmap a vmap area 2479 */ 2480 static void free_unmap_vmap_area(struct vmap_area *va) 2481 { 2482 flush_cache_vunmap(va->va_start, va->va_end); 2483 vunmap_range_noflush(va->va_start, va->va_end); 2484 if (debug_pagealloc_enabled_static()) 2485 flush_tlb_kernel_range(va->va_start, va->va_end); 2486 2487 free_vmap_area_noflush(va); 2488 } 2489 2490 struct vmap_area *find_vmap_area(unsigned long addr) 2491 { 2492 struct vmap_node *vn; 2493 struct vmap_area *va; 2494 int i, j; 2495 2496 if (unlikely(!vmap_initialized)) 2497 return NULL; 2498 2499 /* 2500 * An addr_to_node_id(addr) converts an address to a node index 2501 * where a VA is located. If VA spans several zones and passed 2502 * addr is not the same as va->va_start, what is not common, we 2503 * may need to scan extra nodes. 
See an example: 2504 * 2505 * <----va----> 2506 * -|-----|-----|-----|-----|- 2507 * 1 2 0 1 2508 * 2509 * VA resides in node 1 whereas it spans 1, 2 an 0. If passed 2510 * addr is within 2 or 0 nodes we should do extra work. 2511 */ 2512 i = j = addr_to_node_id(addr); 2513 do { 2514 vn = &vmap_nodes[i]; 2515 2516 spin_lock(&vn->busy.lock); 2517 va = __find_vmap_area(addr, &vn->busy.root); 2518 spin_unlock(&vn->busy.lock); 2519 2520 if (va) 2521 return va; 2522 } while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j); 2523 2524 return NULL; 2525 } 2526 2527 static struct vmap_area *find_unlink_vmap_area(unsigned long addr) 2528 { 2529 struct vmap_node *vn; 2530 struct vmap_area *va; 2531 int i, j; 2532 2533 /* 2534 * Check the comment in the find_vmap_area() about the loop. 2535 */ 2536 i = j = addr_to_node_id(addr); 2537 do { 2538 vn = &vmap_nodes[i]; 2539 2540 spin_lock(&vn->busy.lock); 2541 va = __find_vmap_area(addr, &vn->busy.root); 2542 if (va) 2543 unlink_va(va, &vn->busy.root); 2544 spin_unlock(&vn->busy.lock); 2545 2546 if (va) 2547 return va; 2548 } while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j); 2549 2550 return NULL; 2551 } 2552 2553 /*** Per cpu kva allocator ***/ 2554 2555 /* 2556 * vmap space is limited especially on 32 bit architectures. Ensure there is 2557 * room for at least 16 percpu vmap blocks per CPU. 2558 */ 2559 /* 2560 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 2561 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 2562 * instead (we just need a rough idea) 2563 */ 2564 #if BITS_PER_LONG == 32 2565 #define VMALLOC_SPACE (128UL*1024*1024) 2566 #else 2567 #define VMALLOC_SPACE (128UL*1024*1024*1024) 2568 #endif 2569 2570 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 2571 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 2572 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 2573 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 2574 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 2575 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 2576 #define VMAP_BBMAP_BITS \ 2577 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 2578 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 2579 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 2580 2581 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 2582 2583 /* 2584 * Purge threshold to prevent overeager purging of fragmented blocks for 2585 * regular operations: Purge if vb->free is less than 1/4 of the capacity. 2586 */ 2587 #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) 2588 2589 #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/ 2590 #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/ 2591 #define VMAP_FLAGS_MASK 0x3 2592 2593 struct vmap_block_queue { 2594 spinlock_t lock; 2595 struct list_head free; 2596 2597 /* 2598 * An xarray requires an extra memory dynamically to 2599 * be allocated. If it is an issue, we can use rb-tree 2600 * instead. 
2601 */ 2602 struct xarray vmap_blocks; 2603 }; 2604 2605 struct vmap_block { 2606 spinlock_t lock; 2607 struct vmap_area *va; 2608 unsigned long free, dirty; 2609 DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS); 2610 unsigned long dirty_min, dirty_max; /*< dirty range */ 2611 struct list_head free_list; 2612 struct rcu_head rcu_head; 2613 struct list_head purge; 2614 unsigned int cpu; 2615 }; 2616 2617 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 2618 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 2619 2620 /* 2621 * In order to fast access to any "vmap_block" associated with a 2622 * specific address, we use a hash. 2623 * 2624 * A per-cpu vmap_block_queue is used in both ways, to serialize 2625 * an access to free block chains among CPUs(alloc path) and it 2626 * also acts as a vmap_block hash(alloc/free paths). It means we 2627 * overload it, since we already have the per-cpu array which is 2628 * used as a hash table. When used as a hash a 'cpu' passed to 2629 * per_cpu() is not actually a CPU but rather a hash index. 2630 * 2631 * A hash function is addr_to_vb_xa() which hashes any address 2632 * to a specific index(in a hash) it belongs to. This then uses a 2633 * per_cpu() macro to access an array with generated index. 2634 * 2635 * An example: 2636 * 2637 * CPU_1 CPU_2 CPU_0 2638 * | | | 2639 * V V V 2640 * 0 10 20 30 40 50 60 2641 * |------|------|------|------|------|------|...<vmap address space> 2642 * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2 2643 * 2644 * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus 2645 * it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock; 2646 * 2647 * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus 2648 * it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock; 2649 * 2650 * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus 2651 * it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock. 2652 * 2653 * This technique almost always avoids lock contention on insert/remove, 2654 * however xarray spinlocks protect against any contention that remains. 2655 */ 2656 static struct xarray * 2657 addr_to_vb_xa(unsigned long addr) 2658 { 2659 int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids; 2660 2661 /* 2662 * Please note, nr_cpu_ids points on a highest set 2663 * possible bit, i.e. we never invoke cpumask_next() 2664 * if an index points on it which is nr_cpu_ids - 1. 2665 */ 2666 if (!cpu_possible(index)) 2667 index = cpumask_next(index, cpu_possible_mask); 2668 2669 return &per_cpu(vmap_block_queue, index).vmap_blocks; 2670 } 2671 2672 /* 2673 * We should probably have a fallback mechanism to allocate virtual memory 2674 * out of partially filled vmap blocks. However vmap block sizing should be 2675 * fairly reasonable according to the vmalloc size, so it shouldn't be a 2676 * big problem. 2677 */ 2678 2679 static unsigned long addr_to_vb_idx(unsigned long addr) 2680 { 2681 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 2682 addr /= VMAP_BLOCK_SIZE; 2683 return addr; 2684 } 2685 2686 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 2687 { 2688 unsigned long addr; 2689 2690 addr = va_start + (pages_off << PAGE_SHIFT); 2691 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 2692 return (void *)addr; 2693 } 2694 2695 /** 2696 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 2697 * block. 
The number of pages can't exceed VMAP_BBMAP_BITS 2698 * @order: how many 2^order pages should be occupied in newly allocated block 2699 * @gfp_mask: flags for the page level allocator 2700 * 2701 * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 2702 */ 2703 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 2704 { 2705 struct vmap_block_queue *vbq; 2706 struct vmap_block *vb; 2707 struct vmap_area *va; 2708 struct xarray *xa; 2709 unsigned long vb_idx; 2710 int node, err; 2711 void *vaddr; 2712 2713 node = numa_node_id(); 2714 2715 vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask, node); 2716 if (unlikely(!vb)) 2717 return ERR_PTR(-ENOMEM); 2718 2719 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 2720 VMALLOC_START, VMALLOC_END, 2721 node, gfp_mask, 2722 VMAP_RAM|VMAP_BLOCK, NULL); 2723 if (IS_ERR(va)) { 2724 kfree(vb); 2725 return ERR_CAST(va); 2726 } 2727 2728 vaddr = vmap_block_vaddr(va->va_start, 0); 2729 spin_lock_init(&vb->lock); 2730 vb->va = va; 2731 /* At least something should be left free */ 2732 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 2733 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS); 2734 vb->free = VMAP_BBMAP_BITS - (1UL << order); 2735 vb->dirty = 0; 2736 vb->dirty_min = VMAP_BBMAP_BITS; 2737 vb->dirty_max = 0; 2738 bitmap_set(vb->used_map, 0, (1UL << order)); 2739 INIT_LIST_HEAD(&vb->free_list); 2740 vb->cpu = raw_smp_processor_id(); 2741 2742 xa = addr_to_vb_xa(va->va_start); 2743 vb_idx = addr_to_vb_idx(va->va_start); 2744 err = xa_insert(xa, vb_idx, vb, gfp_mask); 2745 if (err) { 2746 kfree(vb); 2747 free_vmap_area(va); 2748 return ERR_PTR(err); 2749 } 2750 /* 2751 * list_add_tail_rcu() could happen on another core 2752 * rather than vb->cpu due to task migration, which 2753 * is safe as list_add_tail_rcu() ensures the list's 2754 * integrity together with list_for_each_rcu() on the read 2755 * side.
2756 */ 2757 vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu); 2758 spin_lock(&vbq->lock); 2759 list_add_tail_rcu(&vb->free_list, &vbq->free); 2760 spin_unlock(&vbq->lock); 2761 2762 return vaddr; 2763 } 2764 2765 static void free_vmap_block(struct vmap_block *vb) 2766 { 2767 struct vmap_node *vn; 2768 struct vmap_block *tmp; 2769 struct xarray *xa; 2770 2771 xa = addr_to_vb_xa(vb->va->va_start); 2772 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); 2773 BUG_ON(tmp != vb); 2774 2775 vn = addr_to_node(vb->va->va_start); 2776 spin_lock(&vn->busy.lock); 2777 unlink_va(vb->va, &vn->busy.root); 2778 spin_unlock(&vn->busy.lock); 2779 2780 free_vmap_area_noflush(vb->va); 2781 kfree_rcu(vb, rcu_head); 2782 } 2783 2784 static bool purge_fragmented_block(struct vmap_block *vb, 2785 struct list_head *purge_list, bool force_purge) 2786 { 2787 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu); 2788 2789 if (vb->free + vb->dirty != VMAP_BBMAP_BITS || 2790 vb->dirty == VMAP_BBMAP_BITS) 2791 return false; 2792 2793 /* Don't overeagerly purge usable blocks unless requested */ 2794 if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) 2795 return false; 2796 2797 /* prevent further allocs after releasing lock */ 2798 WRITE_ONCE(vb->free, 0); 2799 /* prevent purging it again */ 2800 WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); 2801 vb->dirty_min = 0; 2802 vb->dirty_max = VMAP_BBMAP_BITS; 2803 spin_lock(&vbq->lock); 2804 list_del_rcu(&vb->free_list); 2805 spin_unlock(&vbq->lock); 2806 list_add_tail(&vb->purge, purge_list); 2807 return true; 2808 } 2809 2810 static void free_purged_blocks(struct list_head *purge_list) 2811 { 2812 struct vmap_block *vb, *n_vb; 2813 2814 list_for_each_entry_safe(vb, n_vb, purge_list, purge) { 2815 list_del(&vb->purge); 2816 free_vmap_block(vb); 2817 } 2818 } 2819 2820 static void purge_fragmented_blocks(int cpu) 2821 { 2822 LIST_HEAD(purge); 2823 struct vmap_block *vb; 2824 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2825 2826 rcu_read_lock(); 2827 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2828 unsigned long free = READ_ONCE(vb->free); 2829 unsigned long dirty = READ_ONCE(vb->dirty); 2830 2831 if (free + dirty != VMAP_BBMAP_BITS || 2832 dirty == VMAP_BBMAP_BITS) 2833 continue; 2834 2835 spin_lock(&vb->lock); 2836 purge_fragmented_block(vb, &purge, true); 2837 spin_unlock(&vb->lock); 2838 } 2839 rcu_read_unlock(); 2840 free_purged_blocks(&purge); 2841 } 2842 2843 static void purge_fragmented_blocks_allcpus(void) 2844 { 2845 int cpu; 2846 2847 for_each_possible_cpu(cpu) 2848 purge_fragmented_blocks(cpu); 2849 } 2850 2851 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 2852 { 2853 struct vmap_block_queue *vbq; 2854 struct vmap_block *vb; 2855 void *vaddr = NULL; 2856 unsigned int order; 2857 2858 BUG_ON(offset_in_page(size)); 2859 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2860 if (WARN_ON(size == 0)) { 2861 /* 2862 * Allocating 0 bytes isn't what caller wants since 2863 * get_order(0) returns funny result. Just warn and terminate 2864 * early. 
2865 */ 2866 return ERR_PTR(-EINVAL); 2867 } 2868 order = get_order(size); 2869 2870 rcu_read_lock(); 2871 vbq = raw_cpu_ptr(&vmap_block_queue); 2872 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2873 unsigned long pages_off; 2874 2875 if (READ_ONCE(vb->free) < (1UL << order)) 2876 continue; 2877 2878 spin_lock(&vb->lock); 2879 if (vb->free < (1UL << order)) { 2880 spin_unlock(&vb->lock); 2881 continue; 2882 } 2883 2884 pages_off = VMAP_BBMAP_BITS - vb->free; 2885 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 2886 WRITE_ONCE(vb->free, vb->free - (1UL << order)); 2887 bitmap_set(vb->used_map, pages_off, (1UL << order)); 2888 if (vb->free == 0) { 2889 spin_lock(&vbq->lock); 2890 list_del_rcu(&vb->free_list); 2891 spin_unlock(&vbq->lock); 2892 } 2893 2894 spin_unlock(&vb->lock); 2895 break; 2896 } 2897 2898 rcu_read_unlock(); 2899 2900 /* Allocate new block if nothing was found */ 2901 if (!vaddr) 2902 vaddr = new_vmap_block(order, gfp_mask); 2903 2904 return vaddr; 2905 } 2906 2907 static void vb_free(unsigned long addr, unsigned long size) 2908 { 2909 unsigned long offset; 2910 unsigned int order; 2911 struct vmap_block *vb; 2912 struct xarray *xa; 2913 2914 BUG_ON(offset_in_page(size)); 2915 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2916 2917 flush_cache_vunmap(addr, addr + size); 2918 2919 order = get_order(size); 2920 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 2921 2922 xa = addr_to_vb_xa(addr); 2923 vb = xa_load(xa, addr_to_vb_idx(addr)); 2924 2925 spin_lock(&vb->lock); 2926 bitmap_clear(vb->used_map, offset, (1UL << order)); 2927 spin_unlock(&vb->lock); 2928 2929 vunmap_range_noflush(addr, addr + size); 2930 2931 if (debug_pagealloc_enabled_static()) 2932 flush_tlb_kernel_range(addr, addr + size); 2933 2934 spin_lock(&vb->lock); 2935 2936 /* Expand the not yet TLB flushed dirty range */ 2937 vb->dirty_min = min(vb->dirty_min, offset); 2938 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2939 2940 WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); 2941 if (vb->dirty == VMAP_BBMAP_BITS) { 2942 BUG_ON(vb->free); 2943 spin_unlock(&vb->lock); 2944 free_vmap_block(vb); 2945 } else 2946 spin_unlock(&vb->lock); 2947 } 2948 2949 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2950 { 2951 LIST_HEAD(purge_list); 2952 int cpu; 2953 2954 if (unlikely(!vmap_initialized)) 2955 return; 2956 2957 mutex_lock(&vmap_purge_lock); 2958 2959 for_each_possible_cpu(cpu) { 2960 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2961 struct vmap_block *vb; 2962 unsigned long idx; 2963 2964 rcu_read_lock(); 2965 xa_for_each(&vbq->vmap_blocks, idx, vb) { 2966 spin_lock(&vb->lock); 2967 2968 /* 2969 * Try to purge a fragmented block first. If it's 2970 * not purgeable, check whether there is dirty 2971 * space to be flushed. 
2972 */ 2973 if (!purge_fragmented_block(vb, &purge_list, false) && 2974 vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { 2975 unsigned long va_start = vb->va->va_start; 2976 unsigned long s, e; 2977 2978 s = va_start + (vb->dirty_min << PAGE_SHIFT); 2979 e = va_start + (vb->dirty_max << PAGE_SHIFT); 2980 2981 start = min(s, start); 2982 end = max(e, end); 2983 2984 /* Prevent that this is flushed again */ 2985 vb->dirty_min = VMAP_BBMAP_BITS; 2986 vb->dirty_max = 0; 2987 2988 flush = 1; 2989 } 2990 spin_unlock(&vb->lock); 2991 } 2992 rcu_read_unlock(); 2993 } 2994 free_purged_blocks(&purge_list); 2995 2996 if (!__purge_vmap_area_lazy(start, end, false) && flush) 2997 flush_tlb_kernel_range(start, end); 2998 mutex_unlock(&vmap_purge_lock); 2999 } 3000 3001 /** 3002 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 3003 * 3004 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 3005 * to amortize TLB flushing overheads. What this means is that any page you 3006 * have now, may, in a former life, have been mapped into kernel virtual 3007 * address by the vmap layer and so there might be some CPUs with TLB entries 3008 * still referencing that page (additional to the regular 1:1 kernel mapping). 3009 * 3010 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 3011 * be sure that none of the pages we have control over will have any aliases 3012 * from the vmap layer. 3013 */ 3014 void vm_unmap_aliases(void) 3015 { 3016 _vm_unmap_aliases(ULONG_MAX, 0, 0); 3017 } 3018 EXPORT_SYMBOL_GPL(vm_unmap_aliases); 3019 3020 /** 3021 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 3022 * @mem: the pointer returned by vm_map_ram 3023 * @count: the count passed to that vm_map_ram call (cannot unmap partial) 3024 */ 3025 void vm_unmap_ram(const void *mem, unsigned int count) 3026 { 3027 unsigned long size = (unsigned long)count << PAGE_SHIFT; 3028 unsigned long addr = (unsigned long)kasan_reset_tag(mem); 3029 struct vmap_area *va; 3030 3031 might_sleep(); 3032 BUG_ON(!addr); 3033 BUG_ON(addr < VMALLOC_START); 3034 BUG_ON(addr > VMALLOC_END); 3035 BUG_ON(!PAGE_ALIGNED(addr)); 3036 3037 kasan_poison_vmalloc(mem, size); 3038 3039 if (likely(count <= VMAP_MAX_ALLOC)) { 3040 debug_check_no_locks_freed(mem, size); 3041 vb_free(addr, size); 3042 return; 3043 } 3044 3045 va = find_unlink_vmap_area(addr); 3046 if (WARN_ON_ONCE(!va)) 3047 return; 3048 3049 debug_check_no_locks_freed((void *)va->va_start, va_size(va)); 3050 free_unmap_vmap_area(va); 3051 } 3052 EXPORT_SYMBOL(vm_unmap_ram); 3053 3054 /** 3055 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 3056 * @pages: an array of pointers to the pages to be mapped 3057 * @count: number of pages 3058 * @node: prefer to allocate data structures on this node 3059 * 3060 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 3061 * faster than vmap so it's good. But if you mix long-life and short-life 3062 * objects with vm_map_ram(), it could consume lots of address space through 3063 * fragmentation (especially on a 32bit machine). You could see failures in 3064 * the end. Please use this function for short-lived objects. 
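 *
 * A minimal usage sketch (illustrative only; @pages and @nr stand for a
 * caller-provided page array and its length, and the memcpy() of @src is
 * just a placeholder for whatever short-lived access is performed):
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (va) {
 *		memcpy(va, src, (unsigned long)nr << PAGE_SHIFT);
 *		vm_unmap_ram(va, nr);
 *	}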
3065 * 3066 * Returns: a pointer to the address that has been mapped, or %NULL on failure 3067 */ 3068 void *vm_map_ram(struct page **pages, unsigned int count, int node) 3069 { 3070 unsigned long size = (unsigned long)count << PAGE_SHIFT; 3071 unsigned long addr; 3072 void *mem; 3073 3074 if (likely(count <= VMAP_MAX_ALLOC)) { 3075 mem = vb_alloc(size, GFP_KERNEL); 3076 if (IS_ERR(mem)) 3077 return NULL; 3078 addr = (unsigned long)mem; 3079 } else { 3080 struct vmap_area *va; 3081 va = alloc_vmap_area(size, PAGE_SIZE, 3082 VMALLOC_START, VMALLOC_END, 3083 node, GFP_KERNEL, VMAP_RAM, 3084 NULL); 3085 if (IS_ERR(va)) 3086 return NULL; 3087 3088 addr = va->va_start; 3089 mem = (void *)addr; 3090 } 3091 3092 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 3093 pages, PAGE_SHIFT) < 0) { 3094 vm_unmap_ram(mem, count); 3095 return NULL; 3096 } 3097 3098 /* 3099 * Mark the pages as accessible, now that they are mapped. 3100 * With hardware tag-based KASAN, marking is skipped for 3101 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 3102 */ 3103 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); 3104 3105 return mem; 3106 } 3107 EXPORT_SYMBOL(vm_map_ram); 3108 3109 static struct vm_struct *vmlist __initdata; 3110 3111 static inline unsigned int vm_area_page_order(struct vm_struct *vm) 3112 { 3113 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 3114 return vm->page_order; 3115 #else 3116 return 0; 3117 #endif 3118 } 3119 3120 unsigned int get_vm_area_page_order(struct vm_struct *vm) 3121 { 3122 return vm_area_page_order(vm); 3123 } 3124 3125 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 3126 { 3127 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 3128 vm->page_order = order; 3129 #else 3130 BUG_ON(order != 0); 3131 #endif 3132 } 3133 3134 /** 3135 * vm_area_add_early - add vmap area early during boot 3136 * @vm: vm_struct to add 3137 * 3138 * This function is used to add fixed kernel vm area to vmlist before 3139 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 3140 * should contain proper values and the other fields should be zero. 3141 * 3142 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 3143 */ 3144 void __init vm_area_add_early(struct vm_struct *vm) 3145 { 3146 struct vm_struct *tmp, **p; 3147 3148 BUG_ON(vmap_initialized); 3149 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 3150 if (tmp->addr >= vm->addr) { 3151 BUG_ON(tmp->addr < vm->addr + vm->size); 3152 break; 3153 } else 3154 BUG_ON(tmp->addr + tmp->size > vm->addr); 3155 } 3156 vm->next = *p; 3157 *p = vm; 3158 } 3159 3160 /** 3161 * vm_area_register_early - register vmap area early during boot 3162 * @vm: vm_struct to register 3163 * @align: requested alignment 3164 * 3165 * This function is used to register kernel vm area before 3166 * vmalloc_init() is called. @vm->size and @vm->flags should contain 3167 * proper values on entry and other fields should be zero. On return, 3168 * vm->addr contains the allocated address. 3169 * 3170 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
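 *
 * A hedged boot-time sketch (illustrative; the vm_struct, its size and its
 * flags are made up for the example and are not taken from a real user; on
 * return, example_vm.addr holds the reserved address):
 *
 *	static struct vm_struct example_vm;
 *
 *	static void __init example_reserve_early(void)
 *	{
 *		example_vm.size  = SZ_2M;
 *		example_vm.flags = VM_ALLOC;
 *		vm_area_register_early(&example_vm, PAGE_SIZE);
 *		pr_info("reserved at %p\n", example_vm.addr);
 *	}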
3171 */ 3172 void __init vm_area_register_early(struct vm_struct *vm, size_t align) 3173 { 3174 unsigned long addr = ALIGN(VMALLOC_START, align); 3175 struct vm_struct *cur, **p; 3176 3177 BUG_ON(vmap_initialized); 3178 3179 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { 3180 if ((unsigned long)cur->addr - addr >= vm->size) 3181 break; 3182 addr = ALIGN((unsigned long)cur->addr + cur->size, align); 3183 } 3184 3185 BUG_ON(addr > VMALLOC_END - vm->size); 3186 vm->addr = (void *)addr; 3187 vm->next = *p; 3188 *p = vm; 3189 kasan_populate_early_vm_area_shadow(vm->addr, vm->size); 3190 } 3191 3192 static void clear_vm_uninitialized_flag(struct vm_struct *vm) 3193 { 3194 /* 3195 * Before removing VM_UNINITIALIZED, 3196 * we should make sure that vm has proper values. 3197 * Pair with smp_rmb() in vread_iter() and vmalloc_info_show(). 3198 */ 3199 smp_wmb(); 3200 vm->flags &= ~VM_UNINITIALIZED; 3201 } 3202 3203 struct vm_struct *__get_vm_area_node(unsigned long size, 3204 unsigned long align, unsigned long shift, unsigned long flags, 3205 unsigned long start, unsigned long end, int node, 3206 gfp_t gfp_mask, const void *caller) 3207 { 3208 struct vmap_area *va; 3209 struct vm_struct *area; 3210 unsigned long requested_size = size; 3211 3212 BUG_ON(in_interrupt()); 3213 size = ALIGN(size, 1ul << shift); 3214 if (unlikely(!size)) 3215 return NULL; 3216 3217 if (flags & VM_IOREMAP) 3218 align = 1ul << clamp_t(int, get_count_order_long(size), 3219 PAGE_SHIFT, IOREMAP_MAX_ORDER); 3220 3221 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 3222 if (unlikely(!area)) 3223 return NULL; 3224 3225 if (!(flags & VM_NO_GUARD)) 3226 size += PAGE_SIZE; 3227 3228 area->flags = flags; 3229 area->caller = caller; 3230 area->requested_size = requested_size; 3231 3232 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); 3233 if (IS_ERR(va)) { 3234 kfree(area); 3235 return NULL; 3236 } 3237 3238 /* 3239 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a 3240 * best-effort approach, as they can be mapped outside of vmalloc code. 3241 * For VM_ALLOC mappings, the pages are marked as accessible after 3242 * getting mapped in __vmalloc_node_range(). 3243 * With hardware tag-based KASAN, marking is skipped for 3244 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 3245 */ 3246 if (!(flags & VM_ALLOC)) 3247 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, 3248 KASAN_VMALLOC_PROT_NORMAL); 3249 3250 return area; 3251 } 3252 3253 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 3254 unsigned long start, unsigned long end, 3255 const void *caller) 3256 { 3257 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, 3258 NUMA_NO_NODE, GFP_KERNEL, caller); 3259 } 3260 3261 /** 3262 * get_vm_area - reserve a contiguous kernel virtual area 3263 * @size: size of the area 3264 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 3265 * 3266 * Search an area of @size in the kernel virtual mapping area, 3267 * and reserved it for out purposes. Returns the area descriptor 3268 * on success or %NULL on failure. 3269 * 3270 * Return: the area descriptor on success or %NULL on failure. 
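 *
 * A hedged usage sketch (illustrative; the size, the %VM_IOREMAP flag and
 * the use of free_vm_area() to drop the reservation are assumptions of the
 * example, not requirements of this interface):
 *
 *	struct vm_struct *area = get_vm_area(SZ_1M, VM_IOREMAP);
 *
 *	if (area) {
 *		pr_info("reserved KVA at %p\n", area->addr);
 *		free_vm_area(area);
 *	}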
3271 */ 3272 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 3273 { 3274 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 3275 VMALLOC_START, VMALLOC_END, 3276 NUMA_NO_NODE, GFP_KERNEL, 3277 __builtin_return_address(0)); 3278 } 3279 3280 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 3281 const void *caller) 3282 { 3283 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 3284 VMALLOC_START, VMALLOC_END, 3285 NUMA_NO_NODE, GFP_KERNEL, caller); 3286 } 3287 3288 /** 3289 * find_vm_area - find a continuous kernel virtual area 3290 * @addr: base address 3291 * 3292 * Search for the kernel VM area starting at @addr, and return it. 3293 * It is up to the caller to do all required locking to keep the returned 3294 * pointer valid. 3295 * 3296 * Return: the area descriptor on success or %NULL on failure. 3297 */ 3298 struct vm_struct *find_vm_area(const void *addr) 3299 { 3300 struct vmap_area *va; 3301 3302 va = find_vmap_area((unsigned long)addr); 3303 if (!va) 3304 return NULL; 3305 3306 return va->vm; 3307 } 3308 3309 /** 3310 * remove_vm_area - find and remove a continuous kernel virtual area 3311 * @addr: base address 3312 * 3313 * Search for the kernel VM area starting at @addr, and remove it. 3314 * This function returns the found VM area, but using it is NOT safe 3315 * on SMP machines, except for its size or flags. 3316 * 3317 * Return: the area descriptor on success or %NULL on failure. 3318 */ 3319 struct vm_struct *remove_vm_area(const void *addr) 3320 { 3321 struct vmap_area *va; 3322 struct vm_struct *vm; 3323 3324 might_sleep(); 3325 3326 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 3327 addr)) 3328 return NULL; 3329 3330 va = find_unlink_vmap_area((unsigned long)addr); 3331 if (!va || !va->vm) 3332 return NULL; 3333 vm = va->vm; 3334 3335 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm)); 3336 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm)); 3337 kasan_free_module_shadow(vm); 3338 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm)); 3339 3340 free_unmap_vmap_area(va); 3341 return vm; 3342 } 3343 3344 static inline void set_area_direct_map(const struct vm_struct *area, 3345 int (*set_direct_map)(struct page *page)) 3346 { 3347 int i; 3348 3349 /* HUGE_VMALLOC passes small pages to set_direct_map */ 3350 for (i = 0; i < area->nr_pages; i++) 3351 if (page_address(area->pages[i])) 3352 set_direct_map(area->pages[i]); 3353 } 3354 3355 /* 3356 * Flush the vm mapping and reset the direct map. 3357 */ 3358 static void vm_reset_perms(struct vm_struct *area) 3359 { 3360 unsigned long start = ULONG_MAX, end = 0; 3361 unsigned int page_order = vm_area_page_order(area); 3362 int flush_dmap = 0; 3363 int i; 3364 3365 /* 3366 * Find the start and end range of the direct mappings to make sure that 3367 * the vm_unmap_aliases() flush includes the direct map. 3368 */ 3369 for (i = 0; i < area->nr_pages; i += 1U << page_order) { 3370 unsigned long addr = (unsigned long)page_address(area->pages[i]); 3371 3372 if (addr) { 3373 unsigned long page_size; 3374 3375 page_size = PAGE_SIZE << page_order; 3376 start = min(addr, start); 3377 end = max(addr + page_size, end); 3378 flush_dmap = 1; 3379 } 3380 } 3381 3382 /* 3383 * Set direct map to something invalid so that it won't be cached if 3384 * there are any accesses after the TLB flush, then flush the TLB and 3385 * reset the direct map permissions to the default. 
3386 */ 3387 set_area_direct_map(area, set_direct_map_invalid_noflush); 3388 _vm_unmap_aliases(start, end, flush_dmap); 3389 set_area_direct_map(area, set_direct_map_default_noflush); 3390 } 3391 3392 static void delayed_vfree_work(struct work_struct *w) 3393 { 3394 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); 3395 struct llist_node *t, *llnode; 3396 3397 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) 3398 vfree(llnode); 3399 } 3400 3401 /** 3402 * vfree_atomic - release memory allocated by vmalloc() 3403 * @addr: memory base address 3404 * 3405 * This one is just like vfree() but can be called in any atomic context 3406 * except NMIs. 3407 */ 3408 void vfree_atomic(const void *addr) 3409 { 3410 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 3411 3412 BUG_ON(in_nmi()); 3413 kmemleak_free(addr); 3414 3415 /* 3416 * Use raw_cpu_ptr() because this can be called from preemptible 3417 * context. Preemption is absolutely fine here, because the llist_add() 3418 * implementation is lockless, so it works even if we are adding to 3419 * another cpu's list. schedule_work() should be fine with this too. 3420 */ 3421 if (addr && llist_add((struct llist_node *)addr, &p->list)) 3422 schedule_work(&p->wq); 3423 } 3424 3425 /** 3426 * vfree - Release memory allocated by vmalloc() 3427 * @addr: Memory base address 3428 * 3429 * Free the virtually continuous memory area starting at @addr, as obtained 3430 * from one of the vmalloc() family of APIs. This will usually also free the 3431 * physical memory underlying the virtual allocation, but that memory is 3432 * reference counted, so it will not be freed until the last user goes away. 3433 * 3434 * If @addr is NULL, no operation is performed. 3435 * 3436 * Context: 3437 * May sleep if called *not* from interrupt context. 3438 * Must not be called in NMI context (strictly speaking, it could be 3439 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 3440 * conventions for vfree() arch-dependent would be a really bad idea). 3441 */ 3442 void vfree(const void *addr) 3443 { 3444 struct vm_struct *vm; 3445 int i; 3446 3447 if (unlikely(in_interrupt())) { 3448 vfree_atomic(addr); 3449 return; 3450 } 3451 3452 BUG_ON(in_nmi()); 3453 kmemleak_free(addr); 3454 might_sleep(); 3455 3456 if (!addr) 3457 return; 3458 3459 vm = remove_vm_area(addr); 3460 if (unlikely(!vm)) { 3461 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 3462 addr); 3463 return; 3464 } 3465 3466 if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS)) 3467 vm_reset_perms(vm); 3468 /* All pages of vm should be charged to same memcg, so use first one. */ 3469 if (vm->nr_pages && !(vm->flags & VM_MAP_PUT_PAGES)) 3470 mod_memcg_page_state(vm->pages[0], MEMCG_VMALLOC, -vm->nr_pages); 3471 for (i = 0; i < vm->nr_pages; i++) { 3472 struct page *page = vm->pages[i]; 3473 3474 BUG_ON(!page); 3475 /* 3476 * High-order allocs for huge vmallocs are split, so 3477 * can be freed as an array of order-0 allocations 3478 */ 3479 __free_page(page); 3480 cond_resched(); 3481 } 3482 if (!(vm->flags & VM_MAP_PUT_PAGES)) 3483 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages); 3484 kvfree(vm->pages); 3485 kfree(vm); 3486 } 3487 EXPORT_SYMBOL(vfree); 3488 3489 /** 3490 * vunmap - release virtual mapping obtained by vmap() 3491 * @addr: memory base address 3492 * 3493 * Free the virtually contiguous memory area starting at @addr, 3494 * which was created from the page array passed to vmap(). 3495 * 3496 * Must not be called in interrupt context. 
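 *
 * A hedged pairing sketch (illustrative; @pages and @count are assumed to
 * stay owned by the caller, hence %VM_MAP without %VM_MAP_PUT_PAGES, and
 * use_mapping() is a stand-in for the caller's long-lived use):
 *
 *	void *va = vmap(pages, count, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		use_mapping(va);
 *		vunmap(va);
 *	}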
3497 */ 3498 void vunmap(const void *addr) 3499 { 3500 struct vm_struct *vm; 3501 3502 BUG_ON(in_interrupt()); 3503 might_sleep(); 3504 3505 if (!addr) 3506 return; 3507 vm = remove_vm_area(addr); 3508 if (unlikely(!vm)) { 3509 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", 3510 addr); 3511 return; 3512 } 3513 kfree(vm); 3514 } 3515 EXPORT_SYMBOL(vunmap); 3516 3517 /** 3518 * vmap - map an array of pages into virtually contiguous space 3519 * @pages: array of page pointers 3520 * @count: number of pages to map 3521 * @flags: vm_area->flags 3522 * @prot: page protection for the mapping 3523 * 3524 * Maps @count pages from @pages into contiguous kernel virtual space. 3525 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself 3526 * (which must be kmalloc or vmalloc memory) and one reference per pages in it 3527 * are transferred from the caller to vmap(), and will be freed / dropped when 3528 * vfree() is called on the return value. 3529 * 3530 * Return: the address of the area or %NULL on failure 3531 */ 3532 void *vmap(struct page **pages, unsigned int count, 3533 unsigned long flags, pgprot_t prot) 3534 { 3535 struct vm_struct *area; 3536 unsigned long addr; 3537 unsigned long size; /* In bytes */ 3538 3539 might_sleep(); 3540 3541 if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS)) 3542 return NULL; 3543 3544 /* 3545 * Your top guard is someone else's bottom guard. Not having a top 3546 * guard compromises someone else's mappings too. 3547 */ 3548 if (WARN_ON_ONCE(flags & VM_NO_GUARD)) 3549 flags &= ~VM_NO_GUARD; 3550 3551 if (count > totalram_pages()) 3552 return NULL; 3553 3554 size = (unsigned long)count << PAGE_SHIFT; 3555 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 3556 if (!area) 3557 return NULL; 3558 3559 addr = (unsigned long)area->addr; 3560 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 3561 pages, PAGE_SHIFT) < 0) { 3562 vunmap(area->addr); 3563 return NULL; 3564 } 3565 3566 if (flags & VM_MAP_PUT_PAGES) { 3567 area->pages = pages; 3568 area->nr_pages = count; 3569 } 3570 return area->addr; 3571 } 3572 EXPORT_SYMBOL(vmap); 3573 3574 #ifdef CONFIG_VMAP_PFN 3575 struct vmap_pfn_data { 3576 unsigned long *pfns; 3577 pgprot_t prot; 3578 unsigned int idx; 3579 }; 3580 3581 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 3582 { 3583 struct vmap_pfn_data *data = private; 3584 unsigned long pfn = data->pfns[data->idx]; 3585 pte_t ptent; 3586 3587 if (WARN_ON_ONCE(pfn_valid(pfn))) 3588 return -EINVAL; 3589 3590 ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); 3591 set_pte_at(&init_mm, addr, pte, ptent); 3592 3593 data->idx++; 3594 return 0; 3595 } 3596 3597 /** 3598 * vmap_pfn - map an array of PFNs into virtually contiguous space 3599 * @pfns: array of PFNs 3600 * @count: number of pages to map 3601 * @prot: page protection for the mapping 3602 * 3603 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 3604 * the start address of the mapping. 
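 *
 * A hedged usage sketch (illustrative; the PFN array is assumed to describe
 * memory without struct pages, e.g. a device aperture, access_aperture() is
 * a stand-in for the caller's use, and freeing via vunmap() is part of the
 * example, not a statement about every user):
 *
 *	void *va = vmap_pfn(pfns, nr, pgprot_writecombine(PAGE_KERNEL));
 *
 *	if (va) {
 *		access_aperture(va);
 *		vunmap(va);
 *	}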
3605 */ 3606 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 3607 { 3608 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 3609 struct vm_struct *area; 3610 3611 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 3612 __builtin_return_address(0)); 3613 if (!area) 3614 return NULL; 3615 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 3616 count * PAGE_SIZE, vmap_pfn_apply, &data)) { 3617 free_vm_area(area); 3618 return NULL; 3619 } 3620 3621 flush_cache_vmap((unsigned long)area->addr, 3622 (unsigned long)area->addr + count * PAGE_SIZE); 3623 3624 return area->addr; 3625 } 3626 EXPORT_SYMBOL_GPL(vmap_pfn); 3627 #endif /* CONFIG_VMAP_PFN */ 3628 3629 /* 3630 * Helper for vmalloc to adjust the gfp flags for certain allocations. 3631 */ 3632 static inline gfp_t vmalloc_gfp_adjust(gfp_t flags, const bool large) 3633 { 3634 flags |= __GFP_NOWARN; 3635 if (large) 3636 flags &= ~__GFP_NOFAIL; 3637 return flags; 3638 } 3639 3640 static inline unsigned int 3641 vm_area_alloc_pages(gfp_t gfp, int nid, 3642 unsigned int order, unsigned int nr_pages, struct page **pages) 3643 { 3644 unsigned int nr_allocated = 0; 3645 unsigned int nr_remaining = nr_pages; 3646 unsigned int max_attempt_order = MAX_PAGE_ORDER; 3647 struct page *page; 3648 int i; 3649 unsigned int large_order = ilog2(nr_remaining); 3650 gfp_t large_gfp = vmalloc_gfp_adjust(gfp, large_order) & ~__GFP_DIRECT_RECLAIM; 3651 3652 large_order = min(max_attempt_order, large_order); 3653 3654 /* 3655 * Initially, attempt to have the page allocator give us large order 3656 * pages. Do not attempt allocating smaller than order chunks since 3657 * __vmap_pages_range() expects physically contigous pages of exactly 3658 * order long chunks. 3659 */ 3660 while (large_order > order && nr_remaining) { 3661 if (nid == NUMA_NO_NODE) 3662 page = alloc_pages_noprof(large_gfp, large_order); 3663 else 3664 page = alloc_pages_node_noprof(nid, large_gfp, large_order); 3665 3666 if (unlikely(!page)) { 3667 max_attempt_order = --large_order; 3668 continue; 3669 } 3670 3671 split_page(page, large_order); 3672 for (i = 0; i < (1U << large_order); i++) 3673 pages[nr_allocated + i] = page + i; 3674 3675 nr_allocated += 1U << large_order; 3676 nr_remaining = nr_pages - nr_allocated; 3677 3678 large_order = ilog2(nr_remaining); 3679 large_order = min(max_attempt_order, large_order); 3680 } 3681 3682 /* 3683 * For order-0 pages we make use of bulk allocator, if 3684 * the page array is partly or not at all populated due 3685 * to fails, fallback to a single page allocator that is 3686 * more permissive. 3687 */ 3688 if (!order) { 3689 while (nr_allocated < nr_pages) { 3690 unsigned int nr, nr_pages_request; 3691 3692 /* 3693 * A maximum allowed request is hard-coded and is 100 3694 * pages per call. That is done in order to prevent a 3695 * long preemption off scenario in the bulk-allocator 3696 * so the range is [1:100]. 3697 */ 3698 nr_pages_request = min(100U, nr_pages - nr_allocated); 3699 3700 /* memory allocation should consider mempolicy, we can't 3701 * wrongly use nearest node when nid == NUMA_NO_NODE, 3702 * otherwise memory may be allocated in only one node, 3703 * but mempolicy wants to alloc memory by interleaving. 
3704 */ 3705 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) 3706 nr = alloc_pages_bulk_mempolicy_noprof(gfp, 3707 nr_pages_request, 3708 pages + nr_allocated); 3709 else 3710 nr = alloc_pages_bulk_node_noprof(gfp, nid, 3711 nr_pages_request, 3712 pages + nr_allocated); 3713 3714 nr_allocated += nr; 3715 3716 /* 3717 * If zero or pages were obtained partly, 3718 * fallback to a single page allocator. 3719 */ 3720 if (nr != nr_pages_request) 3721 break; 3722 } 3723 } 3724 3725 /* High-order pages or fallback path if "bulk" fails. */ 3726 while (nr_allocated < nr_pages) { 3727 if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current)) 3728 break; 3729 3730 if (nid == NUMA_NO_NODE) 3731 page = alloc_pages_noprof(gfp, order); 3732 else 3733 page = alloc_pages_node_noprof(nid, gfp, order); 3734 3735 if (unlikely(!page)) 3736 break; 3737 3738 /* 3739 * High-order allocations must be able to be treated as 3740 * independent small pages by callers (as they can with 3741 * small-page vmallocs). Some drivers do their own refcounting 3742 * on vmalloc_to_page() pages, some use page->mapping, 3743 * page->lru, etc. 3744 */ 3745 if (order) 3746 split_page(page, order); 3747 3748 /* 3749 * Careful, we allocate and map page-order pages, but 3750 * tracking is done per PAGE_SIZE page so as to keep the 3751 * vm_struct APIs independent of the physical/mapped size. 3752 */ 3753 for (i = 0; i < (1U << order); i++) 3754 pages[nr_allocated + i] = page + i; 3755 3756 nr_allocated += 1U << order; 3757 } 3758 3759 return nr_allocated; 3760 } 3761 3762 static LLIST_HEAD(pending_vm_area_cleanup); 3763 static void cleanup_vm_area_work(struct work_struct *work) 3764 { 3765 struct vm_struct *area, *tmp; 3766 struct llist_node *head; 3767 3768 head = llist_del_all(&pending_vm_area_cleanup); 3769 if (!head) 3770 return; 3771 3772 llist_for_each_entry_safe(area, tmp, head, llnode) { 3773 if (!area->pages) 3774 free_vm_area(area); 3775 else 3776 vfree(area->addr); 3777 } 3778 } 3779 3780 /* 3781 * Helper for __vmalloc_area_node() to defer cleanup 3782 * of partially initialized vm_struct in error paths. 3783 */ 3784 static DECLARE_WORK(cleanup_vm_area, cleanup_vm_area_work); 3785 static void defer_vm_area_cleanup(struct vm_struct *area) 3786 { 3787 if (llist_add(&area->llnode, &pending_vm_area_cleanup)) 3788 schedule_work(&cleanup_vm_area); 3789 } 3790 3791 /* 3792 * Page tables allocations ignore external GFP. Enforces it by 3793 * the memalloc scope API. It is used by vmalloc internals and 3794 * KASAN shadow population only. 3795 * 3796 * GFP to scope mapping: 3797 * 3798 * non-blocking (no __GFP_DIRECT_RECLAIM) - memalloc_noreclaim_save() 3799 * GFP_NOFS - memalloc_nofs_save() 3800 * GFP_NOIO - memalloc_noio_save() 3801 * 3802 * Returns a flag cookie to pair with restore. 3803 */ 3804 unsigned int 3805 memalloc_apply_gfp_scope(gfp_t gfp_mask) 3806 { 3807 unsigned int flags = 0; 3808 3809 if (!gfpflags_allow_blocking(gfp_mask)) 3810 flags = memalloc_noreclaim_save(); 3811 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3812 flags = memalloc_nofs_save(); 3813 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3814 flags = memalloc_noio_save(); 3815 3816 /* 0 - no scope applied. 
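 * For example, GFP_NOFS (__GFP_IO set, __GFP_FS clear) enters the nofs
 * scope, GFP_NOIO enters the noio scope, GFP_NOWAIT and GFP_ATOMIC (no
 * __GFP_DIRECT_RECLAIM) enter the noreclaim scope, while plain GFP_KERNEL
 * applies no scope and the returned cookie stays 0.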
*/ 3817 return flags; 3818 } 3819 3820 void 3821 memalloc_restore_scope(unsigned int flags) 3822 { 3823 if (flags) 3824 memalloc_flags_restore(flags); 3825 } 3826 3827 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 3828 pgprot_t prot, unsigned int page_shift, 3829 int node) 3830 { 3831 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 3832 bool nofail = gfp_mask & __GFP_NOFAIL; 3833 unsigned long addr = (unsigned long)area->addr; 3834 unsigned long size = get_vm_area_size(area); 3835 unsigned long array_size; 3836 unsigned int nr_small_pages = size >> PAGE_SHIFT; 3837 unsigned int page_order; 3838 unsigned int flags; 3839 int ret; 3840 3841 array_size = (unsigned long)nr_small_pages * sizeof(struct page *); 3842 3843 /* __GFP_NOFAIL and "noblock" flags are mutually exclusive. */ 3844 if (!gfpflags_allow_blocking(gfp_mask)) 3845 nofail = false; 3846 3847 if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) 3848 gfp_mask |= __GFP_HIGHMEM; 3849 3850 /* Please note that the recursion is strictly bounded. */ 3851 if (array_size > PAGE_SIZE) { 3852 area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node, 3853 area->caller); 3854 } else { 3855 area->pages = kmalloc_node_noprof(array_size, nested_gfp, node); 3856 } 3857 3858 if (!area->pages) { 3859 warn_alloc(gfp_mask, NULL, 3860 "vmalloc error: size %lu, failed to allocate page array size %lu", 3861 nr_small_pages * PAGE_SIZE, array_size); 3862 goto fail; 3863 } 3864 3865 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); 3866 page_order = vm_area_page_order(area); 3867 3868 /* 3869 * High-order nofail allocations are really expensive and 3870 * potentially dangerous (premature OOM, disruptive reclaim 3871 * and compaction, etc.). 3872 * 3873 * Please note, __vmalloc_node_range_noprof() falls back 3874 * to order-0 pages if the high-order attempt is unsuccessful. 3875 */ 3876 area->nr_pages = vm_area_alloc_pages( 3877 vmalloc_gfp_adjust(gfp_mask, page_order), node, 3878 page_order, nr_small_pages, area->pages); 3879 3880 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 3881 /* All pages of vm should be charged to same memcg, so use first one. */ 3882 if (gfp_mask & __GFP_ACCOUNT && area->nr_pages) 3883 mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC, 3884 area->nr_pages); 3885 3886 /* 3887 * If not enough pages were obtained to satisfy the 3888 * allocation request, free those that were obtained via vfree(). 3889 */ 3890 if (area->nr_pages != nr_small_pages) { 3891 /* 3892 * vm_area_alloc_pages() can fail due to insufficient memory but 3893 * also: 3894 * 3895 * - a pending fatal signal 3896 * - insufficient huge page-order pages 3897 * 3898 * Since we always retry allocations at order-0 in the huge page 3899 * case, a warning for either is spurious.
3900 */ 3901 if (!fatal_signal_pending(current) && page_order == 0) 3902 warn_alloc(gfp_mask, NULL, 3903 "vmalloc error: size %lu, failed to allocate pages", 3904 area->nr_pages * PAGE_SIZE); 3905 goto fail; 3906 } 3907 3908 /* 3909 * page tables allocations ignore external gfp mask, enforce it 3910 * by the scope API 3911 */ 3912 flags = memalloc_apply_gfp_scope(gfp_mask); 3913 do { 3914 ret = __vmap_pages_range(addr, addr + size, prot, area->pages, 3915 page_shift, nested_gfp); 3916 if (nofail && (ret < 0)) 3917 schedule_timeout_uninterruptible(1); 3918 } while (nofail && (ret < 0)); 3919 memalloc_restore_scope(flags); 3920 3921 if (ret < 0) { 3922 warn_alloc(gfp_mask, NULL, 3923 "vmalloc error: size %lu, failed to map pages", 3924 area->nr_pages * PAGE_SIZE); 3925 goto fail; 3926 } 3927 3928 return area->addr; 3929 3930 fail: 3931 defer_vm_area_cleanup(area); 3932 return NULL; 3933 } 3934 3935 /* 3936 * See __vmalloc_node_range() for a clear list of supported vmalloc flags. 3937 * This gfp lists all flags currently passed through vmalloc. Currently, 3938 * __GFP_ZERO is used by BPF and __GFP_NORETRY is used by percpu. Both drm 3939 * and BPF also use GFP_USER. Additionally, various users pass 3940 * GFP_KERNEL_ACCOUNT. Xfs uses __GFP_NOLOCKDEP. 3941 */ 3942 #define GFP_VMALLOC_SUPPORTED (GFP_KERNEL | GFP_ATOMIC | GFP_NOWAIT |\ 3943 __GFP_NOFAIL | __GFP_ZERO | __GFP_NORETRY |\ 3944 GFP_NOFS | GFP_NOIO | GFP_KERNEL_ACCOUNT |\ 3945 GFP_USER | __GFP_NOLOCKDEP) 3946 3947 static gfp_t vmalloc_fix_flags(gfp_t flags) 3948 { 3949 gfp_t invalid_mask = flags & ~GFP_VMALLOC_SUPPORTED; 3950 3951 flags &= GFP_VMALLOC_SUPPORTED; 3952 WARN_ONCE(1, "Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", 3953 invalid_mask, &invalid_mask, flags, &flags); 3954 return flags; 3955 } 3956 3957 /** 3958 * __vmalloc_node_range - allocate virtually contiguous memory 3959 * @size: allocation size 3960 * @align: desired alignment 3961 * @start: vm area range start 3962 * @end: vm area range end 3963 * @gfp_mask: flags for the page level allocator 3964 * @prot: protection mask for the allocated pages 3965 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 3966 * @node: node to use for allocation or NUMA_NO_NODE 3967 * @caller: caller's return address 3968 * 3969 * Allocate enough pages to cover @size from the page level 3970 * allocator with @gfp_mask flags and map them into contiguous 3971 * virtual range with protection @prot. 3972 * 3973 * Supported GFP classes: %GFP_KERNEL, %GFP_ATOMIC, %GFP_NOWAIT, 3974 * %GFP_NOFS and %GFP_NOIO. Zone modifiers are not supported. 3975 * Please note %GFP_ATOMIC and %GFP_NOWAIT are supported only 3976 * by __vmalloc(). 3977 * 3978 * Retry modifiers: only %__GFP_NOFAIL is supported; %__GFP_NORETRY 3979 * and %__GFP_RETRY_MAYFAIL are not supported. 3980 * 3981 * %__GFP_NOWARN can be used to suppress failure messages. 3982 * 3983 * Can not be called from interrupt nor NMI contexts. 
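 *
 * A hedged example call (illustrative; @size is the caller's requested
 * size, the flag choice is an assumption of the example, and
 * __vmalloc_node_range() is assumed to be the regular wrapper around the
 * _noprof variant defined below):
 *
 *	void *p = __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 *			GFP_KERNEL, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 *			NUMA_NO_NODE, __builtin_return_address(0));
 *
 *	if (p)
 *		vfree(p);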
3984 * Return: the address of the area or %NULL on failure 3985 */ 3986 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, 3987 unsigned long start, unsigned long end, gfp_t gfp_mask, 3988 pgprot_t prot, unsigned long vm_flags, int node, 3989 const void *caller) 3990 { 3991 struct vm_struct *area; 3992 void *ret; 3993 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; 3994 unsigned long original_align = align; 3995 unsigned int shift = PAGE_SHIFT; 3996 3997 if (WARN_ON_ONCE(!size)) 3998 return NULL; 3999 4000 if ((size >> PAGE_SHIFT) > totalram_pages()) { 4001 warn_alloc(gfp_mask, NULL, 4002 "vmalloc error: size %lu, exceeds total pages", 4003 size); 4004 return NULL; 4005 } 4006 4007 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { 4008 /* 4009 * Try huge pages. Only try for PAGE_KERNEL allocations, 4010 * others like modules don't yet expect huge pages in 4011 * their allocations due to apply_to_page_range not 4012 * supporting them. 4013 */ 4014 4015 if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE) 4016 shift = PMD_SHIFT; 4017 else 4018 shift = arch_vmap_pte_supported_shift(size); 4019 4020 align = max(original_align, 1UL << shift); 4021 } 4022 4023 again: 4024 area = __get_vm_area_node(size, align, shift, VM_ALLOC | 4025 VM_UNINITIALIZED | vm_flags, start, end, node, 4026 gfp_mask, caller); 4027 if (!area) { 4028 bool nofail = gfp_mask & __GFP_NOFAIL; 4029 warn_alloc(gfp_mask, NULL, 4030 "vmalloc error: size %lu, vm_struct allocation failed%s", 4031 size, (nofail) ? ". Retrying." : ""); 4032 if (nofail) { 4033 schedule_timeout_uninterruptible(1); 4034 goto again; 4035 } 4036 goto fail; 4037 } 4038 4039 /* 4040 * Prepare arguments for __vmalloc_area_node() and 4041 * kasan_unpoison_vmalloc(). 4042 */ 4043 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { 4044 if (kasan_hw_tags_enabled()) { 4045 /* 4046 * Modify protection bits to allow tagging. 4047 * This must be done before mapping. 4048 */ 4049 prot = arch_vmap_pgprot_tagged(prot); 4050 4051 /* 4052 * Skip page_alloc poisoning and zeroing for physical 4053 * pages backing VM_ALLOC mapping. Memory is instead 4054 * poisoned and zeroed by kasan_unpoison_vmalloc(). 4055 */ 4056 gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO; 4057 } 4058 4059 /* Take note that the mapping is PAGE_KERNEL. */ 4060 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; 4061 } 4062 4063 /* Allocate physical pages and map them into vmalloc space. */ 4064 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 4065 if (!ret) 4066 goto fail; 4067 4068 /* 4069 * Mark the pages as accessible, now that they are mapped. 4070 * The condition for setting KASAN_VMALLOC_INIT should complement the 4071 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check 4072 * to make sure that memory is initialized under the same conditions. 4073 * Tag-based KASAN modes only assign tags to normal non-executable 4074 * allocations, see __kasan_unpoison_vmalloc(). 4075 */ 4076 kasan_flags |= KASAN_VMALLOC_VM_ALLOC; 4077 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && 4078 (gfp_mask & __GFP_SKIP_ZERO)) 4079 kasan_flags |= KASAN_VMALLOC_INIT; 4080 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ 4081 area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags); 4082 4083 /* 4084 * In this function, newly allocated vm_struct has VM_UNINITIALIZED 4085 * flag. It means that vm_struct is not fully initialized. 4086 * Now, it is fully initialized, so remove this flag here. 
4087 */ 4088 clear_vm_uninitialized_flag(area); 4089 4090 if (!(vm_flags & VM_DEFER_KMEMLEAK)) 4091 kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask); 4092 4093 return area->addr; 4094 4095 fail: 4096 if (shift > PAGE_SHIFT) { 4097 shift = PAGE_SHIFT; 4098 align = original_align; 4099 goto again; 4100 } 4101 4102 return NULL; 4103 } 4104 4105 /** 4106 * __vmalloc_node - allocate virtually contiguous memory 4107 * @size: allocation size 4108 * @align: desired alignment 4109 * @gfp_mask: flags for the page level allocator 4110 * @node: node to use for allocation or NUMA_NO_NODE 4111 * @caller: caller's return address 4112 * 4113 * Allocate enough pages to cover @size from the page level allocator with 4114 * @gfp_mask flags. Map them into contiguous kernel virtual space. 4115 * 4116 * Semantics of @gfp_mask (including reclaim/retry modifiers such as 4117 * __GFP_NOFAIL) are the same as in __vmalloc_node_range_noprof(). 4118 * 4119 * Return: pointer to the allocated memory or %NULL on error 4120 */ 4121 void *__vmalloc_node_noprof(unsigned long size, unsigned long align, 4122 gfp_t gfp_mask, int node, const void *caller) 4123 { 4124 return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END, 4125 gfp_mask, PAGE_KERNEL, 0, node, caller); 4126 } 4127 /* 4128 * This is only for performance analysis of vmalloc and stress purpose. 4129 * It is required by vmalloc test module, therefore do not use it other 4130 * than that. 4131 */ 4132 #ifdef CONFIG_TEST_VMALLOC_MODULE 4133 EXPORT_SYMBOL_GPL(__vmalloc_node_noprof); 4134 #endif 4135 4136 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) 4137 { 4138 if (unlikely(gfp_mask & ~GFP_VMALLOC_SUPPORTED)) 4139 gfp_mask = vmalloc_fix_flags(gfp_mask); 4140 return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE, 4141 __builtin_return_address(0)); 4142 } 4143 EXPORT_SYMBOL(__vmalloc_noprof); 4144 4145 /** 4146 * vmalloc - allocate virtually contiguous memory 4147 * @size: allocation size 4148 * 4149 * Allocate enough pages to cover @size from the page level 4150 * allocator and map them into contiguous kernel virtual space. 4151 * 4152 * For tight control over page level allocator and protection flags 4153 * use __vmalloc() instead. 4154 * 4155 * Return: pointer to the allocated memory or %NULL on error 4156 */ 4157 void *vmalloc_noprof(unsigned long size) 4158 { 4159 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE, 4160 __builtin_return_address(0)); 4161 } 4162 EXPORT_SYMBOL(vmalloc_noprof); 4163 4164 /** 4165 * vmalloc_huge_node - allocate virtually contiguous memory, allow huge pages 4166 * @size: allocation size 4167 * @gfp_mask: flags for the page level allocator 4168 * @node: node to use for allocation or NUMA_NO_NODE 4169 * 4170 * Allocate enough pages to cover @size from the page level 4171 * allocator and map them into contiguous kernel virtual space. 
4172 * If @size is greater than or equal to PMD_SIZE, allow using 4173 * huge pages for the memory 4174 * 4175 * Return: pointer to the allocated memory or %NULL on error 4176 */ 4177 void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) 4178 { 4179 if (unlikely(gfp_mask & ~GFP_VMALLOC_SUPPORTED)) 4180 gfp_mask = vmalloc_fix_flags(gfp_mask); 4181 return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, 4182 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, 4183 node, __builtin_return_address(0)); 4184 } 4185 EXPORT_SYMBOL_GPL(vmalloc_huge_node_noprof); 4186 4187 /** 4188 * vzalloc - allocate virtually contiguous memory with zero fill 4189 * @size: allocation size 4190 * 4191 * Allocate enough pages to cover @size from the page level 4192 * allocator and map them into contiguous kernel virtual space. 4193 * The memory allocated is set to zero. 4194 * 4195 * For tight control over page level allocator and protection flags 4196 * use __vmalloc() instead. 4197 * 4198 * Return: pointer to the allocated memory or %NULL on error 4199 */ 4200 void *vzalloc_noprof(unsigned long size) 4201 { 4202 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 4203 __builtin_return_address(0)); 4204 } 4205 EXPORT_SYMBOL(vzalloc_noprof); 4206 4207 /** 4208 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 4209 * @size: allocation size 4210 * 4211 * The resulting memory area is zeroed so it can be mapped to userspace 4212 * without leaking data. 4213 * 4214 * Return: pointer to the allocated memory or %NULL on error 4215 */ 4216 void *vmalloc_user_noprof(unsigned long size) 4217 { 4218 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, 4219 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 4220 VM_USERMAP, NUMA_NO_NODE, 4221 __builtin_return_address(0)); 4222 } 4223 EXPORT_SYMBOL(vmalloc_user_noprof); 4224 4225 /** 4226 * vmalloc_node - allocate memory on a specific node 4227 * @size: allocation size 4228 * @node: numa node 4229 * 4230 * Allocate enough pages to cover @size from the page level 4231 * allocator and map them into contiguous kernel virtual space. 4232 * 4233 * For tight control over page level allocator and protection flags 4234 * use __vmalloc() instead. 4235 * 4236 * Return: pointer to the allocated memory or %NULL on error 4237 */ 4238 void *vmalloc_node_noprof(unsigned long size, int node) 4239 { 4240 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node, 4241 __builtin_return_address(0)); 4242 } 4243 EXPORT_SYMBOL(vmalloc_node_noprof); 4244 4245 /** 4246 * vzalloc_node - allocate memory on a specific node with zero fill 4247 * @size: allocation size 4248 * @node: numa node 4249 * 4250 * Allocate enough pages to cover @size from the page level 4251 * allocator and map them into contiguous kernel virtual space. 4252 * The memory allocated is set to zero. 
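 *
 * Illustrative sketch only ("nbytes" and "nid" are hypothetical values
 * held by the caller):
 *
 *	void *buf;
 *
 *	buf = vzalloc_node(nbytes, nid);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);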
4253 * 4254 * Return: pointer to the allocated memory or %NULL on error 4255 */ 4256 void *vzalloc_node_noprof(unsigned long size, int node) 4257 { 4258 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node, 4259 __builtin_return_address(0)); 4260 } 4261 EXPORT_SYMBOL(vzalloc_node_noprof); 4262 4263 /** 4264 * vrealloc_node_align - reallocate virtually contiguous memory; contents 4265 * remain unchanged 4266 * @p: object to reallocate memory for 4267 * @size: the size to reallocate 4268 * @align: requested alignment 4269 * @flags: the flags for the page level allocator 4270 * @nid: node number of the target node 4271 * 4272 * If @p is %NULL, vrealloc_XXX() behaves exactly like vmalloc_XXX(). If @size 4273 * is 0 and @p is not a %NULL pointer, the object pointed to is freed. 4274 * 4275 * If the caller wants the new memory to be on specific node *only*, 4276 * __GFP_THISNODE flag should be set, otherwise the function will try to avoid 4277 * reallocation and possibly disregard the specified @nid. 4278 * 4279 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the 4280 * initial memory allocation, every subsequent call to this API for the same 4281 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that 4282 * __GFP_ZERO is not fully honored by this API. 4283 * 4284 * Requesting an alignment that is bigger than the alignment of the existing 4285 * allocation will fail. 4286 * 4287 * In any case, the contents of the object pointed to are preserved up to the 4288 * lesser of the new and old sizes. 4289 * 4290 * This function must not be called concurrently with itself or vfree() for the 4291 * same memory allocation. 4292 * 4293 * Return: pointer to the allocated memory; %NULL if @size is zero or in case of 4294 * failure 4295 */ 4296 void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align, 4297 gfp_t flags, int nid) 4298 { 4299 struct vm_struct *vm = NULL; 4300 size_t alloced_size = 0; 4301 size_t old_size = 0; 4302 void *n; 4303 4304 if (!size) { 4305 vfree(p); 4306 return NULL; 4307 } 4308 4309 if (p) { 4310 vm = find_vm_area(p); 4311 if (unlikely(!vm)) { 4312 WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p); 4313 return NULL; 4314 } 4315 4316 alloced_size = get_vm_area_size(vm); 4317 old_size = vm->requested_size; 4318 if (WARN(alloced_size < old_size, 4319 "vrealloc() has mismatched area vs requested sizes (%p)\n", p)) 4320 return NULL; 4321 if (WARN(!IS_ALIGNED((unsigned long)p, align), 4322 "will not reallocate with a bigger alignment (0x%lx)\n", align)) 4323 return NULL; 4324 if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE && 4325 nid != page_to_nid(vmalloc_to_page(p))) 4326 goto need_realloc; 4327 } 4328 4329 /* 4330 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What 4331 * would be a good heuristic for when to shrink the vm_area? 4332 */ 4333 if (size <= old_size) { 4334 /* Zero out "freed" memory, potentially for future realloc. */ 4335 if (want_init_on_free() || want_init_on_alloc(flags)) 4336 memset((void *)p + size, 0, old_size - size); 4337 vm->requested_size = size; 4338 kasan_vrealloc(p, old_size, size); 4339 return (void *)p; 4340 } 4341 4342 /* 4343 * We already have the bytes available in the allocation; use them. 4344 */ 4345 if (size <= alloced_size) { 4346 /* 4347 * No need to zero memory here, as unused memory will have 4348 * already been zeroed at initial allocation time or during 4349 * realloc shrink time. 
4350 */ 4351 vm->requested_size = size; 4352 kasan_vrealloc(p, old_size, size); 4353 return (void *)p; 4354 } 4355 4356 need_realloc: 4357 /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */ 4358 n = __vmalloc_node_noprof(size, align, flags, nid, __builtin_return_address(0)); 4359 4360 if (!n) 4361 return NULL; 4362 4363 if (p) { 4364 memcpy(n, p, old_size); 4365 vfree(p); 4366 } 4367 4368 return n; 4369 } 4370 EXPORT_SYMBOL(vrealloc_node_align_noprof); 4371 4372 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 4373 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 4374 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 4375 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 4376 #else 4377 /* 4378 * 64b systems should always have either DMA or DMA32 zones. For others 4379 * GFP_DMA32 should do the right thing and use the normal zone. 4380 */ 4381 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 4382 #endif 4383 4384 /** 4385 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 4386 * @size: allocation size 4387 * 4388 * Allocate enough 32bit PA addressable pages to cover @size from the 4389 * page level allocator and map them into contiguous kernel virtual space. 4390 * 4391 * Return: pointer to the allocated memory or %NULL on error 4392 */ 4393 void *vmalloc_32_noprof(unsigned long size) 4394 { 4395 return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 4396 __builtin_return_address(0)); 4397 } 4398 EXPORT_SYMBOL(vmalloc_32_noprof); 4399 4400 /** 4401 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 4402 * @size: allocation size 4403 * 4404 * The resulting memory area is 32bit addressable and zeroed so it can be 4405 * mapped to userspace without leaking data. 4406 * 4407 * Return: pointer to the allocated memory or %NULL on error 4408 */ 4409 void *vmalloc_32_user_noprof(unsigned long size) 4410 { 4411 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, 4412 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 4413 VM_USERMAP, NUMA_NO_NODE, 4414 __builtin_return_address(0)); 4415 } 4416 EXPORT_SYMBOL(vmalloc_32_user_noprof); 4417 4418 /* 4419 * Atomically zero bytes in the iterator. 4420 * 4421 * Returns the number of zeroed bytes. 4422 */ 4423 static size_t zero_iter(struct iov_iter *iter, size_t count) 4424 { 4425 size_t remains = count; 4426 4427 while (remains > 0) { 4428 size_t num, copied; 4429 4430 num = min_t(size_t, remains, PAGE_SIZE); 4431 copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); 4432 remains -= copied; 4433 4434 if (copied < num) 4435 break; 4436 } 4437 4438 return count - remains; 4439 } 4440 4441 /* 4442 * small helper routine, copy contents to iter from addr. 4443 * If the page is not present, fill zero. 4444 * 4445 * Returns the number of copied bytes. 4446 */ 4447 static size_t aligned_vread_iter(struct iov_iter *iter, 4448 const char *addr, size_t count) 4449 { 4450 size_t remains = count; 4451 struct page *page; 4452 4453 while (remains > 0) { 4454 unsigned long offset, length; 4455 size_t copied = 0; 4456 4457 offset = offset_in_page(addr); 4458 length = PAGE_SIZE - offset; 4459 if (length > remains) 4460 length = remains; 4461 page = vmalloc_to_page(addr); 4462 /* 4463 * To do safe access to this _mapped_ area, we need lock. But 4464 * adding lock here means that we need to add overhead of 4465 * vmalloc()/vfree() calls for this _debug_ interface, rarely 4466 * used. 
Instead of that, we'll use a local mapping via
		 * copy_page_to_iter_nofault() and accept a small overhead in
		 * this access function.
		 */
		if (page)
			copied = copy_page_to_iter_nofault(page, offset,
							   length, iter);
		else
			copied = zero_iter(iter, length);

		addr += copied;
		remains -= copied;

		if (copied != length)
			break;
	}

	return count - remains;
}

/*
 * Read from a vm_map_ram region of memory.
 *
 * Returns the number of copied bytes.
 */
static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
				  size_t count, unsigned long flags)
{
	char *start;
	struct vmap_block *vb;
	struct xarray *xa;
	unsigned long offset;
	unsigned int rs, re;
	size_t remains, n;

	/*
	 * If it's an area created directly by the vm_map_ram() interface,
	 * i.e. not further subdivided and delegated to vmap_block management,
	 * handle it here.
	 */
	if (!(flags & VMAP_BLOCK))
		return aligned_vread_iter(iter, addr, count);

	remains = count;

	/*
	 * The area is split into regions and tracked with vmap_block; read out
	 * each region and zero-fill the holes between regions.
	 */
	xa = addr_to_vb_xa((unsigned long) addr);
	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
	if (!vb)
		goto finished_zero;

	spin_lock(&vb->lock);
	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
		spin_unlock(&vb->lock);
		goto finished_zero;
	}

	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
		size_t copied;

		if (remains == 0)
			goto finished;

		start = vmap_block_vaddr(vb->va->va_start, rs);

		if (addr < start) {
			size_t to_zero = min_t(size_t, start - addr, remains);
			size_t zeroed = zero_iter(iter, to_zero);

			addr += zeroed;
			remains -= zeroed;

			if (remains == 0 || zeroed != to_zero)
				goto finished;
		}

		/* It could start reading from the middle of a used region. */
		offset = offset_in_page(addr);
		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
		if (n > remains)
			n = remains;

		copied = aligned_vread_iter(iter, start + offset, n);

		addr += copied;
		remains -= copied;

		if (copied != n)
			goto finished;
	}

	spin_unlock(&vb->lock);

finished_zero:
	/* zero-fill the remaining dirty or free regions */
	return count - remains + zero_iter(iter, remains);
finished:
	/* We couldn't copy/zero everything. */
	spin_unlock(&vb->lock);
	return count - remains;
}

/**
 * vread_iter() - read vmalloc area in a safe way to an iterator.
 * @iter: the iterator to which data should be written.
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * This function checks that @addr is a valid vmalloc'ed area and
 * copies data from that area to the given iterator. If the memory range
 * [addr...addr+count) includes some valid address, data is copied to
 * the proper part of @iter. If there are memory holes, they'll be zero-filled.
 * IOREMAP areas are treated as memory holes and no copy is done.
 *
 * If [addr...addr+count) doesn't intersect any live
 * vm_struct area, 0 is returned.
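 *
 * Illustrative sketch only (error handling omitted; "buf", "len" and
 * "vaddr" are hypothetical caller-provided values), showing how a reader
 * could feed a kernel buffer through an iterator:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	long copied;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *	copied = vread_iter(&iter, vaddr, len);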
4585 * 4586 * Note: In usual ops, vread() is never necessary because the caller 4587 * should know vmalloc() area is valid and can use memcpy(). 4588 * This is for routines which have to access vmalloc area without 4589 * any information, as /proc/kcore. 4590 * 4591 * Return: number of bytes for which addr and buf should be increased 4592 * (same number as @count) or %0 if [addr...addr+count) doesn't 4593 * include any intersection with valid vmalloc area 4594 */ 4595 long vread_iter(struct iov_iter *iter, const char *addr, size_t count) 4596 { 4597 struct vmap_node *vn; 4598 struct vmap_area *va; 4599 struct vm_struct *vm; 4600 char *vaddr; 4601 size_t n, size, flags, remains; 4602 unsigned long next; 4603 4604 addr = kasan_reset_tag(addr); 4605 4606 /* Don't allow overflow */ 4607 if ((unsigned long) addr + count < count) 4608 count = -(unsigned long) addr; 4609 4610 remains = count; 4611 4612 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); 4613 if (!vn) 4614 goto finished_zero; 4615 4616 /* no intersects with alive vmap_area */ 4617 if ((unsigned long)addr + remains <= va->va_start) 4618 goto finished_zero; 4619 4620 do { 4621 size_t copied; 4622 4623 if (remains == 0) 4624 goto finished; 4625 4626 vm = va->vm; 4627 flags = va->flags & VMAP_FLAGS_MASK; 4628 /* 4629 * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need 4630 * be set together with VMAP_RAM. 4631 */ 4632 WARN_ON(flags == VMAP_BLOCK); 4633 4634 if (!vm && !flags) 4635 goto next_va; 4636 4637 if (vm && (vm->flags & VM_UNINITIALIZED)) 4638 goto next_va; 4639 4640 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 4641 smp_rmb(); 4642 4643 vaddr = (char *) va->va_start; 4644 size = vm ? get_vm_area_size(vm) : va_size(va); 4645 4646 if (addr >= vaddr + size) 4647 goto next_va; 4648 4649 if (addr < vaddr) { 4650 size_t to_zero = min_t(size_t, vaddr - addr, remains); 4651 size_t zeroed = zero_iter(iter, to_zero); 4652 4653 addr += zeroed; 4654 remains -= zeroed; 4655 4656 if (remains == 0 || zeroed != to_zero) 4657 goto finished; 4658 } 4659 4660 n = vaddr + size - addr; 4661 if (n > remains) 4662 n = remains; 4663 4664 if (flags & VMAP_RAM) 4665 copied = vmap_ram_vread_iter(iter, addr, n, flags); 4666 else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE)))) 4667 copied = aligned_vread_iter(iter, addr, n); 4668 else /* IOREMAP | SPARSE area is treated as memory hole */ 4669 copied = zero_iter(iter, n); 4670 4671 addr += copied; 4672 remains -= copied; 4673 4674 if (copied != n) 4675 goto finished; 4676 4677 next_va: 4678 next = va->va_end; 4679 spin_unlock(&vn->busy.lock); 4680 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); 4681 4682 finished_zero: 4683 if (vn) 4684 spin_unlock(&vn->busy.lock); 4685 4686 /* zero-fill memory holes */ 4687 return count - remains + zero_iter(iter, remains); 4688 finished: 4689 /* Nothing remains, or We couldn't copy/zero everything. */ 4690 if (vn) 4691 spin_unlock(&vn->busy.lock); 4692 4693 return count - remains; 4694 } 4695 4696 /** 4697 * remap_vmalloc_range_partial - map vmalloc pages to userspace 4698 * @vma: vma to cover 4699 * @uaddr: target user address to start at 4700 * @kaddr: virtual address of vmalloc kernel memory 4701 * @pgoff: offset from @kaddr to start at 4702 * @size: size of map area 4703 * 4704 * Returns: 0 for success, -Exxx on failure 4705 * 4706 * This function checks that @kaddr is a valid vmalloc'ed area, 4707 * and that it is big enough to cover the range starting at 4708 * @uaddr in @vma. 
Will return failure if that criteria isn't 4709 * met. 4710 * 4711 * Similar to remap_pfn_range() (see mm/memory.c) 4712 */ 4713 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 4714 void *kaddr, unsigned long pgoff, 4715 unsigned long size) 4716 { 4717 struct vm_struct *area; 4718 unsigned long off; 4719 unsigned long end_index; 4720 4721 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) 4722 return -EINVAL; 4723 4724 size = PAGE_ALIGN(size); 4725 4726 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 4727 return -EINVAL; 4728 4729 area = find_vm_area(kaddr); 4730 if (!area) 4731 return -EINVAL; 4732 4733 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 4734 return -EINVAL; 4735 4736 if (check_add_overflow(size, off, &end_index) || 4737 end_index > get_vm_area_size(area)) 4738 return -EINVAL; 4739 kaddr += off; 4740 4741 do { 4742 struct page *page = vmalloc_to_page(kaddr); 4743 int ret; 4744 4745 ret = vm_insert_page(vma, uaddr, page); 4746 if (ret) 4747 return ret; 4748 4749 uaddr += PAGE_SIZE; 4750 kaddr += PAGE_SIZE; 4751 size -= PAGE_SIZE; 4752 } while (size > 0); 4753 4754 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); 4755 4756 return 0; 4757 } 4758 4759 /** 4760 * remap_vmalloc_range - map vmalloc pages to userspace 4761 * @vma: vma to cover (map full range of vma) 4762 * @addr: vmalloc memory 4763 * @pgoff: number of pages into addr before first page to map 4764 * 4765 * Returns: 0 for success, -Exxx on failure 4766 * 4767 * This function checks that addr is a valid vmalloc'ed area, and 4768 * that it is big enough to cover the vma. Will return failure if 4769 * that criteria isn't met. 4770 * 4771 * Similar to remap_pfn_range() (see mm/memory.c) 4772 */ 4773 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 4774 unsigned long pgoff) 4775 { 4776 return remap_vmalloc_range_partial(vma, vma->vm_start, 4777 addr, pgoff, 4778 vma->vm_end - vma->vm_start); 4779 } 4780 EXPORT_SYMBOL(remap_vmalloc_range); 4781 4782 void free_vm_area(struct vm_struct *area) 4783 { 4784 struct vm_struct *ret; 4785 ret = remove_vm_area(area->addr); 4786 BUG_ON(ret != area); 4787 kfree(area); 4788 } 4789 EXPORT_SYMBOL_GPL(free_vm_area); 4790 4791 #ifdef CONFIG_SMP 4792 static struct vmap_area *node_to_va(struct rb_node *n) 4793 { 4794 return rb_entry_safe(n, struct vmap_area, rb_node); 4795 } 4796 4797 /** 4798 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 4799 * @addr: target address 4800 * 4801 * Returns: vmap_area if it is found. If there is no such area 4802 * the first highest(reverse order) vmap_area is returned 4803 * i.e. va->va_start < addr && va->va_end < addr or NULL 4804 * if there are no any areas before @addr. 4805 */ 4806 static struct vmap_area * 4807 pvm_find_va_enclose_addr(unsigned long addr) 4808 { 4809 struct vmap_area *va, *tmp; 4810 struct rb_node *n; 4811 4812 n = free_vmap_area_root.rb_node; 4813 va = NULL; 4814 4815 while (n) { 4816 tmp = rb_entry(n, struct vmap_area, rb_node); 4817 if (tmp->va_start <= addr) { 4818 va = tmp; 4819 if (tmp->va_end >= addr) 4820 break; 4821 4822 n = n->rb_right; 4823 } else { 4824 n = n->rb_left; 4825 } 4826 } 4827 4828 return va; 4829 } 4830 4831 /** 4832 * pvm_determine_end_from_reverse - find the highest aligned address 4833 * of free block below VMALLOC_END 4834 * @va: 4835 * in - the VA we start the search(reverse order); 4836 * out - the VA with the highest aligned end address. 
 * @align: alignment for required highest address
 *
 * Returns: determined end address within vmap_area
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 * vm_structs on success, %NULL on failure
 *
 * The percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 * be scattered pretty far apart, with the distance between two areas
 * easily going up to gigabytes. To avoid interacting with regular
 * vmallocs, these areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans free blocks from the end looking
 * for a matching base. While scanning, if any of the areas do not fit,
 * the base address is pulled down to fit that area. Scanning is repeated
 * until all the areas fit, then all necessary data structures are inserted
 * and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end, orig_start, orig_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly?
*/ 4902 BUG_ON(!IS_ALIGNED(offsets[area], align)); 4903 BUG_ON(!IS_ALIGNED(sizes[area], align)); 4904 4905 /* detect the area with the highest address */ 4906 if (start > offsets[last_area]) 4907 last_area = area; 4908 4909 for (area2 = area + 1; area2 < nr_vms; area2++) { 4910 unsigned long start2 = offsets[area2]; 4911 unsigned long end2 = start2 + sizes[area2]; 4912 4913 BUG_ON(start2 < end && start < end2); 4914 } 4915 } 4916 last_end = offsets[last_area] + sizes[last_area]; 4917 4918 if (vmalloc_end - vmalloc_start < last_end) { 4919 WARN_ON(true); 4920 return NULL; 4921 } 4922 4923 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 4924 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 4925 if (!vas || !vms) 4926 goto err_free2; 4927 4928 for (area = 0; area < nr_vms; area++) { 4929 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 4930 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 4931 if (!vas[area] || !vms[area]) 4932 goto err_free; 4933 } 4934 retry: 4935 spin_lock(&free_vmap_area_lock); 4936 4937 /* start scanning - we scan from the top, begin with the last area */ 4938 area = term_area = last_area; 4939 start = offsets[area]; 4940 end = start + sizes[area]; 4941 4942 va = pvm_find_va_enclose_addr(vmalloc_end); 4943 base = pvm_determine_end_from_reverse(&va, align) - end; 4944 4945 while (true) { 4946 /* 4947 * base might have underflowed, add last_end before 4948 * comparing. 4949 */ 4950 if (base + last_end < vmalloc_start + last_end) 4951 goto overflow; 4952 4953 /* 4954 * Fitting base has not been found. 4955 */ 4956 if (va == NULL) 4957 goto overflow; 4958 4959 /* 4960 * If required width exceeds current VA block, move 4961 * base downwards and then recheck. 4962 */ 4963 if (base + end > va->va_end) { 4964 base = pvm_determine_end_from_reverse(&va, align) - end; 4965 term_area = area; 4966 continue; 4967 } 4968 4969 /* 4970 * If this VA does not fit, move base downwards and recheck. 4971 */ 4972 if (base + start < va->va_start) { 4973 va = node_to_va(rb_prev(&va->rb_node)); 4974 base = pvm_determine_end_from_reverse(&va, align) - end; 4975 term_area = area; 4976 continue; 4977 } 4978 4979 /* 4980 * This area fits, move on to the previous one. If 4981 * the previous one is the terminal one, we're done. 4982 */ 4983 area = (area + nr_vms - 1) % nr_vms; 4984 if (area == term_area) 4985 break; 4986 4987 start = offsets[area]; 4988 end = start + sizes[area]; 4989 va = pvm_find_va_enclose_addr(base + end); 4990 } 4991 4992 /* we've found a fitting base, insert all va's */ 4993 for (area = 0; area < nr_vms; area++) { 4994 int ret; 4995 4996 start = base + offsets[area]; 4997 size = sizes[area]; 4998 4999 va = pvm_find_va_enclose_addr(start); 5000 if (WARN_ON_ONCE(va == NULL)) 5001 /* It is a BUG(), but trigger recovery instead. */ 5002 goto recovery; 5003 5004 ret = va_clip(&free_vmap_area_root, 5005 &free_vmap_area_list, va, start, size); 5006 if (WARN_ON_ONCE(unlikely(ret))) 5007 /* It is a BUG(), but trigger recovery instead. */ 5008 goto recovery; 5009 5010 /* Allocated area. 
*/ 5011 va = vas[area]; 5012 va->va_start = start; 5013 va->va_end = start + size; 5014 } 5015 5016 spin_unlock(&free_vmap_area_lock); 5017 5018 /* populate the kasan shadow space */ 5019 for (area = 0; area < nr_vms; area++) { 5020 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL)) 5021 goto err_free_shadow; 5022 } 5023 5024 /* insert all vm's */ 5025 for (area = 0; area < nr_vms; area++) { 5026 struct vmap_node *vn = addr_to_node(vas[area]->va_start); 5027 5028 spin_lock(&vn->busy.lock); 5029 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); 5030 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 5031 pcpu_get_vm_areas); 5032 spin_unlock(&vn->busy.lock); 5033 } 5034 5035 /* 5036 * Mark allocated areas as accessible. Do it now as a best-effort 5037 * approach, as they can be mapped outside of vmalloc code. 5038 * With hardware tag-based KASAN, marking is skipped for 5039 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 5040 */ 5041 kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL); 5042 5043 kfree(vas); 5044 return vms; 5045 5046 recovery: 5047 /* 5048 * Remove previously allocated areas. There is no 5049 * need in removing these areas from the busy tree, 5050 * because they are inserted only on the final step 5051 * and when pcpu_get_vm_areas() is success. 5052 */ 5053 while (area--) { 5054 orig_start = vas[area]->va_start; 5055 orig_end = vas[area]->va_end; 5056 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 5057 &free_vmap_area_list); 5058 if (va) 5059 kasan_release_vmalloc(orig_start, orig_end, 5060 va->va_start, va->va_end, 5061 KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH); 5062 vas[area] = NULL; 5063 } 5064 5065 overflow: 5066 spin_unlock(&free_vmap_area_lock); 5067 if (!purged) { 5068 reclaim_and_purge_vmap_areas(); 5069 purged = true; 5070 5071 /* Before "retry", check if we recover. */ 5072 for (area = 0; area < nr_vms; area++) { 5073 if (vas[area]) 5074 continue; 5075 5076 vas[area] = kmem_cache_zalloc( 5077 vmap_area_cachep, GFP_KERNEL); 5078 if (!vas[area]) 5079 goto err_free; 5080 } 5081 5082 goto retry; 5083 } 5084 5085 err_free: 5086 for (area = 0; area < nr_vms; area++) { 5087 if (vas[area]) 5088 kmem_cache_free(vmap_area_cachep, vas[area]); 5089 5090 kfree(vms[area]); 5091 } 5092 err_free2: 5093 kfree(vas); 5094 kfree(vms); 5095 return NULL; 5096 5097 err_free_shadow: 5098 spin_lock(&free_vmap_area_lock); 5099 /* 5100 * We release all the vmalloc shadows, even the ones for regions that 5101 * hadn't been successfully added. This relies on kasan_release_vmalloc 5102 * being able to tolerate this case. 5103 */ 5104 for (area = 0; area < nr_vms; area++) { 5105 orig_start = vas[area]->va_start; 5106 orig_end = vas[area]->va_end; 5107 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 5108 &free_vmap_area_list); 5109 if (va) 5110 kasan_release_vmalloc(orig_start, orig_end, 5111 va->va_start, va->va_end, 5112 KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH); 5113 vas[area] = NULL; 5114 kfree(vms[area]); 5115 } 5116 spin_unlock(&free_vmap_area_lock); 5117 kfree(vas); 5118 kfree(vms); 5119 return NULL; 5120 } 5121 5122 /** 5123 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 5124 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 5125 * @nr_vms: the number of allocated areas 5126 * 5127 * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
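 *
 * Illustrative pairing only (a sketch, not copied from mm/percpu.c;
 * "offsets", "sizes", "nr" and "align" are hypothetical values owned by
 * the caller):
 *
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr, align);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, nr);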
5128 */ 5129 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 5130 { 5131 int i; 5132 5133 for (i = 0; i < nr_vms; i++) 5134 free_vm_area(vms[i]); 5135 kfree(vms); 5136 } 5137 #endif /* CONFIG_SMP */ 5138 5139 #ifdef CONFIG_PRINTK 5140 bool vmalloc_dump_obj(void *object) 5141 { 5142 const void *caller; 5143 struct vm_struct *vm; 5144 struct vmap_area *va; 5145 struct vmap_node *vn; 5146 unsigned long addr; 5147 unsigned int nr_pages; 5148 5149 addr = PAGE_ALIGN((unsigned long) object); 5150 vn = addr_to_node(addr); 5151 5152 if (!spin_trylock(&vn->busy.lock)) 5153 return false; 5154 5155 va = __find_vmap_area(addr, &vn->busy.root); 5156 if (!va || !va->vm) { 5157 spin_unlock(&vn->busy.lock); 5158 return false; 5159 } 5160 5161 vm = va->vm; 5162 addr = (unsigned long) vm->addr; 5163 caller = vm->caller; 5164 nr_pages = vm->nr_pages; 5165 spin_unlock(&vn->busy.lock); 5166 5167 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", 5168 nr_pages, addr, caller); 5169 5170 return true; 5171 } 5172 #endif 5173 5174 #ifdef CONFIG_PROC_FS 5175 5176 /* 5177 * Print number of pages allocated on each memory node. 5178 * 5179 * This function can only be called if CONFIG_NUMA is enabled 5180 * and VM_UNINITIALIZED bit in v->flags is disabled. 5181 */ 5182 static void show_numa_info(struct seq_file *m, struct vm_struct *v, 5183 unsigned int *counters) 5184 { 5185 unsigned int nr; 5186 unsigned int step = 1U << vm_area_page_order(v); 5187 5188 if (!counters) 5189 return; 5190 5191 memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 5192 5193 for (nr = 0; nr < v->nr_pages; nr += step) 5194 counters[page_to_nid(v->pages[nr])] += step; 5195 for_each_node_state(nr, N_HIGH_MEMORY) 5196 if (counters[nr]) 5197 seq_printf(m, " N%u=%u", nr, counters[nr]); 5198 } 5199 5200 static void show_purge_info(struct seq_file *m) 5201 { 5202 struct vmap_node *vn; 5203 struct vmap_area *va; 5204 5205 for_each_vmap_node(vn) { 5206 spin_lock(&vn->lazy.lock); 5207 list_for_each_entry(va, &vn->lazy.head, list) { 5208 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 5209 (void *)va->va_start, (void *)va->va_end, 5210 va_size(va)); 5211 } 5212 spin_unlock(&vn->lazy.lock); 5213 } 5214 } 5215 5216 static int vmalloc_info_show(struct seq_file *m, void *p) 5217 { 5218 struct vmap_node *vn; 5219 struct vmap_area *va; 5220 struct vm_struct *v; 5221 unsigned int *counters; 5222 5223 if (IS_ENABLED(CONFIG_NUMA)) 5224 counters = kmalloc_array(nr_node_ids, sizeof(unsigned int), GFP_KERNEL); 5225 5226 for_each_vmap_node(vn) { 5227 spin_lock(&vn->busy.lock); 5228 list_for_each_entry(va, &vn->busy.head, list) { 5229 if (!va->vm) { 5230 if (va->flags & VMAP_RAM) 5231 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 5232 (void *)va->va_start, (void *)va->va_end, 5233 va_size(va)); 5234 5235 continue; 5236 } 5237 5238 v = va->vm; 5239 if (v->flags & VM_UNINITIALIZED) 5240 continue; 5241 5242 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 5243 smp_rmb(); 5244 5245 seq_printf(m, "0x%pK-0x%pK %7ld", 5246 v->addr, v->addr + v->size, v->size); 5247 5248 if (v->caller) 5249 seq_printf(m, " %pS", v->caller); 5250 5251 if (v->nr_pages) 5252 seq_printf(m, " pages=%d", v->nr_pages); 5253 5254 if (v->phys_addr) 5255 seq_printf(m, " phys=%pa", &v->phys_addr); 5256 5257 if (v->flags & VM_IOREMAP) 5258 seq_puts(m, " ioremap"); 5259 5260 if (v->flags & VM_SPARSE) 5261 seq_puts(m, " sparse"); 5262 5263 if (v->flags & VM_ALLOC) 5264 seq_puts(m, " vmalloc"); 5265 5266 if (v->flags & VM_MAP) 5267 seq_puts(m, " vmap"); 
5268 5269 if (v->flags & VM_USERMAP) 5270 seq_puts(m, " user"); 5271 5272 if (v->flags & VM_DMA_COHERENT) 5273 seq_puts(m, " dma-coherent"); 5274 5275 if (is_vmalloc_addr(v->pages)) 5276 seq_puts(m, " vpages"); 5277 5278 if (IS_ENABLED(CONFIG_NUMA)) 5279 show_numa_info(m, v, counters); 5280 5281 seq_putc(m, '\n'); 5282 } 5283 spin_unlock(&vn->busy.lock); 5284 } 5285 5286 /* 5287 * As a final step, dump "unpurged" areas. 5288 */ 5289 show_purge_info(m); 5290 if (IS_ENABLED(CONFIG_NUMA)) 5291 kfree(counters); 5292 return 0; 5293 } 5294 5295 static int __init proc_vmalloc_init(void) 5296 { 5297 proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show); 5298 return 0; 5299 } 5300 module_init(proc_vmalloc_init); 5301 5302 #endif 5303 5304 static void __init vmap_init_free_space(void) 5305 { 5306 unsigned long vmap_start = 1; 5307 const unsigned long vmap_end = ULONG_MAX; 5308 struct vmap_area *free; 5309 struct vm_struct *busy; 5310 5311 /* 5312 * B F B B B F 5313 * -|-----|.....|-----|-----|-----|.....|- 5314 * | The KVA space | 5315 * |<--------------------------------->| 5316 */ 5317 for (busy = vmlist; busy; busy = busy->next) { 5318 if ((unsigned long) busy->addr - vmap_start > 0) { 5319 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5320 if (!WARN_ON_ONCE(!free)) { 5321 free->va_start = vmap_start; 5322 free->va_end = (unsigned long) busy->addr; 5323 5324 insert_vmap_area_augment(free, NULL, 5325 &free_vmap_area_root, 5326 &free_vmap_area_list); 5327 } 5328 } 5329 5330 vmap_start = (unsigned long) busy->addr + busy->size; 5331 } 5332 5333 if (vmap_end - vmap_start > 0) { 5334 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5335 if (!WARN_ON_ONCE(!free)) { 5336 free->va_start = vmap_start; 5337 free->va_end = vmap_end; 5338 5339 insert_vmap_area_augment(free, NULL, 5340 &free_vmap_area_root, 5341 &free_vmap_area_list); 5342 } 5343 } 5344 } 5345 5346 static void vmap_init_nodes(void) 5347 { 5348 struct vmap_node *vn; 5349 int i; 5350 5351 #if BITS_PER_LONG == 64 5352 /* 5353 * A high threshold of max nodes is fixed and bound to 128, 5354 * thus a scale factor is 1 for systems where number of cores 5355 * are less or equal to specified threshold. 5356 * 5357 * As for NUMA-aware notes. For bigger systems, for example 5358 * NUMA with multi-sockets, where we can end-up with thousands 5359 * of cores in total, a "sub-numa-clustering" should be added. 5360 * 5361 * In this case a NUMA domain is considered as a single entity 5362 * with dedicated sub-nodes in it which describe one group or 5363 * set of cores. Therefore a per-domain purging is supposed to 5364 * be added as well as a per-domain balancing. 5365 */ 5366 int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); 5367 5368 if (n > 1) { 5369 vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT); 5370 if (vn) { 5371 /* Node partition is 16 pages. */ 5372 vmap_zone_size = (1 << 4) * PAGE_SIZE; 5373 nr_vmap_nodes = n; 5374 vmap_nodes = vn; 5375 } else { 5376 pr_err("Failed to allocate an array. 
Disable a node layer\n"); 5377 } 5378 } 5379 #endif 5380 5381 for_each_vmap_node(vn) { 5382 vn->busy.root = RB_ROOT; 5383 INIT_LIST_HEAD(&vn->busy.head); 5384 spin_lock_init(&vn->busy.lock); 5385 5386 vn->lazy.root = RB_ROOT; 5387 INIT_LIST_HEAD(&vn->lazy.head); 5388 spin_lock_init(&vn->lazy.lock); 5389 5390 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { 5391 INIT_LIST_HEAD(&vn->pool[i].head); 5392 WRITE_ONCE(vn->pool[i].len, 0); 5393 } 5394 5395 spin_lock_init(&vn->pool_lock); 5396 } 5397 } 5398 5399 static unsigned long 5400 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 5401 { 5402 unsigned long count = 0; 5403 struct vmap_node *vn; 5404 int i; 5405 5406 for_each_vmap_node(vn) { 5407 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) 5408 count += READ_ONCE(vn->pool[i].len); 5409 } 5410 5411 return count ? count : SHRINK_EMPTY; 5412 } 5413 5414 static unsigned long 5415 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 5416 { 5417 struct vmap_node *vn; 5418 5419 for_each_vmap_node(vn) 5420 decay_va_pool_node(vn, true); 5421 5422 return SHRINK_STOP; 5423 } 5424 5425 void __init vmalloc_init(void) 5426 { 5427 struct shrinker *vmap_node_shrinker; 5428 struct vmap_area *va; 5429 struct vmap_node *vn; 5430 struct vm_struct *tmp; 5431 int i; 5432 5433 /* 5434 * Create the cache for vmap_area objects. 5435 */ 5436 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 5437 5438 for_each_possible_cpu(i) { 5439 struct vmap_block_queue *vbq; 5440 struct vfree_deferred *p; 5441 5442 vbq = &per_cpu(vmap_block_queue, i); 5443 spin_lock_init(&vbq->lock); 5444 INIT_LIST_HEAD(&vbq->free); 5445 p = &per_cpu(vfree_deferred, i); 5446 init_llist_head(&p->list); 5447 INIT_WORK(&p->wq, delayed_vfree_work); 5448 xa_init(&vbq->vmap_blocks); 5449 } 5450 5451 /* 5452 * Setup nodes before importing vmlist. 5453 */ 5454 vmap_init_nodes(); 5455 5456 /* Import existing vmlist entries. */ 5457 for (tmp = vmlist; tmp; tmp = tmp->next) { 5458 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5459 if (WARN_ON_ONCE(!va)) 5460 continue; 5461 5462 va->va_start = (unsigned long)tmp->addr; 5463 va->va_end = va->va_start + tmp->size; 5464 va->vm = tmp; 5465 5466 vn = addr_to_node(va->va_start); 5467 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 5468 } 5469 5470 /* 5471 * Now we can initialize a free vmap space. 5472 */ 5473 vmap_init_free_space(); 5474 vmap_initialized = true; 5475 5476 vmap_node_shrinker = shrinker_alloc(0, "vmap-node"); 5477 if (!vmap_node_shrinker) { 5478 pr_err("Failed to allocate vmap-node shrinker!\n"); 5479 return; 5480 } 5481 5482 vmap_node_shrinker->count_objects = vmap_node_shrink_count; 5483 vmap_node_shrinker->scan_objects = vmap_node_shrink_scan; 5484 shrinker_register(vmap_node_shrinker); 5485 } 5486