// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(ptep_get(pte)));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}
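
/*
 * Illustrative note (editorial addition, not from the original source): the
 * vmap_try_huge_{pmd,pud,p4d}() helpers above apply the same preconditions
 * before installing a leaf entry. For example, with 4K base pages and a 2M
 * PMD, an ioremap of exactly PMD_SIZE whose virtual address and phys_addr
 * are both PMD_SIZE aligned, on an architecture where
 * arch_vmap_pmd_supported(prot) is true and max_page_shift permits it, is
 * mapped with a single PMD entry; any failed check falls back to mapping
 * the range with base-page PTEs.
 */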

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift,
			pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int vmap_page_range(unsigned long addr, unsigned long end,
		    phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	struct vm_struct *area;

	area = find_vm_area((void *)addr);
	if (!area || !(area->flags & VM_IOREMAP)) {
		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
		return -EINVAL;
	}
	if (addr != (unsigned long)area->addr ||
	    (void *)end != area->addr + get_vm_area_size(area)) {
		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
			  addr, end, (long)area->addr,
			  (long)area->addr + get_vm_area_size(area));
		return -ERANGE;
	}
	return vmap_page_range(addr, end, phys_addr, prot);
}

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte))))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}

static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
				unsigned long end)
{
	might_sleep();
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;
	return 0;
}

/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages)
{
	int err;

	err = check_sparse_vm_area(area, start, end);
	if (err)
		return err;

	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}

/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end)
{
	if (check_sparse_vm_area(area, start, end))
		return;

	vunmap_range(start, end);
}
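
/*
 * Illustrative sketch (editorial addition, hypothetical usage, not from the
 * original source): a caller owning a VM_SPARSE area obtained from
 * get_vm_area() can populate and depopulate page-aligned sub-ranges of it
 * with the two helpers above, e.g.:
 *
 *	area = get_vm_area(SZ_1M, VM_SPARSE);
 *	err = vm_area_map_pages(area, start, start + nr * PAGE_SIZE, pages);
 *	...
 *	vm_area_unmap_pages(area, start, start + nr * PAGE_SIZE);
 *
 * where "start" lies inside [area->addr, area->addr + get_vm_area_size(area)).
 */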

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put them in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
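
/*
 * Illustrative example (editorial addition, hypothetical usage, not from the
 * original source): these two helpers let a caller translate an address
 * inside a vmalloc()ed buffer into its backing page or pfn, e.g.:
 *
 *	void *buf = vmalloc(len);
 *	struct page *pg = vmalloc_to_page(buf + offset);
 *	unsigned long pfn = vmalloc_to_pfn(buf + offset);
 *
 * The address must point into a mapped part of the buffer; for huge vmap
 * mappings the tail page covering the base-page address is returned.
 */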


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(free_vmap_area_lock);
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster, especially in the "no edge" splitting of
 * a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from atomic context, so that
 * more permissive allocation masks can be used.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

/*
 * This structure defines a single, solid model where a list and
 * rb-tree are part of one entity protected by the lock. Nodes are
 * sorted in ascending order, thus for O(1) access to left/right
 * neighbors a list is used as well as for sequential traversal.
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/*
 * A fast size storage contains VAs up to 1M size. A pool consists
 * of ready-to-go VAs of certain sizes linked to each other.
 * An index in the pool-array corresponds to the number of pages - 1.
 */
#define MAX_VA_SIZE_PAGES 256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};

/*
 * An effective vmap-node logic. Users make use of nodes instead
 * of a global heap. This balances access and mitigates
 * contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;

/*
 * Initial setup consists of one single node, i.e. balancing
 * is fully disabled. Later on, after vmap is initialized, these
 * parameters are updated based on the system capacity.
 */
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;

static inline unsigned int
addr_to_node_id(unsigned long addr)
{
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}

static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
	return &vmap_nodes[addr_to_node_id(addr)];
}

static inline struct vmap_node *
id_to_node(unsigned int id)
{
	return &vmap_nodes[id % nr_vmap_nodes];
}

/*
 * We use the value 0 to represent "no node", that is why
 * an encoded value will be the node-id incremented by 1.
 * It is always greater than 0. A valid node_id which can
 * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
 * is not valid, 0 is returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return (node_id + 1) << BITS_PER_BYTE;

	/* Warn and no node encoded. */
	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
	return 0;
}

/*
 * Returns the decoded node-id; the valid range is within
 * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is
 * returned if the extracted data is wrong.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;

	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return node_id;

	/* If it was _not_ zero, warn. */
	WARN_ONCE(node_id != UINT_MAX,
		"Decode wrong node id (%d)\n", node_id);

	return nr_vmap_nodes;
}
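
/*
 * Illustrative worked example (editorial addition, not from the original
 * source): with BITS_PER_BYTE == 8 and, say, nr_vmap_nodes == 4,
 * encode_vn_id(2) yields (2 + 1) << 8 == 0x300, which alloc_vmap_area()
 * ORs into va->flags alongside the VMAP_* area flags. decode_vn_id(0x300)
 * computes (0x300 >> 8) - 1 == 2 again, while decode_vn_id(0) underflows
 * to UINT_MAX and is silently reported as "no node" (nr_vmap_nodes).
 */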

static bool
is_vn_id_valid(unsigned int node_id)
{
	if (node_id < nr_vmap_nodes)
		return true;

	return false;
}

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

/*
 * Returns a node where the first VA that satisfies addr < va_end resides.
 * On success, the node is locked. The user is responsible for unlocking it
 * when access to the VA is no longer needed.
 *
 * Returns NULL if nothing found.
 */
static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
	unsigned long va_start_lowest;
	struct vmap_node *vn;
	int i;

repeat:
	for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
		vn = &vmap_nodes[i];

		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);

		if (*va)
			if (!va_start_lowest || (*va)->va_start < va_start_lowest)
				va_start_lowest = (*va)->va_start;
		spin_unlock(&vn->busy.lock);
	}

	/*
	 * Check if the found VA still exists, it might have gone away. In that
	 * case we repeat the search because the VA has been removed
	 * concurrently and we need to proceed to the next one, which is a
	 * rare case.
	 */
	if (va_start_lowest) {
		vn = addr_to_node(va_start_lowest);

		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area(va_start_lowest, &vn->busy.root);

		if (*va)
			return vn;

		spin_unlock(&vn->busy.lock);
		goto repeat;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node and its
 * left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of conflicting overlap range
 * have to be declined and actually considered as a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * we name it link, where the new va->rb_node will be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity check.
		 * Trigger the BUG() if there are sides(left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when a node is removed and the tree is rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when the VA size is modified by changing its va_start/va_end,
 * or when a VA is newly inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1,
 * only its subtree_max_size is updated and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from the bottom towards the root until
	 * the calculated maximum available size of a checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If coalescing is not done, a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Although this is
 * buggy behaviour, the system can stay alive and keep
 * going.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}
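
/*
 * Illustrative worked example (editorial addition, not from the original
 * source): suppose the free tree holds [0x1000, 0x2000) and [0x3000, 0x4000)
 * and the range [0x2000, 0x3000) is released. The "next" check first absorbs
 * the freed range into [0x3000, 0x4000), giving [0x2000, 0x4000). The "prev"
 * check then unlinks that node and extends [0x1000, 0x2000) to
 * [0x1000, 0x4000), so a single free block remains and both surplus
 * vmap_area objects are returned to vmap_area_cachep.
 */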

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request given the passed parameters. Please
 * note, with an alignment bigger than PAGE_SIZE, the search length
 * is adjusted to account for worst case alignment overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
	unsigned long align, unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = root->rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * due to the "vstart" restriction or an alignment overhead
			 * that is bigger than PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift the vstart forward. Please note, we update it with
					 * parent's start address adding "1" because we do not want
					 * to enter same sub-tree after it has already been checked
					 * and no suitable free block found there.
					 */
					vstart = va->va_start + 1;
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}
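
/*
 * Illustrative worked example (editorial addition, not from the original
 * source): for a request of 8 * PAGE_SIZE with align == 16 * PAGE_SIZE, the
 * adjusted search length is 8 * PAGE_SIZE + 16 * PAGE_SIZE - 1, i.e. just
 * under 24 pages. Any free block at least that large necessarily contains a
 * 16-page-aligned start address with 8 pages of room after it, so the
 * subtree_max_size pruning above never skips a usable block because of
 * alignment overhead.
 */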

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, head, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
			     unsigned long size, unsigned long align)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
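
/*
 * Illustrative worked example (editorial addition, not from the original
 * source): for a free VA [0x1000, 0x5000), an allocation of [0x1000, 0x5000)
 * is FL_FIT_TYPE, [0x1000, 0x3000) is LE_FIT_TYPE, [0x3000, 0x5000) is
 * RE_FIT_TYPE and [0x2000, 0x4000) is NE_FIT_TYPE. Only the last one needs a
 * second vmap_area for the left remainder, which is what the per-CPU
 * ne_fit_preload_node object is reserved for.
 */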

static __always_inline int
va_clip(struct rb_root *root, struct list_head *head,
		struct vmap_area *va, unsigned long nva_start_addr,
		unsigned long size)
{
	struct vmap_area *lva = NULL;
	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va_augment(va, root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In case of
			 * percpu allocations offsets and sizes are aligned to
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though, as an example it is
			 * a first allocation (early boot up) when we have "one"
			 * big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purpose. That is rare and most of the time does
			 * not occur.
			 *
			 * What happens if an allocation fails? Basically, an
			 * "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then the "retry" path is
			 * triggered to repeat one more time. See more details
			 * in the alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node, root, head);
	}

	return 0;
}

static unsigned long
va_alloc(struct vmap_area *va,
		struct rb_root *root, struct list_head *head,
		unsigned long size, unsigned long align,
		unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	int ret;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Update the free vmap_area. */
	ret = va_clip(root, head, va, nva_start_addr, size);
	if (WARN_ON_ONCE(ret))
		return vend;

	return nva_start_addr;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise, vend is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(struct rb_root *root, struct list_head *head,
	unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	bool adjust_search_size = true;
	unsigned long nva_start_addr;
	struct vmap_area *va;

	/*
	 * Do not adjust when:
	 *   a) align <= PAGE_SIZE, because it does not make any sense.
	 *      All blocks(their start addresses) are at least PAGE_SIZE
	 *      aligned anyway;
	 *   b) a short range where a requested size corresponds to exactly
	 *      specified [vstart:vend] interval and an alignment > PAGE_SIZE.
	 *      With adjusted search length an allocation would not succeed.
	 */
	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
		adjust_search_size = false;

	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
	if (unlikely(!va))
		return vend;

	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
	if (nva_start_addr == vend)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(root, head, size, align);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	struct vmap_node *vn = addr_to_node(va->va_start);

	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vn->busy.lock);
	unlink_va(va, &vn->busy.root);
	spin_unlock(&vn->busy.lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL;

	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
	 * a CPU that does an allocation is preloaded.
	 *
	 * We do it in non-atomic context, thus it allows us to use more
	 * permissive allocation masks to be more stable under low memory
	 * condition and high memory pressure.
	 */
	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
		kmem_cache_free(vmap_area_cachep, va);
}

static struct vmap_pool *
size_to_va_pool(struct vmap_node *vn, unsigned long size)
{
	unsigned int idx = (size - 1) / PAGE_SIZE;

	if (idx < MAX_VA_SIZE_PAGES)
		return &vn->pool[idx];

	return NULL;
}

static bool
node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
{
	struct vmap_pool *vp;

	vp = size_to_va_pool(n, va_size(va));
	if (!vp)
		return false;

	spin_lock(&n->pool_lock);
	list_add(&va->list, &vp->head);
	WRITE_ONCE(vp->len, vp->len + 1);
	spin_unlock(&n->pool_lock);

	return true;
}
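
/*
 * Illustrative worked example (editorial addition, not from the original
 * source): size_to_va_pool() maps a VA of N pages to pool[N - 1], e.g. a
 * 3 * PAGE_SIZE area lands in pool[2] because (3 * PAGE_SIZE - 1) / PAGE_SIZE
 * == 2. Areas bigger than MAX_VA_SIZE_PAGES pages (1M with 4K pages) get no
 * pool and always go back to the global free tree.
 */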

static struct vmap_area *
node_pool_del_va(struct vmap_node *vn, unsigned long size,
		unsigned long align, unsigned long vstart,
		unsigned long vend)
{
	struct vmap_area *va = NULL;
	struct vmap_pool *vp;
	int err = 0;

	vp = size_to_va_pool(vn, size);
	if (!vp || list_empty(&vp->head))
		return NULL;

	spin_lock(&vn->pool_lock);
	if (!list_empty(&vp->head)) {
		va = list_first_entry(&vp->head, struct vmap_area, list);

		if (IS_ALIGNED(va->va_start, align)) {
			/*
			 * Do some sanity check and emit a warning
			 * if one of the checks below detects an error.
			 */
			err |= (va_size(va) != size);
			err |= (va->va_start < vstart);
			err |= (va->va_end > vend);

			if (!WARN_ON_ONCE(err)) {
				list_del_init(&va->list);
				WRITE_ONCE(vp->len, vp->len - 1);
			} else {
				va = NULL;
			}
		} else {
			list_move_tail(&va->list, &vp->head);
			va = NULL;
		}
	}
	spin_unlock(&vn->pool_lock);

	return va;
}

static struct vmap_area *
node_alloc(unsigned long size, unsigned long align,
		unsigned long vstart, unsigned long vend,
		unsigned long *addr, unsigned int *vn_id)
{
	struct vmap_area *va;

	*vn_id = 0;
	*addr = vend;

	/*
	 * Fallback to a global heap if not vmalloc or there
	 * is only one node.
	 */
	if (vstart != VMALLOC_START || vend != VMALLOC_END ||
			nr_vmap_nodes == 1)
		return NULL;

	*vn_id = raw_smp_processor_id() % nr_vmap_nodes;
	va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
	*vn_id = encode_vn_id(*vn_id);

	if (va)
		*addr = va->va_start;

	return va;
}

static inline void setup_vmalloc_vm(struct vm_struct *vm,
	struct vmap_area *va, unsigned long flags, const void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend. If vm is passed in, the two will also be bound.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask,
				unsigned long va_flags, struct vm_struct *vm)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	unsigned long freed;
	unsigned long addr;
	unsigned int vn_id;
	int purged = 0;
	int ret;

	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
		return ERR_PTR(-EINVAL);

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();

	/*
	 * Even if the VA is obtained from the global heap (i.e. if node_alloc()
	 * fails here), it is still marked with this "vn_id" so it is returned
	 * to this node's pool later. This makes it possible to populate the
	 * pools based on user demand.
	 *
	 * On success a ready to go VA is returned.
	 */
	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
	if (!va) {
		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
		if (unlikely(!va))
			return ERR_PTR(-ENOMEM);

		/*
		 * Only scan the relevant parts containing pointers to other objects
		 * to avoid false negatives.
		 */
		kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
	}

retry:
	if (addr == vend) {
		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
			size, align, vstart, vend);
		spin_unlock(&free_vmap_area_lock);
	}

	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
2001 */ 2002 if (unlikely(addr == vend)) 2003 goto overflow; 2004 2005 va->va_start = addr; 2006 va->va_end = addr + size; 2007 va->vm = NULL; 2008 va->flags = (va_flags | vn_id); 2009 2010 if (vm) { 2011 vm->addr = (void *)va->va_start; 2012 vm->size = va->va_end - va->va_start; 2013 va->vm = vm; 2014 } 2015 2016 vn = addr_to_node(va->va_start); 2017 2018 spin_lock(&vn->busy.lock); 2019 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 2020 spin_unlock(&vn->busy.lock); 2021 2022 BUG_ON(!IS_ALIGNED(va->va_start, align)); 2023 BUG_ON(va->va_start < vstart); 2024 BUG_ON(va->va_end > vend); 2025 2026 ret = kasan_populate_vmalloc(addr, size); 2027 if (ret) { 2028 free_vmap_area(va); 2029 return ERR_PTR(ret); 2030 } 2031 2032 return va; 2033 2034 overflow: 2035 if (!purged) { 2036 reclaim_and_purge_vmap_areas(); 2037 purged = 1; 2038 goto retry; 2039 } 2040 2041 freed = 0; 2042 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 2043 2044 if (freed > 0) { 2045 purged = 0; 2046 goto retry; 2047 } 2048 2049 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 2050 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 2051 size); 2052 2053 kmem_cache_free(vmap_area_cachep, va); 2054 return ERR_PTR(-EBUSY); 2055 } 2056 2057 int register_vmap_purge_notifier(struct notifier_block *nb) 2058 { 2059 return blocking_notifier_chain_register(&vmap_notify_list, nb); 2060 } 2061 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 2062 2063 int unregister_vmap_purge_notifier(struct notifier_block *nb) 2064 { 2065 return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 2066 } 2067 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 2068 2069 /* 2070 * lazy_max_pages is the maximum amount of virtual address space we gather up 2071 * before attempting to purge with a TLB flush. 2072 * 2073 * There is a tradeoff here: a larger number will cover more kernel page tables 2074 * and take slightly longer to purge, but it will linearly reduce the number of 2075 * global TLB flushes that must be performed. It would seem natural to scale 2076 * this number up linearly with the number of CPUs (because vmapping activity 2077 * could also scale linearly with the number of CPUs), however it is likely 2078 * that in practice, workloads might be constrained in other ways that mean 2079 * vmap activity will not scale linearly with CPUs. Also, I want to be 2080 * conservative and not introduce a big latency on huge systems, so go with 2081 * a less aggressive log scale. It will still be an improvement over the old 2082 * code, and it will be simple to change the scale factor if we find that it 2083 * becomes a problem on bigger systems. 2084 */ 2085 static unsigned long lazy_max_pages(void) 2086 { 2087 unsigned int log; 2088 2089 log = fls(num_online_cpus()); 2090 2091 return log * (32UL * 1024 * 1024 / PAGE_SIZE); 2092 } 2093 2094 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); 2095 2096 /* 2097 * Serialize vmap purging. There is no actual critical section protected 2098 * by this lock, but we want to avoid concurrent calls for performance 2099 * reasons and to make the pcpu_get_vm_areas more deterministic. 
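 * In this file the lock is taken by reclaim_and_purge_vmap_areas(),
 * drain_vmap_area_work() and _vm_unmap_aliases() around calls to
 * __purge_vmap_area_lazy().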
2100 */ 2101 static DEFINE_MUTEX(vmap_purge_lock); 2102 2103 /* for per-CPU blocks */ 2104 static void purge_fragmented_blocks_allcpus(void); 2105 static cpumask_t purge_nodes; 2106 2107 static void 2108 reclaim_list_global(struct list_head *head) 2109 { 2110 struct vmap_area *va, *n; 2111 2112 if (list_empty(head)) 2113 return; 2114 2115 spin_lock(&free_vmap_area_lock); 2116 list_for_each_entry_safe(va, n, head, list) 2117 merge_or_add_vmap_area_augment(va, 2118 &free_vmap_area_root, &free_vmap_area_list); 2119 spin_unlock(&free_vmap_area_lock); 2120 } 2121 2122 static void 2123 decay_va_pool_node(struct vmap_node *vn, bool full_decay) 2124 { 2125 struct vmap_area *va, *nva; 2126 struct list_head decay_list; 2127 struct rb_root decay_root; 2128 unsigned long n_decay; 2129 int i; 2130 2131 decay_root = RB_ROOT; 2132 INIT_LIST_HEAD(&decay_list); 2133 2134 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { 2135 struct list_head tmp_list; 2136 2137 if (list_empty(&vn->pool[i].head)) 2138 continue; 2139 2140 INIT_LIST_HEAD(&tmp_list); 2141 2142 /* Detach the pool, so no-one can access it. */ 2143 spin_lock(&vn->pool_lock); 2144 list_replace_init(&vn->pool[i].head, &tmp_list); 2145 spin_unlock(&vn->pool_lock); 2146 2147 if (full_decay) 2148 WRITE_ONCE(vn->pool[i].len, 0); 2149 2150 /* Decay a pool by ~25% out of left objects. */ 2151 n_decay = vn->pool[i].len >> 2; 2152 2153 list_for_each_entry_safe(va, nva, &tmp_list, list) { 2154 list_del_init(&va->list); 2155 merge_or_add_vmap_area(va, &decay_root, &decay_list); 2156 2157 if (!full_decay) { 2158 WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1); 2159 2160 if (!--n_decay) 2161 break; 2162 } 2163 } 2164 2165 /* 2166 * Attach the pool back if it has been partly decayed. 2167 * Please note, it is supposed that nobody(other contexts) 2168 * can populate the pool therefore a simple list replace 2169 * operation takes place here. 2170 */ 2171 if (!full_decay && !list_empty(&tmp_list)) { 2172 spin_lock(&vn->pool_lock); 2173 list_replace_init(&tmp_list, &vn->pool[i].head); 2174 spin_unlock(&vn->pool_lock); 2175 } 2176 } 2177 2178 reclaim_list_global(&decay_list); 2179 } 2180 2181 static void purge_vmap_node(struct work_struct *work) 2182 { 2183 struct vmap_node *vn = container_of(work, 2184 struct vmap_node, purge_work); 2185 struct vmap_area *va, *n_va; 2186 LIST_HEAD(local_list); 2187 2188 vn->nr_purged = 0; 2189 2190 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { 2191 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; 2192 unsigned long orig_start = va->va_start; 2193 unsigned long orig_end = va->va_end; 2194 unsigned int vn_id = decode_vn_id(va->flags); 2195 2196 list_del_init(&va->list); 2197 2198 if (is_vmalloc_or_module_addr((void *)orig_start)) 2199 kasan_release_vmalloc(orig_start, orig_end, 2200 va->va_start, va->va_end); 2201 2202 atomic_long_sub(nr, &vmap_lazy_nr); 2203 vn->nr_purged++; 2204 2205 if (is_vn_id_valid(vn_id) && !vn->skip_populate) 2206 if (node_pool_add_va(vn, va)) 2207 continue; 2208 2209 /* Go back to global. */ 2210 list_add(&va->list, &local_list); 2211 } 2212 2213 reclaim_list_global(&local_list); 2214 } 2215 2216 /* 2217 * Purges all lazily-freed vmap areas. 
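 * Must be called with vmap_purge_lock held. Every vmap node has its lazy
 * list detached, the TLB is flushed once for the whole [start:end] range,
 * and the areas are then returned to the per-node pools or to the global
 * free space, spreading the work over per-node workers when enough lazy
 * memory has accumulated. Returns true if at least one area was purged.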
2218 */ 2219 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end, 2220 bool full_pool_decay) 2221 { 2222 unsigned long nr_purged_areas = 0; 2223 unsigned int nr_purge_helpers; 2224 unsigned int nr_purge_nodes; 2225 struct vmap_node *vn; 2226 int i; 2227 2228 lockdep_assert_held(&vmap_purge_lock); 2229 2230 /* 2231 * Use cpumask to mark which node has to be processed. 2232 */ 2233 purge_nodes = CPU_MASK_NONE; 2234 2235 for (i = 0; i < nr_vmap_nodes; i++) { 2236 vn = &vmap_nodes[i]; 2237 2238 INIT_LIST_HEAD(&vn->purge_list); 2239 vn->skip_populate = full_pool_decay; 2240 decay_va_pool_node(vn, full_pool_decay); 2241 2242 if (RB_EMPTY_ROOT(&vn->lazy.root)) 2243 continue; 2244 2245 spin_lock(&vn->lazy.lock); 2246 WRITE_ONCE(vn->lazy.root.rb_node, NULL); 2247 list_replace_init(&vn->lazy.head, &vn->purge_list); 2248 spin_unlock(&vn->lazy.lock); 2249 2250 start = min(start, list_first_entry(&vn->purge_list, 2251 struct vmap_area, list)->va_start); 2252 2253 end = max(end, list_last_entry(&vn->purge_list, 2254 struct vmap_area, list)->va_end); 2255 2256 cpumask_set_cpu(i, &purge_nodes); 2257 } 2258 2259 nr_purge_nodes = cpumask_weight(&purge_nodes); 2260 if (nr_purge_nodes > 0) { 2261 flush_tlb_kernel_range(start, end); 2262 2263 /* One extra worker is per a lazy_max_pages() full set minus one. */ 2264 nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages(); 2265 nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1; 2266 2267 for_each_cpu(i, &purge_nodes) { 2268 vn = &vmap_nodes[i]; 2269 2270 if (nr_purge_helpers > 0) { 2271 INIT_WORK(&vn->purge_work, purge_vmap_node); 2272 2273 if (cpumask_test_cpu(i, cpu_online_mask)) 2274 schedule_work_on(i, &vn->purge_work); 2275 else 2276 schedule_work(&vn->purge_work); 2277 2278 nr_purge_helpers--; 2279 } else { 2280 vn->purge_work.func = NULL; 2281 purge_vmap_node(&vn->purge_work); 2282 nr_purged_areas += vn->nr_purged; 2283 } 2284 } 2285 2286 for_each_cpu(i, &purge_nodes) { 2287 vn = &vmap_nodes[i]; 2288 2289 if (vn->purge_work.func) { 2290 flush_work(&vn->purge_work); 2291 nr_purged_areas += vn->nr_purged; 2292 } 2293 } 2294 } 2295 2296 trace_purge_vmap_area_lazy(start, end, nr_purged_areas); 2297 return nr_purged_areas > 0; 2298 } 2299 2300 /* 2301 * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list. 2302 */ 2303 static void reclaim_and_purge_vmap_areas(void) 2304 2305 { 2306 mutex_lock(&vmap_purge_lock); 2307 purge_fragmented_blocks_allcpus(); 2308 __purge_vmap_area_lazy(ULONG_MAX, 0, true); 2309 mutex_unlock(&vmap_purge_lock); 2310 } 2311 2312 static void drain_vmap_area_work(struct work_struct *work) 2313 { 2314 mutex_lock(&vmap_purge_lock); 2315 __purge_vmap_area_lazy(ULONG_MAX, 0, false); 2316 mutex_unlock(&vmap_purge_lock); 2317 } 2318 2319 /* 2320 * Free a vmap area, caller ensuring that the area has been unmapped, 2321 * unlinked and flush_cache_vunmap had been called for the correct 2322 * range previously. 2323 */ 2324 static void free_vmap_area_noflush(struct vmap_area *va) 2325 { 2326 unsigned long nr_lazy_max = lazy_max_pages(); 2327 unsigned long va_start = va->va_start; 2328 unsigned int vn_id = decode_vn_id(va->flags); 2329 struct vmap_node *vn; 2330 unsigned long nr_lazy; 2331 2332 if (WARN_ON_ONCE(!list_empty(&va->list))) 2333 return; 2334 2335 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> 2336 PAGE_SHIFT, &vmap_lazy_nr); 2337 2338 /* 2339 * If it was request by a certain node we would like to 2340 * return it to that node, i.e. 
its pool for later reuse. 2341 */ 2342 vn = is_vn_id_valid(vn_id) ? 2343 id_to_node(vn_id):addr_to_node(va->va_start); 2344 2345 spin_lock(&vn->lazy.lock); 2346 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); 2347 spin_unlock(&vn->lazy.lock); 2348 2349 trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max); 2350 2351 /* After this point, we may free va at any time */ 2352 if (unlikely(nr_lazy > nr_lazy_max)) 2353 schedule_work(&drain_vmap_work); 2354 } 2355 2356 /* 2357 * Free and unmap a vmap area 2358 */ 2359 static void free_unmap_vmap_area(struct vmap_area *va) 2360 { 2361 flush_cache_vunmap(va->va_start, va->va_end); 2362 vunmap_range_noflush(va->va_start, va->va_end); 2363 if (debug_pagealloc_enabled_static()) 2364 flush_tlb_kernel_range(va->va_start, va->va_end); 2365 2366 free_vmap_area_noflush(va); 2367 } 2368 2369 struct vmap_area *find_vmap_area(unsigned long addr) 2370 { 2371 struct vmap_node *vn; 2372 struct vmap_area *va; 2373 int i, j; 2374 2375 if (unlikely(!vmap_initialized)) 2376 return NULL; 2377 2378 /* 2379 * An addr_to_node_id(addr) converts an address to a node index 2380 * where a VA is located. If VA spans several zones and passed 2381 * addr is not the same as va->va_start, what is not common, we 2382 * may need to scan extra nodes. See an example: 2383 * 2384 * <----va----> 2385 * -|-----|-----|-----|-----|- 2386 * 1 2 0 1 2387 * 2388 * VA resides in node 1 whereas it spans 1, 2 an 0. If passed 2389 * addr is within 2 or 0 nodes we should do extra work. 2390 */ 2391 i = j = addr_to_node_id(addr); 2392 do { 2393 vn = &vmap_nodes[i]; 2394 2395 spin_lock(&vn->busy.lock); 2396 va = __find_vmap_area(addr, &vn->busy.root); 2397 spin_unlock(&vn->busy.lock); 2398 2399 if (va) 2400 return va; 2401 } while ((i = (i + 1) % nr_vmap_nodes) != j); 2402 2403 return NULL; 2404 } 2405 2406 static struct vmap_area *find_unlink_vmap_area(unsigned long addr) 2407 { 2408 struct vmap_node *vn; 2409 struct vmap_area *va; 2410 int i, j; 2411 2412 /* 2413 * Check the comment in the find_vmap_area() about the loop. 2414 */ 2415 i = j = addr_to_node_id(addr); 2416 do { 2417 vn = &vmap_nodes[i]; 2418 2419 spin_lock(&vn->busy.lock); 2420 va = __find_vmap_area(addr, &vn->busy.root); 2421 if (va) 2422 unlink_va(va, &vn->busy.root); 2423 spin_unlock(&vn->busy.lock); 2424 2425 if (va) 2426 return va; 2427 } while ((i = (i + 1) % nr_vmap_nodes) != j); 2428 2429 return NULL; 2430 } 2431 2432 /*** Per cpu kva allocator ***/ 2433 2434 /* 2435 * vmap space is limited especially on 32 bit architectures. Ensure there is 2436 * room for at least 16 percpu vmap blocks per CPU. 2437 */ 2438 /* 2439 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 2440 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 2441 * instead (we just need a rough idea) 2442 */ 2443 #if BITS_PER_LONG == 32 2444 #define VMALLOC_SPACE (128UL*1024*1024) 2445 #else 2446 #define VMALLOC_SPACE (128UL*1024*1024*1024) 2447 #endif 2448 2449 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 2450 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 2451 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 2452 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 2453 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 2454 #define VMAP_MAX(x, y) ((x) > (y) ? 
(x) : (y)) /* can't use max() */ 2455 #define VMAP_BBMAP_BITS \ 2456 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 2457 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 2458 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 2459 2460 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 2461 2462 /* 2463 * Purge threshold to prevent overeager purging of fragmented blocks for 2464 * regular operations: Purge if vb->free is less than 1/4 of the capacity. 2465 */ 2466 #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) 2467 2468 #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/ 2469 #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/ 2470 #define VMAP_FLAGS_MASK 0x3 2471 2472 struct vmap_block_queue { 2473 spinlock_t lock; 2474 struct list_head free; 2475 2476 /* 2477 * An xarray requires an extra memory dynamically to 2478 * be allocated. If it is an issue, we can use rb-tree 2479 * instead. 2480 */ 2481 struct xarray vmap_blocks; 2482 }; 2483 2484 struct vmap_block { 2485 spinlock_t lock; 2486 struct vmap_area *va; 2487 unsigned long free, dirty; 2488 DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS); 2489 unsigned long dirty_min, dirty_max; /*< dirty range */ 2490 struct list_head free_list; 2491 struct rcu_head rcu_head; 2492 struct list_head purge; 2493 }; 2494 2495 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 2496 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 2497 2498 /* 2499 * In order to fast access to any "vmap_block" associated with a 2500 * specific address, we use a hash. 2501 * 2502 * A per-cpu vmap_block_queue is used in both ways, to serialize 2503 * an access to free block chains among CPUs(alloc path) and it 2504 * also acts as a vmap_block hash(alloc/free paths). It means we 2505 * overload it, since we already have the per-cpu array which is 2506 * used as a hash table. When used as a hash a 'cpu' passed to 2507 * per_cpu() is not actually a CPU but rather a hash index. 2508 * 2509 * A hash function is addr_to_vb_xa() which hashes any address 2510 * to a specific index(in a hash) it belongs to. This then uses a 2511 * per_cpu() macro to access an array with generated index. 2512 * 2513 * An example: 2514 * 2515 * CPU_1 CPU_2 CPU_0 2516 * | | | 2517 * V V V 2518 * 0 10 20 30 40 50 60 2519 * |------|------|------|------|------|------|...<vmap address space> 2520 * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2 2521 * 2522 * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus 2523 * it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock; 2524 * 2525 * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus 2526 * it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock; 2527 * 2528 * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus 2529 * it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock. 2530 * 2531 * This technique almost always avoids lock contention on insert/remove, 2532 * however xarray spinlocks protect against any contention that remains. 2533 */ 2534 static struct xarray * 2535 addr_to_vb_xa(unsigned long addr) 2536 { 2537 int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus(); 2538 2539 return &per_cpu(vmap_block_queue, index).vmap_blocks; 2540 } 2541 2542 /* 2543 * We should probably have a fallback mechanism to allocate virtual memory 2544 * out of partially filled vmap blocks. However vmap block sizing should be 2545 * fairly reasonable according to the vmalloc size, so it shouldn't be a 2546 * big problem. 
2547 */ 2548 2549 static unsigned long addr_to_vb_idx(unsigned long addr) 2550 { 2551 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 2552 addr /= VMAP_BLOCK_SIZE; 2553 return addr; 2554 } 2555 2556 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 2557 { 2558 unsigned long addr; 2559 2560 addr = va_start + (pages_off << PAGE_SHIFT); 2561 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 2562 return (void *)addr; 2563 } 2564 2565 /** 2566 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 2567 * block. Of course pages number can't exceed VMAP_BBMAP_BITS 2568 * @order: how many 2^order pages should be occupied in newly allocated block 2569 * @gfp_mask: flags for the page level allocator 2570 * 2571 * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 2572 */ 2573 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 2574 { 2575 struct vmap_block_queue *vbq; 2576 struct vmap_block *vb; 2577 struct vmap_area *va; 2578 struct xarray *xa; 2579 unsigned long vb_idx; 2580 int node, err; 2581 void *vaddr; 2582 2583 node = numa_node_id(); 2584 2585 vb = kmalloc_node(sizeof(struct vmap_block), 2586 gfp_mask & GFP_RECLAIM_MASK, node); 2587 if (unlikely(!vb)) 2588 return ERR_PTR(-ENOMEM); 2589 2590 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 2591 VMALLOC_START, VMALLOC_END, 2592 node, gfp_mask, 2593 VMAP_RAM|VMAP_BLOCK, NULL); 2594 if (IS_ERR(va)) { 2595 kfree(vb); 2596 return ERR_CAST(va); 2597 } 2598 2599 vaddr = vmap_block_vaddr(va->va_start, 0); 2600 spin_lock_init(&vb->lock); 2601 vb->va = va; 2602 /* At least something should be left free */ 2603 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 2604 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS); 2605 vb->free = VMAP_BBMAP_BITS - (1UL << order); 2606 vb->dirty = 0; 2607 vb->dirty_min = VMAP_BBMAP_BITS; 2608 vb->dirty_max = 0; 2609 bitmap_set(vb->used_map, 0, (1UL << order)); 2610 INIT_LIST_HEAD(&vb->free_list); 2611 2612 xa = addr_to_vb_xa(va->va_start); 2613 vb_idx = addr_to_vb_idx(va->va_start); 2614 err = xa_insert(xa, vb_idx, vb, gfp_mask); 2615 if (err) { 2616 kfree(vb); 2617 free_vmap_area(va); 2618 return ERR_PTR(err); 2619 } 2620 2621 vbq = raw_cpu_ptr(&vmap_block_queue); 2622 spin_lock(&vbq->lock); 2623 list_add_tail_rcu(&vb->free_list, &vbq->free); 2624 spin_unlock(&vbq->lock); 2625 2626 return vaddr; 2627 } 2628 2629 static void free_vmap_block(struct vmap_block *vb) 2630 { 2631 struct vmap_node *vn; 2632 struct vmap_block *tmp; 2633 struct xarray *xa; 2634 2635 xa = addr_to_vb_xa(vb->va->va_start); 2636 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); 2637 BUG_ON(tmp != vb); 2638 2639 vn = addr_to_node(vb->va->va_start); 2640 spin_lock(&vn->busy.lock); 2641 unlink_va(vb->va, &vn->busy.root); 2642 spin_unlock(&vn->busy.lock); 2643 2644 free_vmap_area_noflush(vb->va); 2645 kfree_rcu(vb, rcu_head); 2646 } 2647 2648 static bool purge_fragmented_block(struct vmap_block *vb, 2649 struct vmap_block_queue *vbq, struct list_head *purge_list, 2650 bool force_purge) 2651 { 2652 if (vb->free + vb->dirty != VMAP_BBMAP_BITS || 2653 vb->dirty == VMAP_BBMAP_BITS) 2654 return false; 2655 2656 /* Don't overeagerly purge usable blocks unless requested */ 2657 if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) 2658 return false; 2659 2660 /* prevent further allocs after releasing lock */ 2661 WRITE_ONCE(vb->free, 0); 2662 /* prevent purging it again */ 2663 WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); 2664 vb->dirty_min = 0; 2665 vb->dirty_max = 
VMAP_BBMAP_BITS; 2666 spin_lock(&vbq->lock); 2667 list_del_rcu(&vb->free_list); 2668 spin_unlock(&vbq->lock); 2669 list_add_tail(&vb->purge, purge_list); 2670 return true; 2671 } 2672 2673 static void free_purged_blocks(struct list_head *purge_list) 2674 { 2675 struct vmap_block *vb, *n_vb; 2676 2677 list_for_each_entry_safe(vb, n_vb, purge_list, purge) { 2678 list_del(&vb->purge); 2679 free_vmap_block(vb); 2680 } 2681 } 2682 2683 static void purge_fragmented_blocks(int cpu) 2684 { 2685 LIST_HEAD(purge); 2686 struct vmap_block *vb; 2687 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2688 2689 rcu_read_lock(); 2690 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2691 unsigned long free = READ_ONCE(vb->free); 2692 unsigned long dirty = READ_ONCE(vb->dirty); 2693 2694 if (free + dirty != VMAP_BBMAP_BITS || 2695 dirty == VMAP_BBMAP_BITS) 2696 continue; 2697 2698 spin_lock(&vb->lock); 2699 purge_fragmented_block(vb, vbq, &purge, true); 2700 spin_unlock(&vb->lock); 2701 } 2702 rcu_read_unlock(); 2703 free_purged_blocks(&purge); 2704 } 2705 2706 static void purge_fragmented_blocks_allcpus(void) 2707 { 2708 int cpu; 2709 2710 for_each_possible_cpu(cpu) 2711 purge_fragmented_blocks(cpu); 2712 } 2713 2714 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 2715 { 2716 struct vmap_block_queue *vbq; 2717 struct vmap_block *vb; 2718 void *vaddr = NULL; 2719 unsigned int order; 2720 2721 BUG_ON(offset_in_page(size)); 2722 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2723 if (WARN_ON(size == 0)) { 2724 /* 2725 * Allocating 0 bytes isn't what caller wants since 2726 * get_order(0) returns funny result. Just warn and terminate 2727 * early. 2728 */ 2729 return NULL; 2730 } 2731 order = get_order(size); 2732 2733 rcu_read_lock(); 2734 vbq = raw_cpu_ptr(&vmap_block_queue); 2735 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2736 unsigned long pages_off; 2737 2738 if (READ_ONCE(vb->free) < (1UL << order)) 2739 continue; 2740 2741 spin_lock(&vb->lock); 2742 if (vb->free < (1UL << order)) { 2743 spin_unlock(&vb->lock); 2744 continue; 2745 } 2746 2747 pages_off = VMAP_BBMAP_BITS - vb->free; 2748 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 2749 WRITE_ONCE(vb->free, vb->free - (1UL << order)); 2750 bitmap_set(vb->used_map, pages_off, (1UL << order)); 2751 if (vb->free == 0) { 2752 spin_lock(&vbq->lock); 2753 list_del_rcu(&vb->free_list); 2754 spin_unlock(&vbq->lock); 2755 } 2756 2757 spin_unlock(&vb->lock); 2758 break; 2759 } 2760 2761 rcu_read_unlock(); 2762 2763 /* Allocate new block if nothing was found */ 2764 if (!vaddr) 2765 vaddr = new_vmap_block(order, gfp_mask); 2766 2767 return vaddr; 2768 } 2769 2770 static void vb_free(unsigned long addr, unsigned long size) 2771 { 2772 unsigned long offset; 2773 unsigned int order; 2774 struct vmap_block *vb; 2775 struct xarray *xa; 2776 2777 BUG_ON(offset_in_page(size)); 2778 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2779 2780 flush_cache_vunmap(addr, addr + size); 2781 2782 order = get_order(size); 2783 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 2784 2785 xa = addr_to_vb_xa(addr); 2786 vb = xa_load(xa, addr_to_vb_idx(addr)); 2787 2788 spin_lock(&vb->lock); 2789 bitmap_clear(vb->used_map, offset, (1UL << order)); 2790 spin_unlock(&vb->lock); 2791 2792 vunmap_range_noflush(addr, addr + size); 2793 2794 if (debug_pagealloc_enabled_static()) 2795 flush_tlb_kernel_range(addr, addr + size); 2796 2797 spin_lock(&vb->lock); 2798 2799 /* Expand the not yet TLB flushed dirty range */ 2800 vb->dirty_min = min(vb->dirty_min, 
offset); 2801 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2802 2803 WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); 2804 if (vb->dirty == VMAP_BBMAP_BITS) { 2805 BUG_ON(vb->free); 2806 spin_unlock(&vb->lock); 2807 free_vmap_block(vb); 2808 } else 2809 spin_unlock(&vb->lock); 2810 } 2811 2812 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2813 { 2814 LIST_HEAD(purge_list); 2815 int cpu; 2816 2817 if (unlikely(!vmap_initialized)) 2818 return; 2819 2820 mutex_lock(&vmap_purge_lock); 2821 2822 for_each_possible_cpu(cpu) { 2823 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2824 struct vmap_block *vb; 2825 unsigned long idx; 2826 2827 rcu_read_lock(); 2828 xa_for_each(&vbq->vmap_blocks, idx, vb) { 2829 spin_lock(&vb->lock); 2830 2831 /* 2832 * Try to purge a fragmented block first. If it's 2833 * not purgeable, check whether there is dirty 2834 * space to be flushed. 2835 */ 2836 if (!purge_fragmented_block(vb, vbq, &purge_list, false) && 2837 vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { 2838 unsigned long va_start = vb->va->va_start; 2839 unsigned long s, e; 2840 2841 s = va_start + (vb->dirty_min << PAGE_SHIFT); 2842 e = va_start + (vb->dirty_max << PAGE_SHIFT); 2843 2844 start = min(s, start); 2845 end = max(e, end); 2846 2847 /* Prevent that this is flushed again */ 2848 vb->dirty_min = VMAP_BBMAP_BITS; 2849 vb->dirty_max = 0; 2850 2851 flush = 1; 2852 } 2853 spin_unlock(&vb->lock); 2854 } 2855 rcu_read_unlock(); 2856 } 2857 free_purged_blocks(&purge_list); 2858 2859 if (!__purge_vmap_area_lazy(start, end, false) && flush) 2860 flush_tlb_kernel_range(start, end); 2861 mutex_unlock(&vmap_purge_lock); 2862 } 2863 2864 /** 2865 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 2866 * 2867 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 2868 * to amortize TLB flushing overheads. What this means is that any page you 2869 * have now, may, in a former life, have been mapped into kernel virtual 2870 * address by the vmap layer and so there might be some CPUs with TLB entries 2871 * still referencing that page (additional to the regular 1:1 kernel mapping). 2872 * 2873 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 2874 * be sure that none of the pages we have control over will have any aliases 2875 * from the vmap layer. 
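 * As an example of the intended use, vm_reset_perms() in this file first
 * invalidates the direct map entries of a VM_FLUSH_RESET_PERMS area and
 * then calls into _vm_unmap_aliases(), so that a single TLB flush covers
 * both the lazily unmapped aliases and the direct map range.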
2876 */ 2877 void vm_unmap_aliases(void) 2878 { 2879 unsigned long start = ULONG_MAX, end = 0; 2880 int flush = 0; 2881 2882 _vm_unmap_aliases(start, end, flush); 2883 } 2884 EXPORT_SYMBOL_GPL(vm_unmap_aliases); 2885 2886 /** 2887 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 2888 * @mem: the pointer returned by vm_map_ram 2889 * @count: the count passed to that vm_map_ram call (cannot unmap partial) 2890 */ 2891 void vm_unmap_ram(const void *mem, unsigned int count) 2892 { 2893 unsigned long size = (unsigned long)count << PAGE_SHIFT; 2894 unsigned long addr = (unsigned long)kasan_reset_tag(mem); 2895 struct vmap_area *va; 2896 2897 might_sleep(); 2898 BUG_ON(!addr); 2899 BUG_ON(addr < VMALLOC_START); 2900 BUG_ON(addr > VMALLOC_END); 2901 BUG_ON(!PAGE_ALIGNED(addr)); 2902 2903 kasan_poison_vmalloc(mem, size); 2904 2905 if (likely(count <= VMAP_MAX_ALLOC)) { 2906 debug_check_no_locks_freed(mem, size); 2907 vb_free(addr, size); 2908 return; 2909 } 2910 2911 va = find_unlink_vmap_area(addr); 2912 if (WARN_ON_ONCE(!va)) 2913 return; 2914 2915 debug_check_no_locks_freed((void *)va->va_start, 2916 (va->va_end - va->va_start)); 2917 free_unmap_vmap_area(va); 2918 } 2919 EXPORT_SYMBOL(vm_unmap_ram); 2920 2921 /** 2922 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 2923 * @pages: an array of pointers to the pages to be mapped 2924 * @count: number of pages 2925 * @node: prefer to allocate data structures on this node 2926 * 2927 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 2928 * faster than vmap so it's good. But if you mix long-life and short-life 2929 * objects with vm_map_ram(), it could consume lots of address space through 2930 * fragmentation (especially on a 32bit machine). You could see failures in 2931 * the end. Please use this function for short-lived objects. 2932 * 2933 * Returns: a pointer to the address that has been mapped, or %NULL on failure 2934 */ 2935 void *vm_map_ram(struct page **pages, unsigned int count, int node) 2936 { 2937 unsigned long size = (unsigned long)count << PAGE_SHIFT; 2938 unsigned long addr; 2939 void *mem; 2940 2941 if (likely(count <= VMAP_MAX_ALLOC)) { 2942 mem = vb_alloc(size, GFP_KERNEL); 2943 if (IS_ERR(mem)) 2944 return NULL; 2945 addr = (unsigned long)mem; 2946 } else { 2947 struct vmap_area *va; 2948 va = alloc_vmap_area(size, PAGE_SIZE, 2949 VMALLOC_START, VMALLOC_END, 2950 node, GFP_KERNEL, VMAP_RAM, 2951 NULL); 2952 if (IS_ERR(va)) 2953 return NULL; 2954 2955 addr = va->va_start; 2956 mem = (void *)addr; 2957 } 2958 2959 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 2960 pages, PAGE_SHIFT) < 0) { 2961 vm_unmap_ram(mem, count); 2962 return NULL; 2963 } 2964 2965 /* 2966 * Mark the pages as accessible, now that they are mapped. 2967 * With hardware tag-based KASAN, marking is skipped for 2968 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 
2969 */ 2970 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); 2971 2972 return mem; 2973 } 2974 EXPORT_SYMBOL(vm_map_ram); 2975 2976 static struct vm_struct *vmlist __initdata; 2977 2978 static inline unsigned int vm_area_page_order(struct vm_struct *vm) 2979 { 2980 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2981 return vm->page_order; 2982 #else 2983 return 0; 2984 #endif 2985 } 2986 2987 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 2988 { 2989 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2990 vm->page_order = order; 2991 #else 2992 BUG_ON(order != 0); 2993 #endif 2994 } 2995 2996 /** 2997 * vm_area_add_early - add vmap area early during boot 2998 * @vm: vm_struct to add 2999 * 3000 * This function is used to add fixed kernel vm area to vmlist before 3001 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 3002 * should contain proper values and the other fields should be zero. 3003 * 3004 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 3005 */ 3006 void __init vm_area_add_early(struct vm_struct *vm) 3007 { 3008 struct vm_struct *tmp, **p; 3009 3010 BUG_ON(vmap_initialized); 3011 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 3012 if (tmp->addr >= vm->addr) { 3013 BUG_ON(tmp->addr < vm->addr + vm->size); 3014 break; 3015 } else 3016 BUG_ON(tmp->addr + tmp->size > vm->addr); 3017 } 3018 vm->next = *p; 3019 *p = vm; 3020 } 3021 3022 /** 3023 * vm_area_register_early - register vmap area early during boot 3024 * @vm: vm_struct to register 3025 * @align: requested alignment 3026 * 3027 * This function is used to register kernel vm area before 3028 * vmalloc_init() is called. @vm->size and @vm->flags should contain 3029 * proper values on entry and other fields should be zero. On return, 3030 * vm->addr contains the allocated address. 3031 * 3032 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 3033 */ 3034 void __init vm_area_register_early(struct vm_struct *vm, size_t align) 3035 { 3036 unsigned long addr = ALIGN(VMALLOC_START, align); 3037 struct vm_struct *cur, **p; 3038 3039 BUG_ON(vmap_initialized); 3040 3041 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { 3042 if ((unsigned long)cur->addr - addr >= vm->size) 3043 break; 3044 addr = ALIGN((unsigned long)cur->addr + cur->size, align); 3045 } 3046 3047 BUG_ON(addr > VMALLOC_END - vm->size); 3048 vm->addr = (void *)addr; 3049 vm->next = *p; 3050 *p = vm; 3051 kasan_populate_early_vm_area_shadow(vm->addr, vm->size); 3052 } 3053 3054 static void clear_vm_uninitialized_flag(struct vm_struct *vm) 3055 { 3056 /* 3057 * Before removing VM_UNINITIALIZED, 3058 * we should make sure that vm has proper values. 3059 * Pair with smp_rmb() in show_numa_info(). 
3060 */ 3061 smp_wmb(); 3062 vm->flags &= ~VM_UNINITIALIZED; 3063 } 3064 3065 static struct vm_struct *__get_vm_area_node(unsigned long size, 3066 unsigned long align, unsigned long shift, unsigned long flags, 3067 unsigned long start, unsigned long end, int node, 3068 gfp_t gfp_mask, const void *caller) 3069 { 3070 struct vmap_area *va; 3071 struct vm_struct *area; 3072 unsigned long requested_size = size; 3073 3074 BUG_ON(in_interrupt()); 3075 size = ALIGN(size, 1ul << shift); 3076 if (unlikely(!size)) 3077 return NULL; 3078 3079 if (flags & VM_IOREMAP) 3080 align = 1ul << clamp_t(int, get_count_order_long(size), 3081 PAGE_SHIFT, IOREMAP_MAX_ORDER); 3082 3083 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 3084 if (unlikely(!area)) 3085 return NULL; 3086 3087 if (!(flags & VM_NO_GUARD)) 3088 size += PAGE_SIZE; 3089 3090 area->flags = flags; 3091 area->caller = caller; 3092 3093 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); 3094 if (IS_ERR(va)) { 3095 kfree(area); 3096 return NULL; 3097 } 3098 3099 /* 3100 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a 3101 * best-effort approach, as they can be mapped outside of vmalloc code. 3102 * For VM_ALLOC mappings, the pages are marked as accessible after 3103 * getting mapped in __vmalloc_node_range(). 3104 * With hardware tag-based KASAN, marking is skipped for 3105 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 3106 */ 3107 if (!(flags & VM_ALLOC)) 3108 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, 3109 KASAN_VMALLOC_PROT_NORMAL); 3110 3111 return area; 3112 } 3113 3114 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 3115 unsigned long start, unsigned long end, 3116 const void *caller) 3117 { 3118 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, 3119 NUMA_NO_NODE, GFP_KERNEL, caller); 3120 } 3121 3122 /** 3123 * get_vm_area - reserve a contiguous kernel virtual area 3124 * @size: size of the area 3125 * @flags: %VM_IOREMAP for I/O mappings or %VM_ALLOC 3126 * 3127 * Search an area of @size in the kernel virtual mapping area, 3128 * and reserve it for our purposes. Returns the area descriptor 3129 * on success or %NULL on failure. 3130 * 3131 * Return: the area descriptor on success or %NULL on failure. 3132 */ 3133 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 3134 { 3135 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 3136 VMALLOC_START, VMALLOC_END, 3137 NUMA_NO_NODE, GFP_KERNEL, 3138 __builtin_return_address(0)); 3139 } 3140 3141 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 3142 const void *caller) 3143 { 3144 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 3145 VMALLOC_START, VMALLOC_END, 3146 NUMA_NO_NODE, GFP_KERNEL, caller); 3147 } 3148 3149 /** 3150 * find_vm_area - find a continuous kernel virtual area 3151 * @addr: base address 3152 * 3153 * Search for the kernel VM area starting at @addr, and return it. 3154 * It is up to the caller to do all required locking to keep the returned 3155 * pointer valid. 3156 * 3157 * Return: the area descriptor on success or %NULL on failure.
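 *
 * A minimal usage sketch (illustrative only; "ptr" is assumed to be an
 * address somewhere inside a known vmalloc region):
 *
 *	struct vm_struct *area = find_vm_area(ptr);
 *
 *	if (area)
 *		pr_debug("%p belongs to a %lu byte area at %p\n",
 *			 ptr, area->size, area->addr);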
3158 */ 3159 struct vm_struct *find_vm_area(const void *addr) 3160 { 3161 struct vmap_area *va; 3162 3163 va = find_vmap_area((unsigned long)addr); 3164 if (!va) 3165 return NULL; 3166 3167 return va->vm; 3168 } 3169 3170 /** 3171 * remove_vm_area - find and remove a continuous kernel virtual area 3172 * @addr: base address 3173 * 3174 * Search for the kernel VM area starting at @addr, and remove it. 3175 * This function returns the found VM area, but using it is NOT safe 3176 * on SMP machines, except for its size or flags. 3177 * 3178 * Return: the area descriptor on success or %NULL on failure. 3179 */ 3180 struct vm_struct *remove_vm_area(const void *addr) 3181 { 3182 struct vmap_area *va; 3183 struct vm_struct *vm; 3184 3185 might_sleep(); 3186 3187 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 3188 addr)) 3189 return NULL; 3190 3191 va = find_unlink_vmap_area((unsigned long)addr); 3192 if (!va || !va->vm) 3193 return NULL; 3194 vm = va->vm; 3195 3196 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm)); 3197 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm)); 3198 kasan_free_module_shadow(vm); 3199 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm)); 3200 3201 free_unmap_vmap_area(va); 3202 return vm; 3203 } 3204 3205 static inline void set_area_direct_map(const struct vm_struct *area, 3206 int (*set_direct_map)(struct page *page)) 3207 { 3208 int i; 3209 3210 /* HUGE_VMALLOC passes small pages to set_direct_map */ 3211 for (i = 0; i < area->nr_pages; i++) 3212 if (page_address(area->pages[i])) 3213 set_direct_map(area->pages[i]); 3214 } 3215 3216 /* 3217 * Flush the vm mapping and reset the direct map. 3218 */ 3219 static void vm_reset_perms(struct vm_struct *area) 3220 { 3221 unsigned long start = ULONG_MAX, end = 0; 3222 unsigned int page_order = vm_area_page_order(area); 3223 int flush_dmap = 0; 3224 int i; 3225 3226 /* 3227 * Find the start and end range of the direct mappings to make sure that 3228 * the vm_unmap_aliases() flush includes the direct map. 3229 */ 3230 for (i = 0; i < area->nr_pages; i += 1U << page_order) { 3231 unsigned long addr = (unsigned long)page_address(area->pages[i]); 3232 3233 if (addr) { 3234 unsigned long page_size; 3235 3236 page_size = PAGE_SIZE << page_order; 3237 start = min(addr, start); 3238 end = max(addr + page_size, end); 3239 flush_dmap = 1; 3240 } 3241 } 3242 3243 /* 3244 * Set direct map to something invalid so that it won't be cached if 3245 * there are any accesses after the TLB flush, then flush the TLB and 3246 * reset the direct map permissions to the default. 3247 */ 3248 set_area_direct_map(area, set_direct_map_invalid_noflush); 3249 _vm_unmap_aliases(start, end, flush_dmap); 3250 set_area_direct_map(area, set_direct_map_default_noflush); 3251 } 3252 3253 static void delayed_vfree_work(struct work_struct *w) 3254 { 3255 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); 3256 struct llist_node *t, *llnode; 3257 3258 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) 3259 vfree(llnode); 3260 } 3261 3262 /** 3263 * vfree_atomic - release memory allocated by vmalloc() 3264 * @addr: memory base address 3265 * 3266 * This one is just like vfree() but can be called in any atomic context 3267 * except NMIs. 3268 */ 3269 void vfree_atomic(const void *addr) 3270 { 3271 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 3272 3273 BUG_ON(in_nmi()); 3274 kmemleak_free(addr); 3275 3276 /* 3277 * Use raw_cpu_ptr() because this can be called from preemptible 3278 * context. 
Preemption is absolutely fine here, because the llist_add() 3279 * implementation is lockless, so it works even if we are adding to 3280 * another cpu's list. schedule_work() should be fine with this too. 3281 */ 3282 if (addr && llist_add((struct llist_node *)addr, &p->list)) 3283 schedule_work(&p->wq); 3284 } 3285 3286 /** 3287 * vfree - Release memory allocated by vmalloc() 3288 * @addr: Memory base address 3289 * 3290 * Free the virtually continuous memory area starting at @addr, as obtained 3291 * from one of the vmalloc() family of APIs. This will usually also free the 3292 * physical memory underlying the virtual allocation, but that memory is 3293 * reference counted, so it will not be freed until the last user goes away. 3294 * 3295 * If @addr is NULL, no operation is performed. 3296 * 3297 * Context: 3298 * May sleep if called *not* from interrupt context. 3299 * Must not be called in NMI context (strictly speaking, it could be 3300 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 3301 * conventions for vfree() arch-dependent would be a really bad idea). 3302 */ 3303 void vfree(const void *addr) 3304 { 3305 struct vm_struct *vm; 3306 int i; 3307 3308 if (unlikely(in_interrupt())) { 3309 vfree_atomic(addr); 3310 return; 3311 } 3312 3313 BUG_ON(in_nmi()); 3314 kmemleak_free(addr); 3315 might_sleep(); 3316 3317 if (!addr) 3318 return; 3319 3320 vm = remove_vm_area(addr); 3321 if (unlikely(!vm)) { 3322 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 3323 addr); 3324 return; 3325 } 3326 3327 if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS)) 3328 vm_reset_perms(vm); 3329 for (i = 0; i < vm->nr_pages; i++) { 3330 struct page *page = vm->pages[i]; 3331 3332 BUG_ON(!page); 3333 mod_memcg_page_state(page, MEMCG_VMALLOC, -1); 3334 /* 3335 * High-order allocs for huge vmallocs are split, so 3336 * can be freed as an array of order-0 allocations 3337 */ 3338 __free_page(page); 3339 cond_resched(); 3340 } 3341 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages); 3342 kvfree(vm->pages); 3343 kfree(vm); 3344 } 3345 EXPORT_SYMBOL(vfree); 3346 3347 /** 3348 * vunmap - release virtual mapping obtained by vmap() 3349 * @addr: memory base address 3350 * 3351 * Free the virtually contiguous memory area starting at @addr, 3352 * which was created from the page array passed to vmap(). 3353 * 3354 * Must not be called in interrupt context. 3355 */ 3356 void vunmap(const void *addr) 3357 { 3358 struct vm_struct *vm; 3359 3360 BUG_ON(in_interrupt()); 3361 might_sleep(); 3362 3363 if (!addr) 3364 return; 3365 vm = remove_vm_area(addr); 3366 if (unlikely(!vm)) { 3367 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", 3368 addr); 3369 return; 3370 } 3371 kfree(vm); 3372 } 3373 EXPORT_SYMBOL(vunmap); 3374 3375 /** 3376 * vmap - map an array of pages into virtually contiguous space 3377 * @pages: array of page pointers 3378 * @count: number of pages to map 3379 * @flags: vm_area->flags 3380 * @prot: page protection for the mapping 3381 * 3382 * Maps @count pages from @pages into contiguous kernel virtual space. 3383 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself 3384 * (which must be kmalloc or vmalloc memory) and one reference per pages in it 3385 * are transferred from the caller to vmap(), and will be freed / dropped when 3386 * vfree() is called on the return value. 
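 *
 * A minimal usage sketch (illustrative only; "pages" is assumed to be a
 * caller-provided array of @count page pointers whose lifetime the caller
 * keeps managing, since %VM_MAP_PUT_PAGES is not passed):
 *
 *	void *va = vmap(pages, count, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		... access the pages through the contiguous mapping ...
 *		vunmap(va);
 *	}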
3387 * 3388 * Return: the address of the area or %NULL on failure 3389 */ 3390 void *vmap(struct page **pages, unsigned int count, 3391 unsigned long flags, pgprot_t prot) 3392 { 3393 struct vm_struct *area; 3394 unsigned long addr; 3395 unsigned long size; /* In bytes */ 3396 3397 might_sleep(); 3398 3399 if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS)) 3400 return NULL; 3401 3402 /* 3403 * Your top guard is someone else's bottom guard. Not having a top 3404 * guard compromises someone else's mappings too. 3405 */ 3406 if (WARN_ON_ONCE(flags & VM_NO_GUARD)) 3407 flags &= ~VM_NO_GUARD; 3408 3409 if (count > totalram_pages()) 3410 return NULL; 3411 3412 size = (unsigned long)count << PAGE_SHIFT; 3413 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 3414 if (!area) 3415 return NULL; 3416 3417 addr = (unsigned long)area->addr; 3418 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 3419 pages, PAGE_SHIFT) < 0) { 3420 vunmap(area->addr); 3421 return NULL; 3422 } 3423 3424 if (flags & VM_MAP_PUT_PAGES) { 3425 area->pages = pages; 3426 area->nr_pages = count; 3427 } 3428 return area->addr; 3429 } 3430 EXPORT_SYMBOL(vmap); 3431 3432 #ifdef CONFIG_VMAP_PFN 3433 struct vmap_pfn_data { 3434 unsigned long *pfns; 3435 pgprot_t prot; 3436 unsigned int idx; 3437 }; 3438 3439 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 3440 { 3441 struct vmap_pfn_data *data = private; 3442 unsigned long pfn = data->pfns[data->idx]; 3443 pte_t ptent; 3444 3445 if (WARN_ON_ONCE(pfn_valid(pfn))) 3446 return -EINVAL; 3447 3448 ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); 3449 set_pte_at(&init_mm, addr, pte, ptent); 3450 3451 data->idx++; 3452 return 0; 3453 } 3454 3455 /** 3456 * vmap_pfn - map an array of PFNs into virtually contiguous space 3457 * @pfns: array of PFNs 3458 * @count: number of pages to map 3459 * @prot: page protection for the mapping 3460 * 3461 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 3462 * the start address of the mapping. 3463 */ 3464 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 3465 { 3466 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 3467 struct vm_struct *area; 3468 3469 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 3470 __builtin_return_address(0)); 3471 if (!area) 3472 return NULL; 3473 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 3474 count * PAGE_SIZE, vmap_pfn_apply, &data)) { 3475 free_vm_area(area); 3476 return NULL; 3477 } 3478 3479 flush_cache_vmap((unsigned long)area->addr, 3480 (unsigned long)area->addr + count * PAGE_SIZE); 3481 3482 return area->addr; 3483 } 3484 EXPORT_SYMBOL_GPL(vmap_pfn); 3485 #endif /* CONFIG_VMAP_PFN */ 3486 3487 static inline unsigned int 3488 vm_area_alloc_pages(gfp_t gfp, int nid, 3489 unsigned int order, unsigned int nr_pages, struct page **pages) 3490 { 3491 unsigned int nr_allocated = 0; 3492 gfp_t alloc_gfp = gfp; 3493 bool nofail = false; 3494 struct page *page; 3495 int i; 3496 3497 /* 3498 * For order-0 pages we make use of bulk allocator, if 3499 * the page array is partly or not at all populated due 3500 * to fails, fallback to a single page allocator that is 3501 * more permissive. 3502 */ 3503 if (!order) { 3504 /* bulk allocator doesn't support nofail req. 
officially */ 3505 gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL; 3506 3507 while (nr_allocated < nr_pages) { 3508 unsigned int nr, nr_pages_request; 3509 3510 /* 3511 * The maximum allowed request is hard-coded to 100 pages 3512 * per call. That is done in order to prevent long 3513 * preemption-off sections in the bulk allocator, 3514 * so the range is [1:100]. 3515 */ 3516 nr_pages_request = min(100U, nr_pages - nr_allocated); 3517 3518 /* Memory allocation should honour the mempolicy: we must not 3519 * blindly use the nearest node when nid == NUMA_NO_NODE, 3520 * otherwise memory may be allocated on only one node 3521 * while the mempolicy wants allocations to be interleaved. 3522 */ 3523 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) 3524 nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp, 3525 nr_pages_request, 3526 pages + nr_allocated); 3527 3528 else 3529 nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid, 3530 nr_pages_request, 3531 pages + nr_allocated); 3532 3533 nr_allocated += nr; 3534 cond_resched(); 3535 3536 /* 3537 * If nothing or only part of the request was obtained, 3538 * fall back to the single page allocator. 3539 */ 3540 if (nr != nr_pages_request) 3541 break; 3542 } 3543 } else if (gfp & __GFP_NOFAIL) { 3544 /* 3545 * Higher order nofail allocations are really expensive and 3546 * potentially dangerous (premature OOM, disruptive reclaim, 3547 * compaction etc.). 3548 */ 3549 alloc_gfp &= ~__GFP_NOFAIL; 3550 nofail = true; 3551 } 3552 3553 /* High-order pages or fallback path if "bulk" fails. */ 3554 while (nr_allocated < nr_pages) { 3555 if (fatal_signal_pending(current)) 3556 break; 3557 3558 if (nid == NUMA_NO_NODE) 3559 page = alloc_pages_noprof(alloc_gfp, order); 3560 else 3561 page = alloc_pages_node_noprof(nid, alloc_gfp, order); 3562 if (unlikely(!page)) { 3563 if (!nofail) 3564 break; 3565 3566 /* fall back to the zero order allocations */ 3567 alloc_gfp |= __GFP_NOFAIL; 3568 order = 0; 3569 continue; 3570 } 3571 3572 /* 3573 * Higher order allocations must be able to be treated as 3574 * independent small pages by callers (as they can with 3575 * small-page vmallocs). Some drivers do their own refcounting 3576 * on vmalloc_to_page() pages, some use page->mapping, 3577 * page->lru, etc. 3578 */ 3579 if (order) 3580 split_page(page, order); 3581 3582 /* 3583 * Careful, we allocate and map page-order pages, but 3584 * tracking is done per PAGE_SIZE page so as to keep the 3585 * vm_struct APIs independent of the physical/mapped size. 3586 */ 3587 for (i = 0; i < (1U << order); i++) 3588 pages[nr_allocated + i] = page + i; 3589 3590 cond_resched(); 3591 nr_allocated += 1U << order; 3592 } 3593 3594 return nr_allocated; 3595 } 3596 3597 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 3598 pgprot_t prot, unsigned int page_shift, 3599 int node) 3600 { 3601 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 3602 bool nofail = gfp_mask & __GFP_NOFAIL; 3603 unsigned long addr = (unsigned long)area->addr; 3604 unsigned long size = get_vm_area_size(area); 3605 unsigned long array_size; 3606 unsigned int nr_small_pages = size >> PAGE_SHIFT; 3607 unsigned int page_order; 3608 unsigned int flags; 3609 int ret; 3610 3611 array_size = (unsigned long)nr_small_pages * sizeof(struct page *); 3612 3613 if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) 3614 gfp_mask |= __GFP_HIGHMEM; 3615 3616 /* Please note that the recursion is strictly bounded.
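 * Each nested allocation of the page array serves a request that is smaller
 * than the one above it by a factor of PAGE_SIZE / sizeof(struct page *),
 * so the nesting terminates after a very small number of levels.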
*/ 3617 if (array_size > PAGE_SIZE) { 3618 area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node, 3619 area->caller); 3620 } else { 3621 area->pages = kmalloc_node_noprof(array_size, nested_gfp, node); 3622 } 3623 3624 if (!area->pages) { 3625 warn_alloc(gfp_mask, NULL, 3626 "vmalloc error: size %lu, failed to allocate page array size %lu", 3627 nr_small_pages * PAGE_SIZE, array_size); 3628 free_vm_area(area); 3629 return NULL; 3630 } 3631 3632 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); 3633 page_order = vm_area_page_order(area); 3634 3635 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, 3636 node, page_order, nr_small_pages, area->pages); 3637 3638 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 3639 if (gfp_mask & __GFP_ACCOUNT) { 3640 int i; 3641 3642 for (i = 0; i < area->nr_pages; i++) 3643 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); 3644 } 3645 3646 /* 3647 * If not enough pages were obtained to satisfy the allocation 3648 * request, free whatever has been allocated via vfree(). 3649 */ 3650 if (area->nr_pages != nr_small_pages) { 3651 /* 3652 * vm_area_alloc_pages() can fail due to insufficient memory, but 3653 * also due to: 3654 * 3655 * - a pending fatal signal 3656 * - insufficient huge page-order pages 3657 * 3658 * Since we always retry allocations at order-0 in the huge page 3659 * case, a warning for either is spurious. 3660 */ 3661 if (!fatal_signal_pending(current) && page_order == 0) 3662 warn_alloc(gfp_mask, NULL, 3663 "vmalloc error: size %lu, failed to allocate pages", 3664 area->nr_pages * PAGE_SIZE); 3665 goto fail; 3666 } 3667 3668 /* 3669 * Page table allocations ignore the external gfp mask; enforce it 3670 * via the scope API. 3671 */ 3672 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3673 flags = memalloc_nofs_save(); 3674 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3675 flags = memalloc_noio_save(); 3676 3677 do { 3678 ret = vmap_pages_range(addr, addr + size, prot, area->pages, 3679 page_shift); 3680 if (nofail && (ret < 0)) 3681 schedule_timeout_uninterruptible(1); 3682 } while (nofail && (ret < 0)); 3683 3684 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3685 memalloc_nofs_restore(flags); 3686 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3687 memalloc_noio_restore(flags); 3688 3689 if (ret < 0) { 3690 warn_alloc(gfp_mask, NULL, 3691 "vmalloc error: size %lu, failed to map pages", 3692 area->nr_pages * PAGE_SIZE); 3693 goto fail; 3694 } 3695 3696 return area->addr; 3697 3698 fail: 3699 vfree(area->addr); 3700 return NULL; 3701 } 3702 3703 /** 3704 * __vmalloc_node_range - allocate virtually contiguous memory 3705 * @size: allocation size 3706 * @align: desired alignment 3707 * @start: vm area range start 3708 * @end: vm area range end 3709 * @gfp_mask: flags for the page level allocator 3710 * @prot: protection mask for the allocated pages 3711 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 3712 * @node: node to use for allocation or NUMA_NO_NODE 3713 * @caller: caller's return address 3714 * 3715 * Allocate enough pages to cover @size from the page level 3716 * allocator with @gfp_mask flags. Please note that the full set of gfp 3717 * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all 3718 * supported. 3719 * Zone modifiers are not supported. From the reclaim modifiers 3720 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) 3721 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and 3722 * __GFP_RETRY_MAYFAIL are not supported).
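 * For example, GFP_KERNEL | __GFP_NOFAIL is an accepted combination,
 * whereas GFP_NOWAIT (no __GFP_DIRECT_RECLAIM) and
 * GFP_KERNEL | __GFP_NORETRY are not.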
3723 * 3724 * __GFP_NOWARN can be used to suppress failures messages. 3725 * 3726 * Map them into contiguous kernel virtual space, using a pagetable 3727 * protection of @prot. 3728 * 3729 * Return: the address of the area or %NULL on failure 3730 */ 3731 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, 3732 unsigned long start, unsigned long end, gfp_t gfp_mask, 3733 pgprot_t prot, unsigned long vm_flags, int node, 3734 const void *caller) 3735 { 3736 struct vm_struct *area; 3737 void *ret; 3738 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; 3739 unsigned long real_size = size; 3740 unsigned long real_align = align; 3741 unsigned int shift = PAGE_SHIFT; 3742 3743 if (WARN_ON_ONCE(!size)) 3744 return NULL; 3745 3746 if ((size >> PAGE_SHIFT) > totalram_pages()) { 3747 warn_alloc(gfp_mask, NULL, 3748 "vmalloc error: size %lu, exceeds total pages", 3749 real_size); 3750 return NULL; 3751 } 3752 3753 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { 3754 unsigned long size_per_node; 3755 3756 /* 3757 * Try huge pages. Only try for PAGE_KERNEL allocations, 3758 * others like modules don't yet expect huge pages in 3759 * their allocations due to apply_to_page_range not 3760 * supporting them. 3761 */ 3762 3763 size_per_node = size; 3764 if (node == NUMA_NO_NODE) 3765 size_per_node /= num_online_nodes(); 3766 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE) 3767 shift = PMD_SHIFT; 3768 else 3769 shift = arch_vmap_pte_supported_shift(size_per_node); 3770 3771 align = max(real_align, 1UL << shift); 3772 size = ALIGN(real_size, 1UL << shift); 3773 } 3774 3775 again: 3776 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | 3777 VM_UNINITIALIZED | vm_flags, start, end, node, 3778 gfp_mask, caller); 3779 if (!area) { 3780 bool nofail = gfp_mask & __GFP_NOFAIL; 3781 warn_alloc(gfp_mask, NULL, 3782 "vmalloc error: size %lu, vm_struct allocation failed%s", 3783 real_size, (nofail) ? ". Retrying." : ""); 3784 if (nofail) { 3785 schedule_timeout_uninterruptible(1); 3786 goto again; 3787 } 3788 goto fail; 3789 } 3790 3791 /* 3792 * Prepare arguments for __vmalloc_area_node() and 3793 * kasan_unpoison_vmalloc(). 3794 */ 3795 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { 3796 if (kasan_hw_tags_enabled()) { 3797 /* 3798 * Modify protection bits to allow tagging. 3799 * This must be done before mapping. 3800 */ 3801 prot = arch_vmap_pgprot_tagged(prot); 3802 3803 /* 3804 * Skip page_alloc poisoning and zeroing for physical 3805 * pages backing VM_ALLOC mapping. Memory is instead 3806 * poisoned and zeroed by kasan_unpoison_vmalloc(). 3807 */ 3808 gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO; 3809 } 3810 3811 /* Take note that the mapping is PAGE_KERNEL. */ 3812 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; 3813 } 3814 3815 /* Allocate physical pages and map them into vmalloc space. */ 3816 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 3817 if (!ret) 3818 goto fail; 3819 3820 /* 3821 * Mark the pages as accessible, now that they are mapped. 3822 * The condition for setting KASAN_VMALLOC_INIT should complement the 3823 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check 3824 * to make sure that memory is initialized under the same conditions. 3825 * Tag-based KASAN modes only assign tags to normal non-executable 3826 * allocations, see __kasan_unpoison_vmalloc(). 
3827 */ 3828 kasan_flags |= KASAN_VMALLOC_VM_ALLOC; 3829 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && 3830 (gfp_mask & __GFP_SKIP_ZERO)) 3831 kasan_flags |= KASAN_VMALLOC_INIT; 3832 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ 3833 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); 3834 3835 /* 3836 * In this function, newly allocated vm_struct has VM_UNINITIALIZED 3837 * flag. It means that vm_struct is not fully initialized. 3838 * Now, it is fully initialized, so remove this flag here. 3839 */ 3840 clear_vm_uninitialized_flag(area); 3841 3842 size = PAGE_ALIGN(size); 3843 if (!(vm_flags & VM_DEFER_KMEMLEAK)) 3844 kmemleak_vmalloc(area, size, gfp_mask); 3845 3846 return area->addr; 3847 3848 fail: 3849 if (shift > PAGE_SHIFT) { 3850 shift = PAGE_SHIFT; 3851 align = real_align; 3852 size = real_size; 3853 goto again; 3854 } 3855 3856 return NULL; 3857 } 3858 3859 /** 3860 * __vmalloc_node - allocate virtually contiguous memory 3861 * @size: allocation size 3862 * @align: desired alignment 3863 * @gfp_mask: flags for the page level allocator 3864 * @node: node to use for allocation or NUMA_NO_NODE 3865 * @caller: caller's return address 3866 * 3867 * Allocate enough pages to cover @size from the page level allocator with 3868 * @gfp_mask flags. Map them into contiguous kernel virtual space. 3869 * 3870 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 3871 * and __GFP_NOFAIL are not supported 3872 * 3873 * Any use of gfp flags outside of GFP_KERNEL should be consulted 3874 * with mm people. 3875 * 3876 * Return: pointer to the allocated memory or %NULL on error 3877 */ 3878 void *__vmalloc_node_noprof(unsigned long size, unsigned long align, 3879 gfp_t gfp_mask, int node, const void *caller) 3880 { 3881 return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END, 3882 gfp_mask, PAGE_KERNEL, 0, node, caller); 3883 } 3884 /* 3885 * This is only for performance analysis of vmalloc and stress purpose. 3886 * It is required by vmalloc test module, therefore do not use it other 3887 * than that. 3888 */ 3889 #ifdef CONFIG_TEST_VMALLOC_MODULE 3890 EXPORT_SYMBOL_GPL(__vmalloc_node_noprof); 3891 #endif 3892 3893 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) 3894 { 3895 return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE, 3896 __builtin_return_address(0)); 3897 } 3898 EXPORT_SYMBOL(__vmalloc_noprof); 3899 3900 /** 3901 * vmalloc - allocate virtually contiguous memory 3902 * @size: allocation size 3903 * 3904 * Allocate enough pages to cover @size from the page level 3905 * allocator and map them into contiguous kernel virtual space. 3906 * 3907 * For tight control over page level allocator and protection flags 3908 * use __vmalloc() instead. 3909 * 3910 * Return: pointer to the allocated memory or %NULL on error 3911 */ 3912 void *vmalloc_noprof(unsigned long size) 3913 { 3914 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE, 3915 __builtin_return_address(0)); 3916 } 3917 EXPORT_SYMBOL(vmalloc_noprof); 3918 3919 /** 3920 * vmalloc_huge - allocate virtually contiguous memory, allow huge pages 3921 * @size: allocation size 3922 * @gfp_mask: flags for the page level allocator 3923 * 3924 * Allocate enough pages to cover @size from the page level 3925 * allocator and map them into contiguous kernel virtual space. 
3926 * If @size is greater than or equal to PMD_SIZE, allow using 3927 * huge pages for the memory 3928 * 3929 * Return: pointer to the allocated memory or %NULL on error 3930 */ 3931 void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) 3932 { 3933 return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, 3934 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, 3935 NUMA_NO_NODE, __builtin_return_address(0)); 3936 } 3937 EXPORT_SYMBOL_GPL(vmalloc_huge_noprof); 3938 3939 /** 3940 * vzalloc - allocate virtually contiguous memory with zero fill 3941 * @size: allocation size 3942 * 3943 * Allocate enough pages to cover @size from the page level 3944 * allocator and map them into contiguous kernel virtual space. 3945 * The memory allocated is set to zero. 3946 * 3947 * For tight control over page level allocator and protection flags 3948 * use __vmalloc() instead. 3949 * 3950 * Return: pointer to the allocated memory or %NULL on error 3951 */ 3952 void *vzalloc_noprof(unsigned long size) 3953 { 3954 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 3955 __builtin_return_address(0)); 3956 } 3957 EXPORT_SYMBOL(vzalloc_noprof); 3958 3959 /** 3960 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 3961 * @size: allocation size 3962 * 3963 * The resulting memory area is zeroed so it can be mapped to userspace 3964 * without leaking data. 3965 * 3966 * Return: pointer to the allocated memory or %NULL on error 3967 */ 3968 void *vmalloc_user_noprof(unsigned long size) 3969 { 3970 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3971 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 3972 VM_USERMAP, NUMA_NO_NODE, 3973 __builtin_return_address(0)); 3974 } 3975 EXPORT_SYMBOL(vmalloc_user_noprof); 3976 3977 /** 3978 * vmalloc_node - allocate memory on a specific node 3979 * @size: allocation size 3980 * @node: numa node 3981 * 3982 * Allocate enough pages to cover @size from the page level 3983 * allocator and map them into contiguous kernel virtual space. 3984 * 3985 * For tight control over page level allocator and protection flags 3986 * use __vmalloc() instead. 3987 * 3988 * Return: pointer to the allocated memory or %NULL on error 3989 */ 3990 void *vmalloc_node_noprof(unsigned long size, int node) 3991 { 3992 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node, 3993 __builtin_return_address(0)); 3994 } 3995 EXPORT_SYMBOL(vmalloc_node_noprof); 3996 3997 /** 3998 * vzalloc_node - allocate memory on a specific node with zero fill 3999 * @size: allocation size 4000 * @node: numa node 4001 * 4002 * Allocate enough pages to cover @size from the page level 4003 * allocator and map them into contiguous kernel virtual space. 4004 * The memory allocated is set to zero. 4005 * 4006 * Return: pointer to the allocated memory or %NULL on error 4007 */ 4008 void *vzalloc_node_noprof(unsigned long size, int node) 4009 { 4010 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node, 4011 __builtin_return_address(0)); 4012 } 4013 EXPORT_SYMBOL(vzalloc_node_noprof); 4014 4015 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 4016 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 4017 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 4018 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 4019 #else 4020 /* 4021 * 64b systems should always have either DMA or DMA32 zones. For others 4022 * GFP_DMA32 should do the right thing and use the normal zone. 
4023 */ 4024 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 4025 #endif 4026 4027 /** 4028 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 4029 * @size: allocation size 4030 * 4031 * Allocate enough 32bit PA addressable pages to cover @size from the 4032 * page level allocator and map them into contiguous kernel virtual space. 4033 * 4034 * Return: pointer to the allocated memory or %NULL on error 4035 */ 4036 void *vmalloc_32_noprof(unsigned long size) 4037 { 4038 return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 4039 __builtin_return_address(0)); 4040 } 4041 EXPORT_SYMBOL(vmalloc_32_noprof); 4042 4043 /** 4044 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 4045 * @size: allocation size 4046 * 4047 * The resulting memory area is 32bit addressable and zeroed so it can be 4048 * mapped to userspace without leaking data. 4049 * 4050 * Return: pointer to the allocated memory or %NULL on error 4051 */ 4052 void *vmalloc_32_user_noprof(unsigned long size) 4053 { 4054 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, 4055 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 4056 VM_USERMAP, NUMA_NO_NODE, 4057 __builtin_return_address(0)); 4058 } 4059 EXPORT_SYMBOL(vmalloc_32_user_noprof); 4060 4061 /* 4062 * Atomically zero bytes in the iterator. 4063 * 4064 * Returns the number of zeroed bytes. 4065 */ 4066 static size_t zero_iter(struct iov_iter *iter, size_t count) 4067 { 4068 size_t remains = count; 4069 4070 while (remains > 0) { 4071 size_t num, copied; 4072 4073 num = min_t(size_t, remains, PAGE_SIZE); 4074 copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); 4075 remains -= copied; 4076 4077 if (copied < num) 4078 break; 4079 } 4080 4081 return count - remains; 4082 } 4083 4084 /* 4085 * Small helper routine: copy contents from addr to the iterator. 4086 * If a page is not present, fill it with zeroes. 4087 * 4088 * Returns the number of copied bytes. 4089 */ 4090 static size_t aligned_vread_iter(struct iov_iter *iter, 4091 const char *addr, size_t count) 4092 { 4093 size_t remains = count; 4094 struct page *page; 4095 4096 while (remains > 0) { 4097 unsigned long offset, length; 4098 size_t copied = 0; 4099 4100 offset = offset_in_page(addr); 4101 length = PAGE_SIZE - offset; 4102 if (length > remains) 4103 length = remains; 4104 page = vmalloc_to_page(addr); 4105 /* 4106 * To do safe access to this _mapped_ area, we need a lock. But 4107 * adding a lock here means that we need to add the overhead of 4108 * vmalloc()/vfree() calls for this rarely used _debug_ interface. 4109 * Instead of that, we'll use a local mapping via 4110 * copy_page_to_iter_nofault() and accept a small overhead in 4111 * this access function. 4112 */ 4113 if (page) 4114 copied = copy_page_to_iter_nofault(page, offset, 4115 length, iter); 4116 else 4117 copied = zero_iter(iter, length); 4118 4119 addr += copied; 4120 remains -= copied; 4121 4122 if (copied != length) 4123 break; 4124 } 4125 4126 return count - remains; 4127 } 4128 4129 /* 4130 * Read from a vm_map_ram region of memory. 4131 * 4132 * Returns the number of copied bytes.
4133 */ 4134 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr, 4135 size_t count, unsigned long flags) 4136 { 4137 char *start; 4138 struct vmap_block *vb; 4139 struct xarray *xa; 4140 unsigned long offset; 4141 unsigned int rs, re; 4142 size_t remains, n; 4143 4144 /* 4145 * If it's an area created by the vm_map_ram() interface directly, 4146 * and not further subdivided and delegated to a vmap_block, 4147 * handle it here. 4148 */ 4149 if (!(flags & VMAP_BLOCK)) 4150 return aligned_vread_iter(iter, addr, count); 4151 4152 remains = count; 4153 4154 /* 4155 * The area is split into regions and tracked with a vmap_block; read 4156 * out each region and zero-fill the holes between regions. 4157 */ 4158 xa = addr_to_vb_xa((unsigned long) addr); 4159 vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr)); 4160 if (!vb) 4161 goto finished_zero; 4162 4163 spin_lock(&vb->lock); 4164 if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) { 4165 spin_unlock(&vb->lock); 4166 goto finished_zero; 4167 } 4168 4169 for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) { 4170 size_t copied; 4171 4172 if (remains == 0) 4173 goto finished; 4174 4175 start = vmap_block_vaddr(vb->va->va_start, rs); 4176 4177 if (addr < start) { 4178 size_t to_zero = min_t(size_t, start - addr, remains); 4179 size_t zeroed = zero_iter(iter, to_zero); 4180 4181 addr += zeroed; 4182 remains -= zeroed; 4183 4184 if (remains == 0 || zeroed != to_zero) 4185 goto finished; 4186 } 4187 4188 /* it could start reading from the middle of a used region */ 4189 offset = offset_in_page(addr); 4190 n = ((re - rs + 1) << PAGE_SHIFT) - offset; 4191 if (n > remains) 4192 n = remains; 4193 4194 copied = aligned_vread_iter(iter, start + offset, n); 4195 4196 addr += copied; 4197 remains -= copied; 4198 4199 if (copied != n) 4200 goto finished; 4201 } 4202 4203 spin_unlock(&vb->lock); 4204 4205 finished_zero: 4206 /* zero-fill the remaining dirty or free regions */ 4207 return count - remains + zero_iter(iter, remains); 4208 finished: 4209 /* We couldn't copy/zero everything */ 4210 spin_unlock(&vb->lock); 4211 return count - remains; 4212 } 4213 4214 /** 4215 * vread_iter() - read vmalloc area in a safe way to an iterator. 4216 * @iter: the iterator to which data should be written. 4217 * @addr: vm address. 4218 * @count: number of bytes to be read. 4219 * 4220 * This function checks that @addr is a valid vmalloc'ed area and 4221 * copies data from that area to the given iterator. If the given memory 4222 * range of [addr...addr+count) includes some valid address, data is 4223 * copied to the proper area of @iter. If there are memory holes, they'll 4224 * be zero-filled. An IOREMAP area is treated as a memory hole and no copy is done. 4225 * 4226 * If [addr...addr+count) doesn't intersect any live vm_struct area, 4227 * %0 is returned. 4228 * 4229 * Note: In usual ops, vread_iter() is never necessary because the caller 4230 * should know that the vmalloc() area is valid and can use memcpy(). 4231 * This is for routines which have to access the vmalloc area without 4232 * any prior information, such as /proc/kcore.
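 *
 * A minimal usage sketch (illustrative only; 'buf', 'len' and 'vm_addr'
 * are hypothetical and not part of this file):
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	long copied;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *	copied = vread_iter(&iter, vm_addr, len);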
4233 * 4234 * Return: number of bytes for which @addr and @iter should be advanced 4235 * (same number as @count) or %0 if [addr...addr+count) doesn't 4236 * include any intersection with a valid vmalloc area 4237 */ 4238 long vread_iter(struct iov_iter *iter, const char *addr, size_t count) 4239 { 4240 struct vmap_node *vn; 4241 struct vmap_area *va; 4242 struct vm_struct *vm; 4243 char *vaddr; 4244 size_t n, size, flags, remains; 4245 unsigned long next; 4246 4247 addr = kasan_reset_tag(addr); 4248 4249 /* Don't allow overflow */ 4250 if ((unsigned long) addr + count < count) 4251 count = -(unsigned long) addr; 4252 4253 remains = count; 4254 4255 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); 4256 if (!vn) 4257 goto finished_zero; 4258 4259 /* no intersection with a live vmap_area */ 4260 if ((unsigned long)addr + remains <= va->va_start) 4261 goto finished_zero; 4262 4263 do { 4264 size_t copied; 4265 4266 if (remains == 0) 4267 goto finished; 4268 4269 vm = va->vm; 4270 flags = va->flags & VMAP_FLAGS_MASK; 4271 /* 4272 * VMAP_BLOCK indicates a sub-type of vm_map_ram area and needs 4273 * to be set together with VMAP_RAM. 4274 */ 4275 WARN_ON(flags == VMAP_BLOCK); 4276 4277 if (!vm && !flags) 4278 goto next_va; 4279 4280 if (vm && (vm->flags & VM_UNINITIALIZED)) 4281 goto next_va; 4282 4283 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 4284 smp_rmb(); 4285 4286 vaddr = (char *) va->va_start; 4287 size = vm ? get_vm_area_size(vm) : va_size(va); 4288 4289 if (addr >= vaddr + size) 4290 goto next_va; 4291 4292 if (addr < vaddr) { 4293 size_t to_zero = min_t(size_t, vaddr - addr, remains); 4294 size_t zeroed = zero_iter(iter, to_zero); 4295 4296 addr += zeroed; 4297 remains -= zeroed; 4298 4299 if (remains == 0 || zeroed != to_zero) 4300 goto finished; 4301 } 4302 4303 n = vaddr + size - addr; 4304 if (n > remains) 4305 n = remains; 4306 4307 if (flags & VMAP_RAM) 4308 copied = vmap_ram_vread_iter(iter, addr, n, flags); 4309 else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE)))) 4310 copied = aligned_vread_iter(iter, addr, n); 4311 else /* IOREMAP | SPARSE area is treated as memory hole */ 4312 copied = zero_iter(iter, n); 4313 4314 addr += copied; 4315 remains -= copied; 4316 4317 if (copied != n) 4318 goto finished; 4319 4320 next_va: 4321 next = va->va_end; 4322 spin_unlock(&vn->busy.lock); 4323 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); 4324 4325 finished_zero: 4326 if (vn) 4327 spin_unlock(&vn->busy.lock); 4328 4329 /* zero-fill memory holes */ 4330 return count - remains + zero_iter(iter, remains); 4331 finished: 4332 /* Nothing remains, or we couldn't copy/zero everything. */ 4333 if (vn) 4334 spin_unlock(&vn->busy.lock); 4335 4336 return count - remains; 4337 } 4338 4339 /** 4340 * remap_vmalloc_range_partial - map vmalloc pages to userspace 4341 * @vma: vma to cover 4342 * @uaddr: target user address to start at 4343 * @kaddr: virtual address of vmalloc kernel memory 4344 * @pgoff: offset from @kaddr to start at 4345 * @size: size of map area 4346 * 4347 * Returns: 0 for success, -Exxx on failure 4348 * 4349 * This function checks that @kaddr is a valid vmalloc'ed area, 4350 * and that it is big enough to cover the range starting at 4351 * @uaddr in @vma. Will return failure if that criterion isn't 4352 * met.
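 *
 * A minimal ->mmap() handler sketch (illustrative only; my_mmap() and
 * my_buf are hypothetical, and my_buf is assumed to come from
 * vmalloc_user() so the area carries VM_USERMAP):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range_partial(vma, vma->vm_start, my_buf,
 *						   vma->vm_pgoff,
 *						   vma->vm_end - vma->vm_start);
 *	}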
4353 * 4354 * Similar to remap_pfn_range() (see mm/memory.c) 4355 */ 4356 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 4357 void *kaddr, unsigned long pgoff, 4358 unsigned long size) 4359 { 4360 struct vm_struct *area; 4361 unsigned long off; 4362 unsigned long end_index; 4363 4364 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) 4365 return -EINVAL; 4366 4367 size = PAGE_ALIGN(size); 4368 4369 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 4370 return -EINVAL; 4371 4372 area = find_vm_area(kaddr); 4373 if (!area) 4374 return -EINVAL; 4375 4376 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 4377 return -EINVAL; 4378 4379 if (check_add_overflow(size, off, &end_index) || 4380 end_index > get_vm_area_size(area)) 4381 return -EINVAL; 4382 kaddr += off; 4383 4384 do { 4385 struct page *page = vmalloc_to_page(kaddr); 4386 int ret; 4387 4388 ret = vm_insert_page(vma, uaddr, page); 4389 if (ret) 4390 return ret; 4391 4392 uaddr += PAGE_SIZE; 4393 kaddr += PAGE_SIZE; 4394 size -= PAGE_SIZE; 4395 } while (size > 0); 4396 4397 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); 4398 4399 return 0; 4400 } 4401 4402 /** 4403 * remap_vmalloc_range - map vmalloc pages to userspace 4404 * @vma: vma to cover (map full range of vma) 4405 * @addr: vmalloc memory 4406 * @pgoff: number of pages into addr before first page to map 4407 * 4408 * Returns: 0 for success, -Exxx on failure 4409 * 4410 * This function checks that addr is a valid vmalloc'ed area, and 4411 * that it is big enough to cover the vma. Will return failure if 4412 * that criterion isn't met. 4413 * 4414 * Similar to remap_pfn_range() (see mm/memory.c) 4415 */ 4416 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 4417 unsigned long pgoff) 4418 { 4419 return remap_vmalloc_range_partial(vma, vma->vm_start, 4420 addr, pgoff, 4421 vma->vm_end - vma->vm_start); 4422 } 4423 EXPORT_SYMBOL(remap_vmalloc_range); 4424 4425 void free_vm_area(struct vm_struct *area) 4426 { 4427 struct vm_struct *ret; 4428 ret = remove_vm_area(area->addr); 4429 BUG_ON(ret != area); 4430 kfree(area); 4431 } 4432 EXPORT_SYMBOL_GPL(free_vm_area); 4433 4434 #ifdef CONFIG_SMP 4435 static struct vmap_area *node_to_va(struct rb_node *n) 4436 { 4437 return rb_entry_safe(n, struct vmap_area, rb_node); 4438 } 4439 4440 /** 4441 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 4442 * @addr: target address 4443 * 4444 * Returns: the vmap_area if it is found. If there is no such area, 4445 * the first highest (in reverse order) vmap_area is returned, 4446 * i.e. va->va_start < addr && va->va_end < addr, or NULL 4447 * if there are no areas before @addr. 4448 */ 4449 static struct vmap_area * 4450 pvm_find_va_enclose_addr(unsigned long addr) 4451 { 4452 struct vmap_area *va, *tmp; 4453 struct rb_node *n; 4454 4455 n = free_vmap_area_root.rb_node; 4456 va = NULL; 4457 4458 while (n) { 4459 tmp = rb_entry(n, struct vmap_area, rb_node); 4460 if (tmp->va_start <= addr) { 4461 va = tmp; 4462 if (tmp->va_end >= addr) 4463 break; 4464 4465 n = n->rb_right; 4466 } else { 4467 n = n->rb_left; 4468 } 4469 } 4470 4471 return va; 4472 } 4473 4474 /** 4475 * pvm_determine_end_from_reverse - find the highest aligned address 4476 * of free block below VMALLOC_END 4477 * @va: 4478 * in - the VA we start the search from (reverse order); 4479 * out - the VA with the highest aligned end address.
4480 * @align: alignment for required highest address 4481 * 4482 * Returns: determined end address within vmap_area 4483 */ 4484 static unsigned long 4485 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 4486 { 4487 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 4488 unsigned long addr; 4489 4490 if (likely(*va)) { 4491 list_for_each_entry_from_reverse((*va), 4492 &free_vmap_area_list, list) { 4493 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 4494 if ((*va)->va_start < addr) 4495 return addr; 4496 } 4497 } 4498 4499 return 0; 4500 } 4501 4502 /** 4503 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 4504 * @offsets: array containing offset of each area 4505 * @sizes: array containing size of each area 4506 * @nr_vms: the number of areas to allocate 4507 * @align: alignment, all entries in @offsets and @sizes must be aligned to this 4508 * 4509 * Returns: kmalloc'd vm_struct pointer array pointing to allocated 4510 * vm_structs on success, %NULL on failure 4511 * 4512 * Percpu allocator wants to use congruent vm areas so that it can 4513 * maintain the offsets among percpu areas. This function allocates 4514 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 4515 * be scattered pretty far, distance between two areas easily going up 4516 * to gigabytes. To avoid interacting with regular vmallocs, these 4517 * areas are allocated from top. 4518 * 4519 * Despite its complicated look, this allocator is rather simple. It 4520 * does everything top-down and scans free blocks from the end looking 4521 * for matching base. While scanning, if any of the areas do not fit the 4522 * base address is pulled down to fit the area. Scanning is repeated till 4523 * all the areas fit and then all necessary data structures are inserted 4524 * and the result is returned. 4525 */ 4526 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 4527 const size_t *sizes, int nr_vms, 4528 size_t align) 4529 { 4530 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 4531 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 4532 struct vmap_area **vas, *va; 4533 struct vm_struct **vms; 4534 int area, area2, last_area, term_area; 4535 unsigned long base, start, size, end, last_end, orig_start, orig_end; 4536 bool purged = false; 4537 4538 /* verify parameters and allocate data structures */ 4539 BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 4540 for (last_area = 0, area = 0; area < nr_vms; area++) { 4541 start = offsets[area]; 4542 end = start + sizes[area]; 4543 4544 /* is everything aligned properly? 
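Every entry in @offsets[] and @sizes[] must be a multiple of @align.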
*/ 4545 BUG_ON(!IS_ALIGNED(offsets[area], align)); 4546 BUG_ON(!IS_ALIGNED(sizes[area], align)); 4547 4548 /* detect the area with the highest address */ 4549 if (start > offsets[last_area]) 4550 last_area = area; 4551 4552 for (area2 = area + 1; area2 < nr_vms; area2++) { 4553 unsigned long start2 = offsets[area2]; 4554 unsigned long end2 = start2 + sizes[area2]; 4555 4556 BUG_ON(start2 < end && start < end2); 4557 } 4558 } 4559 last_end = offsets[last_area] + sizes[last_area]; 4560 4561 if (vmalloc_end - vmalloc_start < last_end) { 4562 WARN_ON(true); 4563 return NULL; 4564 } 4565 4566 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 4567 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 4568 if (!vas || !vms) 4569 goto err_free2; 4570 4571 for (area = 0; area < nr_vms; area++) { 4572 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 4573 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 4574 if (!vas[area] || !vms[area]) 4575 goto err_free; 4576 } 4577 retry: 4578 spin_lock(&free_vmap_area_lock); 4579 4580 /* start scanning - we scan from the top, begin with the last area */ 4581 area = term_area = last_area; 4582 start = offsets[area]; 4583 end = start + sizes[area]; 4584 4585 va = pvm_find_va_enclose_addr(vmalloc_end); 4586 base = pvm_determine_end_from_reverse(&va, align) - end; 4587 4588 while (true) { 4589 /* 4590 * base might have underflowed, add last_end before 4591 * comparing. 4592 */ 4593 if (base + last_end < vmalloc_start + last_end) 4594 goto overflow; 4595 4596 /* 4597 * Fitting base has not been found. 4598 */ 4599 if (va == NULL) 4600 goto overflow; 4601 4602 /* 4603 * If required width exceeds current VA block, move 4604 * base downwards and then recheck. 4605 */ 4606 if (base + end > va->va_end) { 4607 base = pvm_determine_end_from_reverse(&va, align) - end; 4608 term_area = area; 4609 continue; 4610 } 4611 4612 /* 4613 * If this VA does not fit, move base downwards and recheck. 4614 */ 4615 if (base + start < va->va_start) { 4616 va = node_to_va(rb_prev(&va->rb_node)); 4617 base = pvm_determine_end_from_reverse(&va, align) - end; 4618 term_area = area; 4619 continue; 4620 } 4621 4622 /* 4623 * This area fits, move on to the previous one. If 4624 * the previous one is the terminal one, we're done. 4625 */ 4626 area = (area + nr_vms - 1) % nr_vms; 4627 if (area == term_area) 4628 break; 4629 4630 start = offsets[area]; 4631 end = start + sizes[area]; 4632 va = pvm_find_va_enclose_addr(base + end); 4633 } 4634 4635 /* we've found a fitting base, insert all va's */ 4636 for (area = 0; area < nr_vms; area++) { 4637 int ret; 4638 4639 start = base + offsets[area]; 4640 size = sizes[area]; 4641 4642 va = pvm_find_va_enclose_addr(start); 4643 if (WARN_ON_ONCE(va == NULL)) 4644 /* It is a BUG(), but trigger recovery instead. */ 4645 goto recovery; 4646 4647 ret = va_clip(&free_vmap_area_root, 4648 &free_vmap_area_list, va, start, size); 4649 if (WARN_ON_ONCE(unlikely(ret))) 4650 /* It is a BUG(), but trigger recovery instead. */ 4651 goto recovery; 4652 4653 /* Allocated area. 
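Reuse the preallocated vmap_area to describe the range just clipped out of the free tree.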
*/ 4654 va = vas[area]; 4655 va->va_start = start; 4656 va->va_end = start + size; 4657 } 4658 4659 spin_unlock(&free_vmap_area_lock); 4660 4661 /* populate the kasan shadow space */ 4662 for (area = 0; area < nr_vms; area++) { 4663 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) 4664 goto err_free_shadow; 4665 } 4666 4667 /* insert all vm's */ 4668 for (area = 0; area < nr_vms; area++) { 4669 struct vmap_node *vn = addr_to_node(vas[area]->va_start); 4670 4671 spin_lock(&vn->busy.lock); 4672 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); 4673 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 4674 pcpu_get_vm_areas); 4675 spin_unlock(&vn->busy.lock); 4676 } 4677 4678 /* 4679 * Mark allocated areas as accessible. Do it now as a best-effort 4680 * approach, as they can be mapped outside of vmalloc code. 4681 * With hardware tag-based KASAN, marking is skipped for 4682 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 4683 */ 4684 for (area = 0; area < nr_vms; area++) 4685 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, 4686 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); 4687 4688 kfree(vas); 4689 return vms; 4690 4691 recovery: 4692 /* 4693 * Remove previously allocated areas. There is no 4694 * need to remove these areas from the busy tree, 4695 * because they are inserted only on the final step 4696 * and only when pcpu_get_vm_areas() succeeds. 4697 */ 4698 while (area--) { 4699 orig_start = vas[area]->va_start; 4700 orig_end = vas[area]->va_end; 4701 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 4702 &free_vmap_area_list); 4703 if (va) 4704 kasan_release_vmalloc(orig_start, orig_end, 4705 va->va_start, va->va_end); 4706 vas[area] = NULL; 4707 } 4708 4709 overflow: 4710 spin_unlock(&free_vmap_area_lock); 4711 if (!purged) { 4712 reclaim_and_purge_vmap_areas(); 4713 purged = true; 4714 4715 /* Before "retry", check if we recover. */ 4716 for (area = 0; area < nr_vms; area++) { 4717 if (vas[area]) 4718 continue; 4719 4720 vas[area] = kmem_cache_zalloc( 4721 vmap_area_cachep, GFP_KERNEL); 4722 if (!vas[area]) 4723 goto err_free; 4724 } 4725 4726 goto retry; 4727 } 4728 4729 err_free: 4730 for (area = 0; area < nr_vms; area++) { 4731 if (vas[area]) 4732 kmem_cache_free(vmap_area_cachep, vas[area]); 4733 4734 kfree(vms[area]); 4735 } 4736 err_free2: 4737 kfree(vas); 4738 kfree(vms); 4739 return NULL; 4740 4741 err_free_shadow: 4742 spin_lock(&free_vmap_area_lock); 4743 /* 4744 * We release all the vmalloc shadows, even the ones for regions that 4745 * hadn't been successfully added. This relies on kasan_release_vmalloc 4746 * being able to tolerate this case. 4747 */ 4748 for (area = 0; area < nr_vms; area++) { 4749 orig_start = vas[area]->va_start; 4750 orig_end = vas[area]->va_end; 4751 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 4752 &free_vmap_area_list); 4753 if (va) 4754 kasan_release_vmalloc(orig_start, orig_end, 4755 va->va_start, va->va_end); 4756 vas[area] = NULL; 4757 kfree(vms[area]); 4758 } 4759 spin_unlock(&free_vmap_area_lock); 4760 kfree(vas); 4761 kfree(vms); 4762 return NULL; 4763 } 4764 4765 /** 4766 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 4767 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 4768 * @nr_vms: the number of allocated areas 4769 * 4770 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
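 *
 * Pairing sketch (illustrative only; error handling elided):
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, nr_vms);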
4771 */ 4772 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 4773 { 4774 int i; 4775 4776 for (i = 0; i < nr_vms; i++) 4777 free_vm_area(vms[i]); 4778 kfree(vms); 4779 } 4780 #endif /* CONFIG_SMP */ 4781 4782 #ifdef CONFIG_PRINTK 4783 bool vmalloc_dump_obj(void *object) 4784 { 4785 const void *caller; 4786 struct vm_struct *vm; 4787 struct vmap_area *va; 4788 struct vmap_node *vn; 4789 unsigned long addr; 4790 unsigned int nr_pages; 4791 4792 addr = PAGE_ALIGN((unsigned long) object); 4793 vn = addr_to_node(addr); 4794 4795 if (!spin_trylock(&vn->busy.lock)) 4796 return false; 4797 4798 va = __find_vmap_area(addr, &vn->busy.root); 4799 if (!va || !va->vm) { 4800 spin_unlock(&vn->busy.lock); 4801 return false; 4802 } 4803 4804 vm = va->vm; 4805 addr = (unsigned long) vm->addr; 4806 caller = vm->caller; 4807 nr_pages = vm->nr_pages; 4808 spin_unlock(&vn->busy.lock); 4809 4810 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", 4811 nr_pages, addr, caller); 4812 4813 return true; 4814 } 4815 #endif 4816 4817 #ifdef CONFIG_PROC_FS 4818 static void show_numa_info(struct seq_file *m, struct vm_struct *v) 4819 { 4820 if (IS_ENABLED(CONFIG_NUMA)) { 4821 unsigned int nr, *counters = m->private; 4822 unsigned int step = 1U << vm_area_page_order(v); 4823 4824 if (!counters) 4825 return; 4826 4827 if (v->flags & VM_UNINITIALIZED) 4828 return; 4829 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 4830 smp_rmb(); 4831 4832 memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 4833 4834 for (nr = 0; nr < v->nr_pages; nr += step) 4835 counters[page_to_nid(v->pages[nr])] += step; 4836 for_each_node_state(nr, N_HIGH_MEMORY) 4837 if (counters[nr]) 4838 seq_printf(m, " N%u=%u", nr, counters[nr]); 4839 } 4840 } 4841 4842 static void show_purge_info(struct seq_file *m) 4843 { 4844 struct vmap_node *vn; 4845 struct vmap_area *va; 4846 int i; 4847 4848 for (i = 0; i < nr_vmap_nodes; i++) { 4849 vn = &vmap_nodes[i]; 4850 4851 spin_lock(&vn->lazy.lock); 4852 list_for_each_entry(va, &vn->lazy.head, list) { 4853 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 4854 (void *)va->va_start, (void *)va->va_end, 4855 va->va_end - va->va_start); 4856 } 4857 spin_unlock(&vn->lazy.lock); 4858 } 4859 } 4860 4861 static int vmalloc_info_show(struct seq_file *m, void *p) 4862 { 4863 struct vmap_node *vn; 4864 struct vmap_area *va; 4865 struct vm_struct *v; 4866 int i; 4867 4868 for (i = 0; i < nr_vmap_nodes; i++) { 4869 vn = &vmap_nodes[i]; 4870 4871 spin_lock(&vn->busy.lock); 4872 list_for_each_entry(va, &vn->busy.head, list) { 4873 if (!va->vm) { 4874 if (va->flags & VMAP_RAM) 4875 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 4876 (void *)va->va_start, (void *)va->va_end, 4877 va->va_end - va->va_start); 4878 4879 continue; 4880 } 4881 4882 v = va->vm; 4883 4884 seq_printf(m, "0x%pK-0x%pK %7ld", 4885 v->addr, v->addr + v->size, v->size); 4886 4887 if (v->caller) 4888 seq_printf(m, " %pS", v->caller); 4889 4890 if (v->nr_pages) 4891 seq_printf(m, " pages=%d", v->nr_pages); 4892 4893 if (v->phys_addr) 4894 seq_printf(m, " phys=%pa", &v->phys_addr); 4895 4896 if (v->flags & VM_IOREMAP) 4897 seq_puts(m, " ioremap"); 4898 4899 if (v->flags & VM_SPARSE) 4900 seq_puts(m, " sparse"); 4901 4902 if (v->flags & VM_ALLOC) 4903 seq_puts(m, " vmalloc"); 4904 4905 if (v->flags & VM_MAP) 4906 seq_puts(m, " vmap"); 4907 4908 if (v->flags & VM_USERMAP) 4909 seq_puts(m, " user"); 4910 4911 if (v->flags & VM_DMA_COHERENT) 4912 seq_puts(m, " dma-coherent"); 4913 4914 if (is_vmalloc_addr(v->pages)) 
4915 seq_puts(m, " vpages"); 4916 4917 show_numa_info(m, v); 4918 seq_putc(m, '\n'); 4919 } 4920 spin_unlock(&vn->busy.lock); 4921 } 4922 4923 /* 4924 * As a final step, dump "unpurged" areas. 4925 */ 4926 show_purge_info(m); 4927 return 0; 4928 } 4929 4930 static int __init proc_vmalloc_init(void) 4931 { 4932 void *priv_data = NULL; 4933 4934 if (IS_ENABLED(CONFIG_NUMA)) 4935 priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 4936 4937 proc_create_single_data("vmallocinfo", 4938 0400, NULL, vmalloc_info_show, priv_data); 4939 4940 return 0; 4941 } 4942 module_init(proc_vmalloc_init); 4943 4944 #endif 4945 4946 static void __init vmap_init_free_space(void) 4947 { 4948 unsigned long vmap_start = 1; 4949 const unsigned long vmap_end = ULONG_MAX; 4950 struct vmap_area *free; 4951 struct vm_struct *busy; 4952 4953 /* 4954 * B F B B B F 4955 * -|-----|.....|-----|-----|-----|.....|- 4956 * | The KVA space | 4957 * |<--------------------------------->| 4958 */ 4959 for (busy = vmlist; busy; busy = busy->next) { 4960 if ((unsigned long) busy->addr - vmap_start > 0) { 4961 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 4962 if (!WARN_ON_ONCE(!free)) { 4963 free->va_start = vmap_start; 4964 free->va_end = (unsigned long) busy->addr; 4965 4966 insert_vmap_area_augment(free, NULL, 4967 &free_vmap_area_root, 4968 &free_vmap_area_list); 4969 } 4970 } 4971 4972 vmap_start = (unsigned long) busy->addr + busy->size; 4973 } 4974 4975 if (vmap_end - vmap_start > 0) { 4976 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 4977 if (!WARN_ON_ONCE(!free)) { 4978 free->va_start = vmap_start; 4979 free->va_end = vmap_end; 4980 4981 insert_vmap_area_augment(free, NULL, 4982 &free_vmap_area_root, 4983 &free_vmap_area_list); 4984 } 4985 } 4986 } 4987 4988 static void vmap_init_nodes(void) 4989 { 4990 struct vmap_node *vn; 4991 int i, n; 4992 4993 #if BITS_PER_LONG == 64 4994 /* 4995 * The maximum number of nodes is fixed and bound to 128, thus 4996 * the scale factor is 1 for systems where the number of cores 4997 * is less than or equal to that threshold. 4998 * 4999 * As for NUMA awareness: for bigger systems, for example 5000 * multi-socket NUMA systems, where we can end up with thousands 5001 * of cores in total, a "sub-numa-clustering" should be added. 5002 * 5003 * In that case a NUMA domain is considered a single entity 5004 * with dedicated sub-nodes in it, each describing one group or 5005 * set of cores. Therefore per-domain purging is supposed to 5006 * be added, as well as per-domain balancing. 5007 */ 5008 n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); 5009 5010 if (n > 1) { 5011 vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN); 5012 if (vn) { 5013 /* Node partition is 16 pages. */ 5014 vmap_zone_size = (1 << 4) * PAGE_SIZE; 5015 nr_vmap_nodes = n; 5016 vmap_nodes = vn; 5017 } else { 5018 pr_err("Failed to allocate an array.
Disable a node layer\n"); 5019 } 5020 } 5021 #endif 5022 5023 for (n = 0; n < nr_vmap_nodes; n++) { 5024 vn = &vmap_nodes[n]; 5025 vn->busy.root = RB_ROOT; 5026 INIT_LIST_HEAD(&vn->busy.head); 5027 spin_lock_init(&vn->busy.lock); 5028 5029 vn->lazy.root = RB_ROOT; 5030 INIT_LIST_HEAD(&vn->lazy.head); 5031 spin_lock_init(&vn->lazy.lock); 5032 5033 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { 5034 INIT_LIST_HEAD(&vn->pool[i].head); 5035 WRITE_ONCE(vn->pool[i].len, 0); 5036 } 5037 5038 spin_lock_init(&vn->pool_lock); 5039 } 5040 } 5041 5042 static unsigned long 5043 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 5044 { 5045 unsigned long count; 5046 struct vmap_node *vn; 5047 int i, j; 5048 5049 for (count = 0, i = 0; i < nr_vmap_nodes; i++) { 5050 vn = &vmap_nodes[i]; 5051 5052 for (j = 0; j < MAX_VA_SIZE_PAGES; j++) 5053 count += READ_ONCE(vn->pool[j].len); 5054 } 5055 5056 return count ? count : SHRINK_EMPTY; 5057 } 5058 5059 static unsigned long 5060 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 5061 { 5062 int i; 5063 5064 for (i = 0; i < nr_vmap_nodes; i++) 5065 decay_va_pool_node(&vmap_nodes[i], true); 5066 5067 return SHRINK_STOP; 5068 } 5069 5070 void __init vmalloc_init(void) 5071 { 5072 struct shrinker *vmap_node_shrinker; 5073 struct vmap_area *va; 5074 struct vmap_node *vn; 5075 struct vm_struct *tmp; 5076 int i; 5077 5078 /* 5079 * Create the cache for vmap_area objects. 5080 */ 5081 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 5082 5083 for_each_possible_cpu(i) { 5084 struct vmap_block_queue *vbq; 5085 struct vfree_deferred *p; 5086 5087 vbq = &per_cpu(vmap_block_queue, i); 5088 spin_lock_init(&vbq->lock); 5089 INIT_LIST_HEAD(&vbq->free); 5090 p = &per_cpu(vfree_deferred, i); 5091 init_llist_head(&p->list); 5092 INIT_WORK(&p->wq, delayed_vfree_work); 5093 xa_init(&vbq->vmap_blocks); 5094 } 5095 5096 /* 5097 * Setup nodes before importing vmlist. 5098 */ 5099 vmap_init_nodes(); 5100 5101 /* Import existing vmlist entries. */ 5102 for (tmp = vmlist; tmp; tmp = tmp->next) { 5103 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5104 if (WARN_ON_ONCE(!va)) 5105 continue; 5106 5107 va->va_start = (unsigned long)tmp->addr; 5108 va->va_end = va->va_start + tmp->size; 5109 va->vm = tmp; 5110 5111 vn = addr_to_node(va->va_start); 5112 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 5113 } 5114 5115 /* 5116 * Now we can initialize a free vmap space. 5117 */ 5118 vmap_init_free_space(); 5119 vmap_initialized = true; 5120 5121 vmap_node_shrinker = shrinker_alloc(0, "vmap-node"); 5122 if (!vmap_node_shrinker) { 5123 pr_err("Failed to allocate vmap-node shrinker!\n"); 5124 return; 5125 } 5126 5127 vmap_node_shrinker->count_objects = vmap_node_shrink_count; 5128 vmap_node_shrinker->scan_objects = vmap_node_shrink_scan; 5129 shrinker_register(vmap_node_shrinker); 5130 } 5131