1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 1993 Linus Torvalds 4 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 5 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 6 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 7 * Numa awareness, Christoph Lameter, SGI, June 2005 8 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019 9 */ 10 11 #include <linux/vmalloc.h> 12 #include <linux/mm.h> 13 #include <linux/module.h> 14 #include <linux/highmem.h> 15 #include <linux/sched/signal.h> 16 #include <linux/slab.h> 17 #include <linux/spinlock.h> 18 #include <linux/interrupt.h> 19 #include <linux/proc_fs.h> 20 #include <linux/seq_file.h> 21 #include <linux/set_memory.h> 22 #include <linux/debugobjects.h> 23 #include <linux/kallsyms.h> 24 #include <linux/list.h> 25 #include <linux/notifier.h> 26 #include <linux/rbtree.h> 27 #include <linux/xarray.h> 28 #include <linux/io.h> 29 #include <linux/rcupdate.h> 30 #include <linux/pfn.h> 31 #include <linux/kmemleak.h> 32 #include <linux/atomic.h> 33 #include <linux/compiler.h> 34 #include <linux/memcontrol.h> 35 #include <linux/llist.h> 36 #include <linux/bitops.h> 37 #include <linux/rbtree_augmented.h> 38 #include <linux/overflow.h> 39 #include <linux/pgtable.h> 40 #include <linux/uaccess.h> 41 #include <linux/hugetlb.h> 42 #include <linux/sched/mm.h> 43 #include <asm/tlbflush.h> 44 #include <asm/shmparam.h> 45 46 #include "internal.h" 47 #include "pgalloc-track.h" 48 49 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP 50 static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1; 51 52 static int __init set_nohugeiomap(char *str) 53 { 54 ioremap_max_page_shift = PAGE_SHIFT; 55 return 0; 56 } 57 early_param("nohugeiomap", set_nohugeiomap); 58 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 59 static const unsigned int ioremap_max_page_shift = PAGE_SHIFT; 60 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 61 62 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 63 static bool __ro_after_init vmap_allow_huge = true; 64 65 static int __init set_nohugevmalloc(char *str) 66 { 67 vmap_allow_huge = false; 68 return 0; 69 } 70 early_param("nohugevmalloc", set_nohugevmalloc); 71 #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ 72 static const bool vmap_allow_huge = false; 73 #endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ 74 75 bool is_vmalloc_addr(const void *x) 76 { 77 unsigned long addr = (unsigned long)x; 78 79 return addr >= VMALLOC_START && addr < VMALLOC_END; 80 } 81 EXPORT_SYMBOL(is_vmalloc_addr); 82 83 struct vfree_deferred { 84 struct llist_head list; 85 struct work_struct wq; 86 }; 87 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); 88 89 static void __vunmap(const void *, int); 90 91 static void free_work(struct work_struct *w) 92 { 93 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); 94 struct llist_node *t, *llnode; 95 96 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) 97 __vunmap((void *)llnode, 1); 98 } 99 100 /*** Page table manipulation functions ***/ 101 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 102 phys_addr_t phys_addr, pgprot_t prot, 103 unsigned int max_page_shift, pgtbl_mod_mask *mask) 104 { 105 pte_t *pte; 106 u64 pfn; 107 unsigned long size = PAGE_SIZE; 108 109 pfn = phys_addr >> PAGE_SHIFT; 110 pte = pte_alloc_kernel_track(pmd, addr, mask); 111 if (!pte) 112 return -ENOMEM; 113 do { 114 BUG_ON(!pte_none(*pte)); 115 116 #ifdef CONFIG_HUGETLB_PAGE 117 size = arch_vmap_pte_range_map_size(addr, 
end, pfn, max_page_shift); 118 if (size != PAGE_SIZE) { 119 pte_t entry = pfn_pte(pfn, prot); 120 121 entry = arch_make_huge_pte(entry, ilog2(size), 0); 122 set_huge_pte_at(&init_mm, addr, pte, entry); 123 pfn += PFN_DOWN(size); 124 continue; 125 } 126 #endif 127 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); 128 pfn++; 129 } while (pte += PFN_DOWN(size), addr += size, addr != end); 130 *mask |= PGTBL_PTE_MODIFIED; 131 return 0; 132 } 133 134 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, 135 phys_addr_t phys_addr, pgprot_t prot, 136 unsigned int max_page_shift) 137 { 138 if (max_page_shift < PMD_SHIFT) 139 return 0; 140 141 if (!arch_vmap_pmd_supported(prot)) 142 return 0; 143 144 if ((end - addr) != PMD_SIZE) 145 return 0; 146 147 if (!IS_ALIGNED(addr, PMD_SIZE)) 148 return 0; 149 150 if (!IS_ALIGNED(phys_addr, PMD_SIZE)) 151 return 0; 152 153 if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) 154 return 0; 155 156 return pmd_set_huge(pmd, phys_addr, prot); 157 } 158 159 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, 160 phys_addr_t phys_addr, pgprot_t prot, 161 unsigned int max_page_shift, pgtbl_mod_mask *mask) 162 { 163 pmd_t *pmd; 164 unsigned long next; 165 166 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); 167 if (!pmd) 168 return -ENOMEM; 169 do { 170 next = pmd_addr_end(addr, end); 171 172 if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, 173 max_page_shift)) { 174 *mask |= PGTBL_PMD_MODIFIED; 175 continue; 176 } 177 178 if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask)) 179 return -ENOMEM; 180 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); 181 return 0; 182 } 183 184 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, 185 phys_addr_t phys_addr, pgprot_t prot, 186 unsigned int max_page_shift) 187 { 188 if (max_page_shift < PUD_SHIFT) 189 return 0; 190 191 if (!arch_vmap_pud_supported(prot)) 192 return 0; 193 194 if ((end - addr) != PUD_SIZE) 195 return 0; 196 197 if (!IS_ALIGNED(addr, PUD_SIZE)) 198 return 0; 199 200 if (!IS_ALIGNED(phys_addr, PUD_SIZE)) 201 return 0; 202 203 if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) 204 return 0; 205 206 return pud_set_huge(pud, phys_addr, prot); 207 } 208 209 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, 210 phys_addr_t phys_addr, pgprot_t prot, 211 unsigned int max_page_shift, pgtbl_mod_mask *mask) 212 { 213 pud_t *pud; 214 unsigned long next; 215 216 pud = pud_alloc_track(&init_mm, p4d, addr, mask); 217 if (!pud) 218 return -ENOMEM; 219 do { 220 next = pud_addr_end(addr, end); 221 222 if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, 223 max_page_shift)) { 224 *mask |= PGTBL_PUD_MODIFIED; 225 continue; 226 } 227 228 if (vmap_pmd_range(pud, addr, next, phys_addr, prot, 229 max_page_shift, mask)) 230 return -ENOMEM; 231 } while (pud++, phys_addr += (next - addr), addr = next, addr != end); 232 return 0; 233 } 234 235 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, 236 phys_addr_t phys_addr, pgprot_t prot, 237 unsigned int max_page_shift) 238 { 239 if (max_page_shift < P4D_SHIFT) 240 return 0; 241 242 if (!arch_vmap_p4d_supported(prot)) 243 return 0; 244 245 if ((end - addr) != P4D_SIZE) 246 return 0; 247 248 if (!IS_ALIGNED(addr, P4D_SIZE)) 249 return 0; 250 251 if (!IS_ALIGNED(phys_addr, P4D_SIZE)) 252 return 0; 253 254 if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) 255 return 0; 256 257 return 
p4d_set_huge(p4d, phys_addr, prot); 258 } 259 260 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, 261 phys_addr_t phys_addr, pgprot_t prot, 262 unsigned int max_page_shift, pgtbl_mod_mask *mask) 263 { 264 p4d_t *p4d; 265 unsigned long next; 266 267 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); 268 if (!p4d) 269 return -ENOMEM; 270 do { 271 next = p4d_addr_end(addr, end); 272 273 if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, 274 max_page_shift)) { 275 *mask |= PGTBL_P4D_MODIFIED; 276 continue; 277 } 278 279 if (vmap_pud_range(p4d, addr, next, phys_addr, prot, 280 max_page_shift, mask)) 281 return -ENOMEM; 282 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); 283 return 0; 284 } 285 286 static int vmap_range_noflush(unsigned long addr, unsigned long end, 287 phys_addr_t phys_addr, pgprot_t prot, 288 unsigned int max_page_shift) 289 { 290 pgd_t *pgd; 291 unsigned long start; 292 unsigned long next; 293 int err; 294 pgtbl_mod_mask mask = 0; 295 296 might_sleep(); 297 BUG_ON(addr >= end); 298 299 start = addr; 300 pgd = pgd_offset_k(addr); 301 do { 302 next = pgd_addr_end(addr, end); 303 err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, 304 max_page_shift, &mask); 305 if (err) 306 break; 307 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); 308 309 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 310 arch_sync_kernel_mappings(start, end); 311 312 return err; 313 } 314 315 int ioremap_page_range(unsigned long addr, unsigned long end, 316 phys_addr_t phys_addr, pgprot_t prot) 317 { 318 int err; 319 320 err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), 321 ioremap_max_page_shift); 322 flush_cache_vmap(addr, end); 323 return err; 324 } 325 326 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 327 pgtbl_mod_mask *mask) 328 { 329 pte_t *pte; 330 331 pte = pte_offset_kernel(pmd, addr); 332 do { 333 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); 334 WARN_ON(!pte_none(ptent) && !pte_present(ptent)); 335 } while (pte++, addr += PAGE_SIZE, addr != end); 336 *mask |= PGTBL_PTE_MODIFIED; 337 } 338 339 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, 340 pgtbl_mod_mask *mask) 341 { 342 pmd_t *pmd; 343 unsigned long next; 344 int cleared; 345 346 pmd = pmd_offset(pud, addr); 347 do { 348 next = pmd_addr_end(addr, end); 349 350 cleared = pmd_clear_huge(pmd); 351 if (cleared || pmd_bad(*pmd)) 352 *mask |= PGTBL_PMD_MODIFIED; 353 354 if (cleared) 355 continue; 356 if (pmd_none_or_clear_bad(pmd)) 357 continue; 358 vunmap_pte_range(pmd, addr, next, mask); 359 360 cond_resched(); 361 } while (pmd++, addr = next, addr != end); 362 } 363 364 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, 365 pgtbl_mod_mask *mask) 366 { 367 pud_t *pud; 368 unsigned long next; 369 int cleared; 370 371 pud = pud_offset(p4d, addr); 372 do { 373 next = pud_addr_end(addr, end); 374 375 cleared = pud_clear_huge(pud); 376 if (cleared || pud_bad(*pud)) 377 *mask |= PGTBL_PUD_MODIFIED; 378 379 if (cleared) 380 continue; 381 if (pud_none_or_clear_bad(pud)) 382 continue; 383 vunmap_pmd_range(pud, addr, next, mask); 384 } while (pud++, addr = next, addr != end); 385 } 386 387 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, 388 pgtbl_mod_mask *mask) 389 { 390 p4d_t *p4d; 391 unsigned long next; 392 int cleared; 393 394 p4d = p4d_offset(pgd, addr); 395 do { 396 next = p4d_addr_end(addr, end); 397 398 cleared = 
p4d_clear_huge(p4d); 399 if (cleared || p4d_bad(*p4d)) 400 *mask |= PGTBL_P4D_MODIFIED; 401 402 if (cleared) 403 continue; 404 if (p4d_none_or_clear_bad(p4d)) 405 continue; 406 vunmap_pud_range(p4d, addr, next, mask); 407 } while (p4d++, addr = next, addr != end); 408 } 409 410 /* 411 * vunmap_range_noflush is similar to vunmap_range, but does not 412 * flush caches or TLBs. 413 * 414 * The caller is responsible for calling flush_cache_vmap() before calling 415 * this function, and flush_tlb_kernel_range after it has returned 416 * successfully (and before the addresses are expected to cause a page fault 417 * or be re-mapped for something else, if TLB flushes are being delayed or 418 * coalesced). 419 * 420 * This is an internal function only. Do not use outside mm/. 421 */ 422 void vunmap_range_noflush(unsigned long start, unsigned long end) 423 { 424 unsigned long next; 425 pgd_t *pgd; 426 unsigned long addr = start; 427 pgtbl_mod_mask mask = 0; 428 429 BUG_ON(addr >= end); 430 pgd = pgd_offset_k(addr); 431 do { 432 next = pgd_addr_end(addr, end); 433 if (pgd_bad(*pgd)) 434 mask |= PGTBL_PGD_MODIFIED; 435 if (pgd_none_or_clear_bad(pgd)) 436 continue; 437 vunmap_p4d_range(pgd, addr, next, &mask); 438 } while (pgd++, addr = next, addr != end); 439 440 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 441 arch_sync_kernel_mappings(start, end); 442 } 443 444 /** 445 * vunmap_range - unmap kernel virtual addresses 446 * @addr: start of the VM area to unmap 447 * @end: end of the VM area to unmap (non-inclusive) 448 * 449 * Clears any present PTEs in the virtual address range, flushes TLBs and 450 * caches. Any subsequent access to the address before it has been re-mapped 451 * is a kernel bug. 452 */ 453 void vunmap_range(unsigned long addr, unsigned long end) 454 { 455 flush_cache_vunmap(addr, end); 456 vunmap_range_noflush(addr, end); 457 flush_tlb_kernel_range(addr, end); 458 } 459 460 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, 461 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 462 pgtbl_mod_mask *mask) 463 { 464 pte_t *pte; 465 466 /* 467 * nr is a running index into the array which helps higher level 468 * callers keep track of where we're up to. 
469 */ 470 471 pte = pte_alloc_kernel_track(pmd, addr, mask); 472 if (!pte) 473 return -ENOMEM; 474 do { 475 struct page *page = pages[*nr]; 476 477 if (WARN_ON(!pte_none(*pte))) 478 return -EBUSY; 479 if (WARN_ON(!page)) 480 return -ENOMEM; 481 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); 482 (*nr)++; 483 } while (pte++, addr += PAGE_SIZE, addr != end); 484 *mask |= PGTBL_PTE_MODIFIED; 485 return 0; 486 } 487 488 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, 489 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 490 pgtbl_mod_mask *mask) 491 { 492 pmd_t *pmd; 493 unsigned long next; 494 495 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); 496 if (!pmd) 497 return -ENOMEM; 498 do { 499 next = pmd_addr_end(addr, end); 500 if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask)) 501 return -ENOMEM; 502 } while (pmd++, addr = next, addr != end); 503 return 0; 504 } 505 506 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr, 507 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 508 pgtbl_mod_mask *mask) 509 { 510 pud_t *pud; 511 unsigned long next; 512 513 pud = pud_alloc_track(&init_mm, p4d, addr, mask); 514 if (!pud) 515 return -ENOMEM; 516 do { 517 next = pud_addr_end(addr, end); 518 if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask)) 519 return -ENOMEM; 520 } while (pud++, addr = next, addr != end); 521 return 0; 522 } 523 524 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, 525 unsigned long end, pgprot_t prot, struct page **pages, int *nr, 526 pgtbl_mod_mask *mask) 527 { 528 p4d_t *p4d; 529 unsigned long next; 530 531 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); 532 if (!p4d) 533 return -ENOMEM; 534 do { 535 next = p4d_addr_end(addr, end); 536 if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask)) 537 return -ENOMEM; 538 } while (p4d++, addr = next, addr != end); 539 return 0; 540 } 541 542 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, 543 pgprot_t prot, struct page **pages) 544 { 545 unsigned long start = addr; 546 pgd_t *pgd; 547 unsigned long next; 548 int err = 0; 549 int nr = 0; 550 pgtbl_mod_mask mask = 0; 551 552 BUG_ON(addr >= end); 553 pgd = pgd_offset_k(addr); 554 do { 555 next = pgd_addr_end(addr, end); 556 if (pgd_bad(*pgd)) 557 mask |= PGTBL_PGD_MODIFIED; 558 err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); 559 if (err) 560 return err; 561 } while (pgd++, addr = next, addr != end); 562 563 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 564 arch_sync_kernel_mappings(start, end); 565 566 return 0; 567 } 568 569 /* 570 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not 571 * flush caches. 572 * 573 * The caller is responsible for calling flush_cache_vmap() after this 574 * function returns successfully and before the addresses are accessed. 575 * 576 * This is an internal function only. Do not use outside mm/. 
577 */ 578 int vmap_pages_range_noflush(unsigned long addr, unsigned long end, 579 pgprot_t prot, struct page **pages, unsigned int page_shift) 580 { 581 unsigned int i, nr = (end - addr) >> PAGE_SHIFT; 582 583 WARN_ON(page_shift < PAGE_SHIFT); 584 585 if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || 586 page_shift == PAGE_SHIFT) 587 return vmap_small_pages_range_noflush(addr, end, prot, pages); 588 589 for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) { 590 int err; 591 592 err = vmap_range_noflush(addr, addr + (1UL << page_shift), 593 __pa(page_address(pages[i])), prot, 594 page_shift); 595 if (err) 596 return err; 597 598 addr += 1UL << page_shift; 599 } 600 601 return 0; 602 } 603 604 /** 605 * vmap_pages_range - map pages to a kernel virtual address 606 * @addr: start of the VM area to map 607 * @end: end of the VM area to map (non-inclusive) 608 * @prot: page protection flags to use 609 * @pages: pages to map (always PAGE_SIZE pages) 610 * @page_shift: maximum shift that the pages may be mapped with, @pages must 611 * be aligned and contiguous up to at least this shift. 612 * 613 * RETURNS: 614 * 0 on success, -errno on failure. 615 */ 616 static int vmap_pages_range(unsigned long addr, unsigned long end, 617 pgprot_t prot, struct page **pages, unsigned int page_shift) 618 { 619 int err; 620 621 err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); 622 flush_cache_vmap(addr, end); 623 return err; 624 } 625 626 int is_vmalloc_or_module_addr(const void *x) 627 { 628 /* 629 * ARM, x86-64 and sparc64 put modules in a special place, 630 * and fall back on vmalloc() if that fails. Others 631 * just put it in the vmalloc space. 632 */ 633 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) 634 unsigned long addr = (unsigned long)x; 635 if (addr >= MODULES_VADDR && addr < MODULES_END) 636 return 1; 637 #endif 638 return is_vmalloc_addr(x); 639 } 640 641 /* 642 * Walk a vmap address to the struct page it maps. Huge vmap mappings will 643 * return the tail page that corresponds to the base page address, which 644 * matches small vmap mappings. 
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
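
/*
 * Illustrative sketch only (not part of the original file): one common way
 * a caller walks a vmalloc()'ed buffer page by page with vmalloc_to_page(),
 * e.g. to hand the backing pages to a scatterlist. The helper name
 * example_walk_vmalloc_pages() is hypothetical.
 */
static void __maybe_unused example_walk_vmalloc_pages(void *buf, unsigned long size)
{
	unsigned long offset;

	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		/* Each PAGE_SIZE chunk of a vmalloc area has its own backing page. */
		struct page *page = vmalloc_to_page(buf + offset);

		if (WARN_ON(!page))
			return;
		/* ... use "page", e.g. sg_set_page(sg, page, PAGE_SIZE, 0) ... */
	}
}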

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * size of its sub-tree, right or left. Therefore it is possible
 * to find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of a conflicting overlap range
 * have to be declined and are actually considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * which we name "link"; the new va->rb_node will be attached there.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger a WARN() if the ranges overlap, either
		 * partially (left/right) or fully.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform a simple insertion
		 * into the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * This is because we populate the tree from the bottom
		 * towards parent levels only when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything into
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when the node is removed or rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from the bottom to the upper
 * levels, starting from the VA point. The propagation must be done
 * when the VA size is modified by changing its va_start/va_end, or
 * when a new VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree (free path);
 * - After VA has been shrunk (allocation path);
 * - After VA has been increased (merging path).
 *
 * Please note that this does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example, if we modify node 4, shrinking it to 2, then no
 * modification is required. If we shrink node 2 to 1, only its
 * subtree_max_size is updated and set to 1. If we shrink node 8
 * to 6, then its subtree_max_size is set to 6 and the parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from the bottom towards the root until
	 * the calculated maximum available size of the checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is done, a new
 * free area is inserted. If the VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, accompanied by a WARN() report. Despite this being
 * buggy behaviour, the system can stay alive and keep
 * going.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with the "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				unlink_va(va, root);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		link_va(va, root, parent, link, head);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = merge_or_add_vmap_area(va, root, head);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request given by the passed parameters.
 * Please note, with an alignment bigger than PAGE_SIZE, the
 * search length is adjusted to account for the worst case
 * alignment overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size, unsigned long align,
	unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * It does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that can satisfy the search criteria. This can happen
			 * due to a "vstart" restriction or an alignment overhead
			 * that is bigger than PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift the vstart forward. Please note, we update it with
					 * the parent's start address plus 1, because we do not want
					 * to enter the same sub-tree again after it has already been
					 * checked and no suitable free block was found there.
					 */
					vstart = va->va_start + 1;
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size, unsigned long align)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, align, vstart, false);
	va_2 = find_vmap_lowest_linear_match(size, align, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif
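
/*
 * Illustrative example (assumed numbers, not from the original source):
 * for a free VA spanning [8MB, 16MB), carving out [8MB, 16MB) is a full
 * fit (FL), [8MB, 10MB) is a left edge fit (LE, the VA shrinks to
 * [10MB, 16MB)), [14MB, 16MB) is a right edge fit (RE, the VA shrinks to
 * [8MB, 14MB)), and [10MB, 12MB) is a no edge fit (NE, the VA is split
 * into [8MB, 10MB) and [12MB, 16MB)).
 */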

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For the percpu allocator we do not do any pre-allocation
			 * and leave it as is. The reason is that it most likely
			 * never ends up with NE_FIT_TYPE splitting. In the case of
			 * percpu allocations, offsets and sizes are aligned to a
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though; for example, the
			 * first allocation (early boot) when we have "one"
			 * big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() for why. If so,
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purposes. That is rare and most of the time does
			 * not occur.
			 *
			 * If the allocation fails, an "overflow" path is triggered
			 * to purge lazily freed areas to free some memory, then
			 * the "retry" path is triggered to repeat one more time.
			 * See more details in the alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise, "vend" is returned to indicate failure.
1435 */ 1436 static __always_inline unsigned long 1437 __alloc_vmap_area(unsigned long size, unsigned long align, 1438 unsigned long vstart, unsigned long vend) 1439 { 1440 bool adjust_search_size = true; 1441 unsigned long nva_start_addr; 1442 struct vmap_area *va; 1443 enum fit_type type; 1444 int ret; 1445 1446 /* 1447 * Do not adjust when: 1448 * a) align <= PAGE_SIZE, because it does not make any sense. 1449 * All blocks(their start addresses) are at least PAGE_SIZE 1450 * aligned anyway; 1451 * b) a short range where a requested size corresponds to exactly 1452 * specified [vstart:vend] interval and an alignment > PAGE_SIZE. 1453 * With adjusted search length an allocation would not succeed. 1454 */ 1455 if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size)) 1456 adjust_search_size = false; 1457 1458 va = find_vmap_lowest_match(size, align, vstart, adjust_search_size); 1459 if (unlikely(!va)) 1460 return vend; 1461 1462 if (va->va_start > vstart) 1463 nva_start_addr = ALIGN(va->va_start, align); 1464 else 1465 nva_start_addr = ALIGN(vstart, align); 1466 1467 /* Check the "vend" restriction. */ 1468 if (nva_start_addr + size > vend) 1469 return vend; 1470 1471 /* Classify what we have found. */ 1472 type = classify_va_fit_type(va, nva_start_addr, size); 1473 if (WARN_ON_ONCE(type == NOTHING_FIT)) 1474 return vend; 1475 1476 /* Update the free vmap_area. */ 1477 ret = adjust_va_to_fit_type(va, nva_start_addr, size, type); 1478 if (ret) 1479 return vend; 1480 1481 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1482 find_vmap_lowest_match_check(size, align); 1483 #endif 1484 1485 return nva_start_addr; 1486 } 1487 1488 /* 1489 * Free a region of KVA allocated by alloc_vmap_area 1490 */ 1491 static void free_vmap_area(struct vmap_area *va) 1492 { 1493 /* 1494 * Remove from the busy tree/list. 1495 */ 1496 spin_lock(&vmap_area_lock); 1497 unlink_va(va, &vmap_area_root); 1498 spin_unlock(&vmap_area_lock); 1499 1500 /* 1501 * Insert/Merge it back to the free tree/list. 1502 */ 1503 spin_lock(&free_vmap_area_lock); 1504 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); 1505 spin_unlock(&free_vmap_area_lock); 1506 } 1507 1508 static inline void 1509 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) 1510 { 1511 struct vmap_area *va = NULL; 1512 1513 /* 1514 * Preload this CPU with one extra vmap_area object. It is used 1515 * when fit type of free area is NE_FIT_TYPE. It guarantees that 1516 * a CPU that does an allocation is preloaded. 1517 * 1518 * We do it in non-atomic context, thus it allows us to use more 1519 * permissive allocation masks to be more stable under low memory 1520 * condition and high memory pressure. 1521 */ 1522 if (!this_cpu_read(ne_fit_preload_node)) 1523 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1524 1525 spin_lock(lock); 1526 1527 if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va)) 1528 kmem_cache_free(vmap_area_cachep, va); 1529 } 1530 1531 /* 1532 * Allocate a region of KVA of the specified size and alignment, within the 1533 * vstart and vend. 
1534 */ 1535 static struct vmap_area *alloc_vmap_area(unsigned long size, 1536 unsigned long align, 1537 unsigned long vstart, unsigned long vend, 1538 int node, gfp_t gfp_mask) 1539 { 1540 struct vmap_area *va; 1541 unsigned long freed; 1542 unsigned long addr; 1543 int purged = 0; 1544 int ret; 1545 1546 BUG_ON(!size); 1547 BUG_ON(offset_in_page(size)); 1548 BUG_ON(!is_power_of_2(align)); 1549 1550 if (unlikely(!vmap_initialized)) 1551 return ERR_PTR(-EBUSY); 1552 1553 might_sleep(); 1554 gfp_mask = gfp_mask & GFP_RECLAIM_MASK; 1555 1556 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1557 if (unlikely(!va)) 1558 return ERR_PTR(-ENOMEM); 1559 1560 /* 1561 * Only scan the relevant parts containing pointers to other objects 1562 * to avoid false negatives. 1563 */ 1564 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); 1565 1566 retry: 1567 preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); 1568 addr = __alloc_vmap_area(size, align, vstart, vend); 1569 spin_unlock(&free_vmap_area_lock); 1570 1571 /* 1572 * If an allocation fails, the "vend" address is 1573 * returned. Therefore trigger the overflow path. 1574 */ 1575 if (unlikely(addr == vend)) 1576 goto overflow; 1577 1578 va->va_start = addr; 1579 va->va_end = addr + size; 1580 va->vm = NULL; 1581 1582 spin_lock(&vmap_area_lock); 1583 insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 1584 spin_unlock(&vmap_area_lock); 1585 1586 BUG_ON(!IS_ALIGNED(va->va_start, align)); 1587 BUG_ON(va->va_start < vstart); 1588 BUG_ON(va->va_end > vend); 1589 1590 ret = kasan_populate_vmalloc(addr, size); 1591 if (ret) { 1592 free_vmap_area(va); 1593 return ERR_PTR(ret); 1594 } 1595 1596 return va; 1597 1598 overflow: 1599 if (!purged) { 1600 purge_vmap_area_lazy(); 1601 purged = 1; 1602 goto retry; 1603 } 1604 1605 freed = 0; 1606 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 1607 1608 if (freed > 0) { 1609 purged = 0; 1610 goto retry; 1611 } 1612 1613 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 1614 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 1615 size); 1616 1617 kmem_cache_free(vmap_area_cachep, va); 1618 return ERR_PTR(-EBUSY); 1619 } 1620 1621 int register_vmap_purge_notifier(struct notifier_block *nb) 1622 { 1623 return blocking_notifier_chain_register(&vmap_notify_list, nb); 1624 } 1625 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 1626 1627 int unregister_vmap_purge_notifier(struct notifier_block *nb) 1628 { 1629 return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 1630 } 1631 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 1632 1633 /* 1634 * lazy_max_pages is the maximum amount of virtual address space we gather up 1635 * before attempting to purge with a TLB flush. 1636 * 1637 * There is a tradeoff here: a larger number will cover more kernel page tables 1638 * and take slightly longer to purge, but it will linearly reduce the number of 1639 * global TLB flushes that must be performed. It would seem natural to scale 1640 * this number up linearly with the number of CPUs (because vmapping activity 1641 * could also scale linearly with the number of CPUs), however it is likely 1642 * that in practice, workloads might be constrained in other ways that mean 1643 * vmap activity will not scale linearly with CPUs. Also, I want to be 1644 * conservative and not introduce a big latency on huge systems, so go with 1645 * a less aggressive log scale. 
It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make pcpu_get_vm_areas() more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

#ifdef CONFIG_X86_64
/*
 * called before a call to iounmap() if the caller wants the vm_area_struct
 * freed immediately.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}
#endif /* CONFIG_X86_64 */

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct list_head local_purge_list;
	struct vmap_area *va, *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	spin_lock(&purge_vmap_area_lock);
	purge_vmap_area_root = RB_ROOT;
	list_replace_init(&purge_vmap_area_list, &local_purge_list);
	spin_unlock(&purge_vmap_area_lock);

	if (unlikely(list_empty(&local_purge_list)))
		return false;

	start = min(start,
		list_first_entry(&local_purge_list,
			struct vmap_area, list)->va_start);

	end = max(end,
		list_last_entry(&local_purge_list,
			struct vmap_area, list)->va_end);

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&free_vmap_area_lock);
	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
				&free_vmap_area_list);

		if (!va)
			continue;

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&free_vmap_area_lock);
	}
	spin_unlock(&free_vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

static void drain_vmap_area_work(struct work_struct *work)
{
	unsigned long nr_lazy;

	do {
		mutex_lock(&vmap_purge_lock);
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);

		/* Recheck if further work is required.
*/ 1762 nr_lazy = atomic_long_read(&vmap_lazy_nr); 1763 } while (nr_lazy > lazy_max_pages()); 1764 } 1765 1766 /* 1767 * Free a vmap area, caller ensuring that the area has been unmapped 1768 * and flush_cache_vunmap had been called for the correct range 1769 * previously. 1770 */ 1771 static void free_vmap_area_noflush(struct vmap_area *va) 1772 { 1773 unsigned long nr_lazy; 1774 1775 spin_lock(&vmap_area_lock); 1776 unlink_va(va, &vmap_area_root); 1777 spin_unlock(&vmap_area_lock); 1778 1779 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> 1780 PAGE_SHIFT, &vmap_lazy_nr); 1781 1782 /* 1783 * Merge or place it to the purge tree/list. 1784 */ 1785 spin_lock(&purge_vmap_area_lock); 1786 merge_or_add_vmap_area(va, 1787 &purge_vmap_area_root, &purge_vmap_area_list); 1788 spin_unlock(&purge_vmap_area_lock); 1789 1790 /* After this point, we may free va at any time */ 1791 if (unlikely(nr_lazy > lazy_max_pages())) 1792 schedule_work(&drain_vmap_work); 1793 } 1794 1795 /* 1796 * Free and unmap a vmap area 1797 */ 1798 static void free_unmap_vmap_area(struct vmap_area *va) 1799 { 1800 flush_cache_vunmap(va->va_start, va->va_end); 1801 vunmap_range_noflush(va->va_start, va->va_end); 1802 if (debug_pagealloc_enabled_static()) 1803 flush_tlb_kernel_range(va->va_start, va->va_end); 1804 1805 free_vmap_area_noflush(va); 1806 } 1807 1808 static struct vmap_area *find_vmap_area(unsigned long addr) 1809 { 1810 struct vmap_area *va; 1811 1812 spin_lock(&vmap_area_lock); 1813 va = __find_vmap_area(addr); 1814 spin_unlock(&vmap_area_lock); 1815 1816 return va; 1817 } 1818 1819 /*** Per cpu kva allocator ***/ 1820 1821 /* 1822 * vmap space is limited especially on 32 bit architectures. Ensure there is 1823 * room for at least 16 percpu vmap blocks per CPU. 1824 */ 1825 /* 1826 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 1827 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 1828 * instead (we just need a rough idea) 1829 */ 1830 #if BITS_PER_LONG == 32 1831 #define VMALLOC_SPACE (128UL*1024*1024) 1832 #else 1833 #define VMALLOC_SPACE (128UL*1024*1024*1024) 1834 #endif 1835 1836 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 1837 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 1838 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 1839 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 1840 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 1841 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 1842 #define VMAP_BBMAP_BITS \ 1843 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 1844 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 1845 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 1846 1847 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 1848 1849 struct vmap_block_queue { 1850 spinlock_t lock; 1851 struct list_head free; 1852 }; 1853 1854 struct vmap_block { 1855 spinlock_t lock; 1856 struct vmap_area *va; 1857 unsigned long free, dirty; 1858 unsigned long dirty_min, dirty_max; /*< dirty range */ 1859 struct list_head free_list; 1860 struct rcu_head rcu_head; 1861 struct list_head purge; 1862 }; 1863 1864 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 1865 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 1866 1867 /* 1868 * XArray of vmap blocks, indexed by address, to quickly find a vmap block 1869 * in the free path. Could get rid of this if we change the API to return a 1870 * "cookie" from alloc, to be passed to free. But no big deal yet. 
1871 */ 1872 static DEFINE_XARRAY(vmap_blocks); 1873 1874 /* 1875 * We should probably have a fallback mechanism to allocate virtual memory 1876 * out of partially filled vmap blocks. However vmap block sizing should be 1877 * fairly reasonable according to the vmalloc size, so it shouldn't be a 1878 * big problem. 1879 */ 1880 1881 static unsigned long addr_to_vb_idx(unsigned long addr) 1882 { 1883 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 1884 addr /= VMAP_BLOCK_SIZE; 1885 return addr; 1886 } 1887 1888 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 1889 { 1890 unsigned long addr; 1891 1892 addr = va_start + (pages_off << PAGE_SHIFT); 1893 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 1894 return (void *)addr; 1895 } 1896 1897 /** 1898 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 1899 * block. Of course pages number can't exceed VMAP_BBMAP_BITS 1900 * @order: how many 2^order pages should be occupied in newly allocated block 1901 * @gfp_mask: flags for the page level allocator 1902 * 1903 * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 1904 */ 1905 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 1906 { 1907 struct vmap_block_queue *vbq; 1908 struct vmap_block *vb; 1909 struct vmap_area *va; 1910 unsigned long vb_idx; 1911 int node, err; 1912 void *vaddr; 1913 1914 node = numa_node_id(); 1915 1916 vb = kmalloc_node(sizeof(struct vmap_block), 1917 gfp_mask & GFP_RECLAIM_MASK, node); 1918 if (unlikely(!vb)) 1919 return ERR_PTR(-ENOMEM); 1920 1921 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 1922 VMALLOC_START, VMALLOC_END, 1923 node, gfp_mask); 1924 if (IS_ERR(va)) { 1925 kfree(vb); 1926 return ERR_CAST(va); 1927 } 1928 1929 vaddr = vmap_block_vaddr(va->va_start, 0); 1930 spin_lock_init(&vb->lock); 1931 vb->va = va; 1932 /* At least something should be left free */ 1933 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 1934 vb->free = VMAP_BBMAP_BITS - (1UL << order); 1935 vb->dirty = 0; 1936 vb->dirty_min = VMAP_BBMAP_BITS; 1937 vb->dirty_max = 0; 1938 INIT_LIST_HEAD(&vb->free_list); 1939 1940 vb_idx = addr_to_vb_idx(va->va_start); 1941 err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask); 1942 if (err) { 1943 kfree(vb); 1944 free_vmap_area(va); 1945 return ERR_PTR(err); 1946 } 1947 1948 vbq = &get_cpu_var(vmap_block_queue); 1949 spin_lock(&vbq->lock); 1950 list_add_tail_rcu(&vb->free_list, &vbq->free); 1951 spin_unlock(&vbq->lock); 1952 put_cpu_var(vmap_block_queue); 1953 1954 return vaddr; 1955 } 1956 1957 static void free_vmap_block(struct vmap_block *vb) 1958 { 1959 struct vmap_block *tmp; 1960 1961 tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start)); 1962 BUG_ON(tmp != vb); 1963 1964 free_vmap_area_noflush(vb->va); 1965 kfree_rcu(vb, rcu_head); 1966 } 1967 1968 static void purge_fragmented_blocks(int cpu) 1969 { 1970 LIST_HEAD(purge); 1971 struct vmap_block *vb; 1972 struct vmap_block *n_vb; 1973 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 1974 1975 rcu_read_lock(); 1976 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1977 1978 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) 1979 continue; 1980 1981 spin_lock(&vb->lock); 1982 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { 1983 vb->free = 0; /* prevent further allocs after releasing lock */ 1984 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ 1985 vb->dirty_min = 0; 1986 vb->dirty_max = VMAP_BBMAP_BITS; 
1987 spin_lock(&vbq->lock); 1988 list_del_rcu(&vb->free_list); 1989 spin_unlock(&vbq->lock); 1990 spin_unlock(&vb->lock); 1991 list_add_tail(&vb->purge, &purge); 1992 } else 1993 spin_unlock(&vb->lock); 1994 } 1995 rcu_read_unlock(); 1996 1997 list_for_each_entry_safe(vb, n_vb, &purge, purge) { 1998 list_del(&vb->purge); 1999 free_vmap_block(vb); 2000 } 2001 } 2002 2003 static void purge_fragmented_blocks_allcpus(void) 2004 { 2005 int cpu; 2006 2007 for_each_possible_cpu(cpu) 2008 purge_fragmented_blocks(cpu); 2009 } 2010 2011 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 2012 { 2013 struct vmap_block_queue *vbq; 2014 struct vmap_block *vb; 2015 void *vaddr = NULL; 2016 unsigned int order; 2017 2018 BUG_ON(offset_in_page(size)); 2019 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2020 if (WARN_ON(size == 0)) { 2021 /* 2022 * Allocating 0 bytes isn't what caller wants since 2023 * get_order(0) returns funny result. Just warn and terminate 2024 * early. 2025 */ 2026 return NULL; 2027 } 2028 order = get_order(size); 2029 2030 rcu_read_lock(); 2031 vbq = &get_cpu_var(vmap_block_queue); 2032 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2033 unsigned long pages_off; 2034 2035 spin_lock(&vb->lock); 2036 if (vb->free < (1UL << order)) { 2037 spin_unlock(&vb->lock); 2038 continue; 2039 } 2040 2041 pages_off = VMAP_BBMAP_BITS - vb->free; 2042 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 2043 vb->free -= 1UL << order; 2044 if (vb->free == 0) { 2045 spin_lock(&vbq->lock); 2046 list_del_rcu(&vb->free_list); 2047 spin_unlock(&vbq->lock); 2048 } 2049 2050 spin_unlock(&vb->lock); 2051 break; 2052 } 2053 2054 put_cpu_var(vmap_block_queue); 2055 rcu_read_unlock(); 2056 2057 /* Allocate new block if nothing was found */ 2058 if (!vaddr) 2059 vaddr = new_vmap_block(order, gfp_mask); 2060 2061 return vaddr; 2062 } 2063 2064 static void vb_free(unsigned long addr, unsigned long size) 2065 { 2066 unsigned long offset; 2067 unsigned int order; 2068 struct vmap_block *vb; 2069 2070 BUG_ON(offset_in_page(size)); 2071 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2072 2073 flush_cache_vunmap(addr, addr + size); 2074 2075 order = get_order(size); 2076 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 2077 vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr)); 2078 2079 vunmap_range_noflush(addr, addr + size); 2080 2081 if (debug_pagealloc_enabled_static()) 2082 flush_tlb_kernel_range(addr, addr + size); 2083 2084 spin_lock(&vb->lock); 2085 2086 /* Expand dirty range */ 2087 vb->dirty_min = min(vb->dirty_min, offset); 2088 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2089 2090 vb->dirty += 1UL << order; 2091 if (vb->dirty == VMAP_BBMAP_BITS) { 2092 BUG_ON(vb->free); 2093 spin_unlock(&vb->lock); 2094 free_vmap_block(vb); 2095 } else 2096 spin_unlock(&vb->lock); 2097 } 2098 2099 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2100 { 2101 int cpu; 2102 2103 if (unlikely(!vmap_initialized)) 2104 return; 2105 2106 might_sleep(); 2107 2108 for_each_possible_cpu(cpu) { 2109 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2110 struct vmap_block *vb; 2111 2112 rcu_read_lock(); 2113 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2114 spin_lock(&vb->lock); 2115 if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { 2116 unsigned long va_start = vb->va->va_start; 2117 unsigned long s, e; 2118 2119 s = va_start + (vb->dirty_min << PAGE_SHIFT); 2120 e = va_start + (vb->dirty_max << PAGE_SHIFT); 2121 2122 start = min(s, start); 2123 end = max(e, 
end); 2124 2125 flush = 1; 2126 } 2127 spin_unlock(&vb->lock); 2128 } 2129 rcu_read_unlock(); 2130 } 2131 2132 mutex_lock(&vmap_purge_lock); 2133 purge_fragmented_blocks_allcpus(); 2134 if (!__purge_vmap_area_lazy(start, end) && flush) 2135 flush_tlb_kernel_range(start, end); 2136 mutex_unlock(&vmap_purge_lock); 2137 } 2138 2139 /** 2140 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 2141 * 2142 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 2143 * to amortize TLB flushing overheads. What this means is that any page you 2144 * have now, may, in a former life, have been mapped into kernel virtual 2145 * address by the vmap layer and so there might be some CPUs with TLB entries 2146 * still referencing that page (additional to the regular 1:1 kernel mapping). 2147 * 2148 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 2149 * be sure that none of the pages we have control over will have any aliases 2150 * from the vmap layer. 2151 */ 2152 void vm_unmap_aliases(void) 2153 { 2154 unsigned long start = ULONG_MAX, end = 0; 2155 int flush = 0; 2156 2157 _vm_unmap_aliases(start, end, flush); 2158 } 2159 EXPORT_SYMBOL_GPL(vm_unmap_aliases); 2160 2161 /** 2162 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 2163 * @mem: the pointer returned by vm_map_ram 2164 * @count: the count passed to that vm_map_ram call (cannot unmap partial) 2165 */ 2166 void vm_unmap_ram(const void *mem, unsigned int count) 2167 { 2168 unsigned long size = (unsigned long)count << PAGE_SHIFT; 2169 unsigned long addr = (unsigned long)mem; 2170 struct vmap_area *va; 2171 2172 might_sleep(); 2173 BUG_ON(!addr); 2174 BUG_ON(addr < VMALLOC_START); 2175 BUG_ON(addr > VMALLOC_END); 2176 BUG_ON(!PAGE_ALIGNED(addr)); 2177 2178 kasan_poison_vmalloc(mem, size); 2179 2180 if (likely(count <= VMAP_MAX_ALLOC)) { 2181 debug_check_no_locks_freed(mem, size); 2182 vb_free(addr, size); 2183 return; 2184 } 2185 2186 va = find_vmap_area(addr); 2187 BUG_ON(!va); 2188 debug_check_no_locks_freed((void *)va->va_start, 2189 (va->va_end - va->va_start)); 2190 free_unmap_vmap_area(va); 2191 } 2192 EXPORT_SYMBOL(vm_unmap_ram); 2193 2194 /** 2195 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 2196 * @pages: an array of pointers to the pages to be mapped 2197 * @count: number of pages 2198 * @node: prefer to allocate data structures on this node 2199 * 2200 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 2201 * faster than vmap so it's good. But if you mix long-life and short-life 2202 * objects with vm_map_ram(), it could consume lots of address space through 2203 * fragmentation (especially on a 32bit machine). You could see failures in 2204 * the end. Please use this function for short-lived objects. 
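 *
 * A minimal usage sketch (illustrative only; @pages is assumed to already
 * hold @count valid page pointers, and src/len are the caller's):
 *
 *	void *va = vm_map_ram(pages, count, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, len);
 *	vm_unmap_ram(va, count);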
2205 * 2206 * Returns: a pointer to the address that has been mapped, or %NULL on failure 2207 */ 2208 void *vm_map_ram(struct page **pages, unsigned int count, int node) 2209 { 2210 unsigned long size = (unsigned long)count << PAGE_SHIFT; 2211 unsigned long addr; 2212 void *mem; 2213 2214 if (likely(count <= VMAP_MAX_ALLOC)) { 2215 mem = vb_alloc(size, GFP_KERNEL); 2216 if (IS_ERR(mem)) 2217 return NULL; 2218 addr = (unsigned long)mem; 2219 } else { 2220 struct vmap_area *va; 2221 va = alloc_vmap_area(size, PAGE_SIZE, 2222 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 2223 if (IS_ERR(va)) 2224 return NULL; 2225 2226 addr = va->va_start; 2227 mem = (void *)addr; 2228 } 2229 2230 kasan_unpoison_vmalloc(mem, size); 2231 2232 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 2233 pages, PAGE_SHIFT) < 0) { 2234 vm_unmap_ram(mem, count); 2235 return NULL; 2236 } 2237 2238 return mem; 2239 } 2240 EXPORT_SYMBOL(vm_map_ram); 2241 2242 static struct vm_struct *vmlist __initdata; 2243 2244 static inline unsigned int vm_area_page_order(struct vm_struct *vm) 2245 { 2246 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2247 return vm->page_order; 2248 #else 2249 return 0; 2250 #endif 2251 } 2252 2253 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 2254 { 2255 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2256 vm->page_order = order; 2257 #else 2258 BUG_ON(order != 0); 2259 #endif 2260 } 2261 2262 /** 2263 * vm_area_add_early - add vmap area early during boot 2264 * @vm: vm_struct to add 2265 * 2266 * This function is used to add fixed kernel vm area to vmlist before 2267 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 2268 * should contain proper values and the other fields should be zero. 2269 * 2270 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 2271 */ 2272 void __init vm_area_add_early(struct vm_struct *vm) 2273 { 2274 struct vm_struct *tmp, **p; 2275 2276 BUG_ON(vmap_initialized); 2277 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 2278 if (tmp->addr >= vm->addr) { 2279 BUG_ON(tmp->addr < vm->addr + vm->size); 2280 break; 2281 } else 2282 BUG_ON(tmp->addr + tmp->size > vm->addr); 2283 } 2284 vm->next = *p; 2285 *p = vm; 2286 } 2287 2288 /** 2289 * vm_area_register_early - register vmap area early during boot 2290 * @vm: vm_struct to register 2291 * @align: requested alignment 2292 * 2293 * This function is used to register kernel vm area before 2294 * vmalloc_init() is called. @vm->size and @vm->flags should contain 2295 * proper values on entry and other fields should be zero. On return, 2296 * vm->addr contains the allocated address. 2297 * 2298 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
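 *
 * A boot-time sketch (illustrative only; size and flags are the caller's):
 *
 *	static struct vm_struct vm;
 *
 *	vm.flags = VM_ALLOC;
 *	vm.size = size;
 *	vm_area_register_early(&vm, PAGE_SIZE);
 *
 * On return, vm.addr holds the reserved kernel virtual address.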
2299 */ 2300 void __init vm_area_register_early(struct vm_struct *vm, size_t align) 2301 { 2302 unsigned long addr = ALIGN(VMALLOC_START, align); 2303 struct vm_struct *cur, **p; 2304 2305 BUG_ON(vmap_initialized); 2306 2307 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { 2308 if ((unsigned long)cur->addr - addr >= vm->size) 2309 break; 2310 addr = ALIGN((unsigned long)cur->addr + cur->size, align); 2311 } 2312 2313 BUG_ON(addr > VMALLOC_END - vm->size); 2314 vm->addr = (void *)addr; 2315 vm->next = *p; 2316 *p = vm; 2317 kasan_populate_early_vm_area_shadow(vm->addr, vm->size); 2318 } 2319 2320 static void vmap_init_free_space(void) 2321 { 2322 unsigned long vmap_start = 1; 2323 const unsigned long vmap_end = ULONG_MAX; 2324 struct vmap_area *busy, *free; 2325 2326 /* 2327 * B F B B B F 2328 * -|-----|.....|-----|-----|-----|.....|- 2329 * | The KVA space | 2330 * |<--------------------------------->| 2331 */ 2332 list_for_each_entry(busy, &vmap_area_list, list) { 2333 if (busy->va_start - vmap_start > 0) { 2334 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 2335 if (!WARN_ON_ONCE(!free)) { 2336 free->va_start = vmap_start; 2337 free->va_end = busy->va_start; 2338 2339 insert_vmap_area_augment(free, NULL, 2340 &free_vmap_area_root, 2341 &free_vmap_area_list); 2342 } 2343 } 2344 2345 vmap_start = busy->va_end; 2346 } 2347 2348 if (vmap_end - vmap_start > 0) { 2349 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 2350 if (!WARN_ON_ONCE(!free)) { 2351 free->va_start = vmap_start; 2352 free->va_end = vmap_end; 2353 2354 insert_vmap_area_augment(free, NULL, 2355 &free_vmap_area_root, 2356 &free_vmap_area_list); 2357 } 2358 } 2359 } 2360 2361 void __init vmalloc_init(void) 2362 { 2363 struct vmap_area *va; 2364 struct vm_struct *tmp; 2365 int i; 2366 2367 /* 2368 * Create the cache for vmap_area objects. 2369 */ 2370 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 2371 2372 for_each_possible_cpu(i) { 2373 struct vmap_block_queue *vbq; 2374 struct vfree_deferred *p; 2375 2376 vbq = &per_cpu(vmap_block_queue, i); 2377 spin_lock_init(&vbq->lock); 2378 INIT_LIST_HEAD(&vbq->free); 2379 p = &per_cpu(vfree_deferred, i); 2380 init_llist_head(&p->list); 2381 INIT_WORK(&p->wq, free_work); 2382 } 2383 2384 /* Import existing vmlist entries. */ 2385 for (tmp = vmlist; tmp; tmp = tmp->next) { 2386 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 2387 if (WARN_ON_ONCE(!va)) 2388 continue; 2389 2390 va->va_start = (unsigned long)tmp->addr; 2391 va->va_end = va->va_start + tmp->size; 2392 va->vm = tmp; 2393 insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 2394 } 2395 2396 /* 2397 * Now we can initialize a free vmap space. 2398 */ 2399 vmap_init_free_space(); 2400 vmap_initialized = true; 2401 } 2402 2403 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, 2404 struct vmap_area *va, unsigned long flags, const void *caller) 2405 { 2406 vm->flags = flags; 2407 vm->addr = (void *)va->va_start; 2408 vm->size = va->va_end - va->va_start; 2409 vm->caller = caller; 2410 va->vm = vm; 2411 } 2412 2413 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 2414 unsigned long flags, const void *caller) 2415 { 2416 spin_lock(&vmap_area_lock); 2417 setup_vmalloc_vm_locked(vm, va, flags, caller); 2418 spin_unlock(&vmap_area_lock); 2419 } 2420 2421 static void clear_vm_uninitialized_flag(struct vm_struct *vm) 2422 { 2423 /* 2424 * Before removing VM_UNINITIALIZED, 2425 * we should make sure that vm has proper values. 
2426 * Pair with smp_rmb() in show_numa_info(). 2427 */ 2428 smp_wmb(); 2429 vm->flags &= ~VM_UNINITIALIZED; 2430 } 2431 2432 static struct vm_struct *__get_vm_area_node(unsigned long size, 2433 unsigned long align, unsigned long shift, unsigned long flags, 2434 unsigned long start, unsigned long end, int node, 2435 gfp_t gfp_mask, const void *caller) 2436 { 2437 struct vmap_area *va; 2438 struct vm_struct *area; 2439 unsigned long requested_size = size; 2440 2441 BUG_ON(in_interrupt()); 2442 size = ALIGN(size, 1ul << shift); 2443 if (unlikely(!size)) 2444 return NULL; 2445 2446 if (flags & VM_IOREMAP) 2447 align = 1ul << clamp_t(int, get_count_order_long(size), 2448 PAGE_SHIFT, IOREMAP_MAX_ORDER); 2449 2450 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 2451 if (unlikely(!area)) 2452 return NULL; 2453 2454 if (!(flags & VM_NO_GUARD)) 2455 size += PAGE_SIZE; 2456 2457 va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 2458 if (IS_ERR(va)) { 2459 kfree(area); 2460 return NULL; 2461 } 2462 2463 kasan_unpoison_vmalloc((void *)va->va_start, requested_size); 2464 2465 setup_vmalloc_vm(area, va, flags, caller); 2466 2467 return area; 2468 } 2469 2470 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 2471 unsigned long start, unsigned long end, 2472 const void *caller) 2473 { 2474 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, 2475 NUMA_NO_NODE, GFP_KERNEL, caller); 2476 } 2477 2478 /** 2479 * get_vm_area - reserve a contiguous kernel virtual area 2480 * @size: size of the area 2481 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 2482 * 2483 * Search an area of @size in the kernel virtual mapping area, 2484 * and reserved it for out purposes. Returns the area descriptor 2485 * on success or %NULL on failure. 2486 * 2487 * Return: the area descriptor on success or %NULL on failure. 2488 */ 2489 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 2490 { 2491 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 2492 VMALLOC_START, VMALLOC_END, 2493 NUMA_NO_NODE, GFP_KERNEL, 2494 __builtin_return_address(0)); 2495 } 2496 2497 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 2498 const void *caller) 2499 { 2500 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 2501 VMALLOC_START, VMALLOC_END, 2502 NUMA_NO_NODE, GFP_KERNEL, caller); 2503 } 2504 2505 /** 2506 * find_vm_area - find a continuous kernel virtual area 2507 * @addr: base address 2508 * 2509 * Search for the kernel VM area starting at @addr, and return it. 2510 * It is up to the caller to do all required locking to keep the returned 2511 * pointer valid. 2512 * 2513 * Return: the area descriptor on success or %NULL on failure. 2514 */ 2515 struct vm_struct *find_vm_area(const void *addr) 2516 { 2517 struct vmap_area *va; 2518 2519 va = find_vmap_area((unsigned long)addr); 2520 if (!va) 2521 return NULL; 2522 2523 return va->vm; 2524 } 2525 2526 /** 2527 * remove_vm_area - find and remove a continuous kernel virtual area 2528 * @addr: base address 2529 * 2530 * Search for the kernel VM area starting at @addr, and remove it. 2531 * This function returns the found VM area, but using it is NOT safe 2532 * on SMP machines, except for its size or flags. 2533 * 2534 * Return: the area descriptor on success or %NULL on failure. 
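 *
 * Typical pairing, mirroring what free_vm_area() does below (sketch):
 *
 *	struct vm_struct *vm = remove_vm_area(addr);
 *
 *	if (vm)
 *		kfree(vm);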
2535 */ 2536 struct vm_struct *remove_vm_area(const void *addr) 2537 { 2538 struct vmap_area *va; 2539 2540 might_sleep(); 2541 2542 spin_lock(&vmap_area_lock); 2543 va = __find_vmap_area((unsigned long)addr); 2544 if (va && va->vm) { 2545 struct vm_struct *vm = va->vm; 2546 2547 va->vm = NULL; 2548 spin_unlock(&vmap_area_lock); 2549 2550 kasan_free_shadow(vm); 2551 free_unmap_vmap_area(va); 2552 2553 return vm; 2554 } 2555 2556 spin_unlock(&vmap_area_lock); 2557 return NULL; 2558 } 2559 2560 static inline void set_area_direct_map(const struct vm_struct *area, 2561 int (*set_direct_map)(struct page *page)) 2562 { 2563 int i; 2564 2565 /* HUGE_VMALLOC passes small pages to set_direct_map */ 2566 for (i = 0; i < area->nr_pages; i++) 2567 if (page_address(area->pages[i])) 2568 set_direct_map(area->pages[i]); 2569 } 2570 2571 /* Handle removing and resetting vm mappings related to the vm_struct. */ 2572 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2573 { 2574 unsigned long start = ULONG_MAX, end = 0; 2575 unsigned int page_order = vm_area_page_order(area); 2576 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 2577 int flush_dmap = 0; 2578 int i; 2579 2580 remove_vm_area(area->addr); 2581 2582 /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */ 2583 if (!flush_reset) 2584 return; 2585 2586 /* 2587 * If not deallocating pages, just do the flush of the VM area and 2588 * return. 2589 */ 2590 if (!deallocate_pages) { 2591 vm_unmap_aliases(); 2592 return; 2593 } 2594 2595 /* 2596 * If execution gets here, flush the vm mapping and reset the direct 2597 * map. Find the start and end range of the direct mappings to make sure 2598 * the vm_unmap_aliases() flush includes the direct map. 2599 */ 2600 for (i = 0; i < area->nr_pages; i += 1U << page_order) { 2601 unsigned long addr = (unsigned long)page_address(area->pages[i]); 2602 if (addr) { 2603 unsigned long page_size; 2604 2605 page_size = PAGE_SIZE << page_order; 2606 start = min(addr, start); 2607 end = max(addr + page_size, end); 2608 flush_dmap = 1; 2609 } 2610 } 2611 2612 /* 2613 * Set direct map to something invalid so that it won't be cached if 2614 * there are any accesses after the TLB flush, then flush the TLB and 2615 * reset the direct map permissions to the default. 
2616 */ 2617 set_area_direct_map(area, set_direct_map_invalid_noflush); 2618 _vm_unmap_aliases(start, end, flush_dmap); 2619 set_area_direct_map(area, set_direct_map_default_noflush); 2620 } 2621 2622 static void __vunmap(const void *addr, int deallocate_pages) 2623 { 2624 struct vm_struct *area; 2625 2626 if (!addr) 2627 return; 2628 2629 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 2630 addr)) 2631 return; 2632 2633 area = find_vm_area(addr); 2634 if (unlikely(!area)) { 2635 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 2636 addr); 2637 return; 2638 } 2639 2640 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 2641 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 2642 2643 kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); 2644 2645 vm_remove_mappings(area, deallocate_pages); 2646 2647 if (deallocate_pages) { 2648 unsigned int page_order = vm_area_page_order(area); 2649 int i, step = 1U << page_order; 2650 2651 for (i = 0; i < area->nr_pages; i += step) { 2652 struct page *page = area->pages[i]; 2653 2654 BUG_ON(!page); 2655 mod_memcg_page_state(page, MEMCG_VMALLOC, -step); 2656 __free_pages(page, page_order); 2657 cond_resched(); 2658 } 2659 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); 2660 2661 kvfree(area->pages); 2662 } 2663 2664 kfree(area); 2665 } 2666 2667 static inline void __vfree_deferred(const void *addr) 2668 { 2669 /* 2670 * Use raw_cpu_ptr() because this can be called from preemptible 2671 * context. Preemption is absolutely fine here, because the llist_add() 2672 * implementation is lockless, so it works even if we are adding to 2673 * another cpu's list. schedule_work() should be fine with this too. 2674 */ 2675 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 2676 2677 if (llist_add((struct llist_node *)addr, &p->list)) 2678 schedule_work(&p->wq); 2679 } 2680 2681 /** 2682 * vfree_atomic - release memory allocated by vmalloc() 2683 * @addr: memory base address 2684 * 2685 * This one is just like vfree() but can be called in any atomic context 2686 * except NMIs. 2687 */ 2688 void vfree_atomic(const void *addr) 2689 { 2690 BUG_ON(in_nmi()); 2691 2692 kmemleak_free(addr); 2693 2694 if (!addr) 2695 return; 2696 __vfree_deferred(addr); 2697 } 2698 2699 static void __vfree(const void *addr) 2700 { 2701 if (unlikely(in_interrupt())) 2702 __vfree_deferred(addr); 2703 else 2704 __vunmap(addr, 1); 2705 } 2706 2707 /** 2708 * vfree - Release memory allocated by vmalloc() 2709 * @addr: Memory base address 2710 * 2711 * Free the virtually continuous memory area starting at @addr, as obtained 2712 * from one of the vmalloc() family of APIs. This will usually also free the 2713 * physical memory underlying the virtual allocation, but that memory is 2714 * reference counted, so it will not be freed until the last user goes away. 2715 * 2716 * If @addr is NULL, no operation is performed. 2717 * 2718 * Context: 2719 * May sleep if called *not* from interrupt context. 2720 * Must not be called in NMI context (strictly speaking, it could be 2721 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 2722 * conventions for vfree() arch-dependent would be a really bad idea). 
2723 */ 2724 void vfree(const void *addr) 2725 { 2726 BUG_ON(in_nmi()); 2727 2728 kmemleak_free(addr); 2729 2730 might_sleep_if(!in_interrupt()); 2731 2732 if (!addr) 2733 return; 2734 2735 __vfree(addr); 2736 } 2737 EXPORT_SYMBOL(vfree); 2738 2739 /** 2740 * vunmap - release virtual mapping obtained by vmap() 2741 * @addr: memory base address 2742 * 2743 * Free the virtually contiguous memory area starting at @addr, 2744 * which was created from the page array passed to vmap(). 2745 * 2746 * Must not be called in interrupt context. 2747 */ 2748 void vunmap(const void *addr) 2749 { 2750 BUG_ON(in_interrupt()); 2751 might_sleep(); 2752 if (addr) 2753 __vunmap(addr, 0); 2754 } 2755 EXPORT_SYMBOL(vunmap); 2756 2757 /** 2758 * vmap - map an array of pages into virtually contiguous space 2759 * @pages: array of page pointers 2760 * @count: number of pages to map 2761 * @flags: vm_area->flags 2762 * @prot: page protection for the mapping 2763 * 2764 * Maps @count pages from @pages into contiguous kernel virtual space. 2765 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself 2766 * (which must be kmalloc or vmalloc memory) and one reference per pages in it 2767 * are transferred from the caller to vmap(), and will be freed / dropped when 2768 * vfree() is called on the return value. 2769 * 2770 * Return: the address of the area or %NULL on failure 2771 */ 2772 void *vmap(struct page **pages, unsigned int count, 2773 unsigned long flags, pgprot_t prot) 2774 { 2775 struct vm_struct *area; 2776 unsigned long addr; 2777 unsigned long size; /* In bytes */ 2778 2779 might_sleep(); 2780 2781 /* 2782 * Your top guard is someone else's bottom guard. Not having a top 2783 * guard compromises someone else's mappings too. 2784 */ 2785 if (WARN_ON_ONCE(flags & VM_NO_GUARD)) 2786 flags &= ~VM_NO_GUARD; 2787 2788 if (count > totalram_pages()) 2789 return NULL; 2790 2791 size = (unsigned long)count << PAGE_SHIFT; 2792 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 2793 if (!area) 2794 return NULL; 2795 2796 addr = (unsigned long)area->addr; 2797 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 2798 pages, PAGE_SHIFT) < 0) { 2799 vunmap(area->addr); 2800 return NULL; 2801 } 2802 2803 if (flags & VM_MAP_PUT_PAGES) { 2804 area->pages = pages; 2805 area->nr_pages = count; 2806 } 2807 return area->addr; 2808 } 2809 EXPORT_SYMBOL(vmap); 2810 2811 #ifdef CONFIG_VMAP_PFN 2812 struct vmap_pfn_data { 2813 unsigned long *pfns; 2814 pgprot_t prot; 2815 unsigned int idx; 2816 }; 2817 2818 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 2819 { 2820 struct vmap_pfn_data *data = private; 2821 2822 if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx]))) 2823 return -EINVAL; 2824 *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); 2825 return 0; 2826 } 2827 2828 /** 2829 * vmap_pfn - map an array of PFNs into virtually contiguous space 2830 * @pfns: array of PFNs 2831 * @count: number of pages to map 2832 * @prot: page protection for the mapping 2833 * 2834 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 2835 * the start address of the mapping. 
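 *
 * Illustrative sketch (assumes the caller filled @pfns with PFNs that have
 * no struct page backing, e.g. from a device aperture; the mapping is torn
 * down with vunmap()):
 *
 *	void *va = vmap_pfn(pfns, count, pgprot_writecombine(PAGE_KERNEL));
 *
 *	if (!va)
 *		return -ENOMEM;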
2836 */ 2837 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 2838 { 2839 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 2840 struct vm_struct *area; 2841 2842 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 2843 __builtin_return_address(0)); 2844 if (!area) 2845 return NULL; 2846 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2847 count * PAGE_SIZE, vmap_pfn_apply, &data)) { 2848 free_vm_area(area); 2849 return NULL; 2850 } 2851 return area->addr; 2852 } 2853 EXPORT_SYMBOL_GPL(vmap_pfn); 2854 #endif /* CONFIG_VMAP_PFN */ 2855 2856 static inline unsigned int 2857 vm_area_alloc_pages(gfp_t gfp, int nid, 2858 unsigned int order, unsigned int nr_pages, struct page **pages) 2859 { 2860 unsigned int nr_allocated = 0; 2861 struct page *page; 2862 int i; 2863 2864 /* 2865 * For order-0 pages we make use of bulk allocator, if 2866 * the page array is partly or not at all populated due 2867 * to fails, fallback to a single page allocator that is 2868 * more permissive. 2869 */ 2870 if (!order) { 2871 gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL; 2872 2873 while (nr_allocated < nr_pages) { 2874 unsigned int nr, nr_pages_request; 2875 2876 /* 2877 * A maximum allowed request is hard-coded and is 100 2878 * pages per call. That is done in order to prevent a 2879 * long preemption off scenario in the bulk-allocator 2880 * so the range is [1:100]. 2881 */ 2882 nr_pages_request = min(100U, nr_pages - nr_allocated); 2883 2884 /* memory allocation should consider mempolicy, we can't 2885 * wrongly use nearest node when nid == NUMA_NO_NODE, 2886 * otherwise memory may be allocated in only one node, 2887 * but mempolcy want to alloc memory by interleaving. 2888 */ 2889 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) 2890 nr = alloc_pages_bulk_array_mempolicy(bulk_gfp, 2891 nr_pages_request, 2892 pages + nr_allocated); 2893 2894 else 2895 nr = alloc_pages_bulk_array_node(bulk_gfp, nid, 2896 nr_pages_request, 2897 pages + nr_allocated); 2898 2899 nr_allocated += nr; 2900 cond_resched(); 2901 2902 /* 2903 * If zero or pages were obtained partly, 2904 * fallback to a single page allocator. 2905 */ 2906 if (nr != nr_pages_request) 2907 break; 2908 } 2909 } else 2910 /* 2911 * Compound pages required for remap_vmalloc_page if 2912 * high-order pages. 2913 */ 2914 gfp |= __GFP_COMP; 2915 2916 /* High-order pages or fallback path if "bulk" fails. */ 2917 2918 while (nr_allocated < nr_pages) { 2919 if (fatal_signal_pending(current)) 2920 break; 2921 2922 if (nid == NUMA_NO_NODE) 2923 page = alloc_pages(gfp, order); 2924 else 2925 page = alloc_pages_node(nid, gfp, order); 2926 if (unlikely(!page)) 2927 break; 2928 2929 /* 2930 * Careful, we allocate and map page-order pages, but 2931 * tracking is done per PAGE_SIZE page so as to keep the 2932 * vm_struct APIs independent of the physical/mapped size. 
2933 */ 2934 for (i = 0; i < (1U << order); i++) 2935 pages[nr_allocated + i] = page + i; 2936 2937 cond_resched(); 2938 nr_allocated += 1U << order; 2939 } 2940 2941 return nr_allocated; 2942 } 2943 2944 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 2945 pgprot_t prot, unsigned int page_shift, 2946 int node) 2947 { 2948 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 2949 bool nofail = gfp_mask & __GFP_NOFAIL; 2950 unsigned long addr = (unsigned long)area->addr; 2951 unsigned long size = get_vm_area_size(area); 2952 unsigned long array_size; 2953 unsigned int nr_small_pages = size >> PAGE_SHIFT; 2954 unsigned int page_order; 2955 unsigned int flags; 2956 int ret; 2957 2958 array_size = (unsigned long)nr_small_pages * sizeof(struct page *); 2959 gfp_mask |= __GFP_NOWARN; 2960 if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) 2961 gfp_mask |= __GFP_HIGHMEM; 2962 2963 /* Please note that the recursion is strictly bounded. */ 2964 if (array_size > PAGE_SIZE) { 2965 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, 2966 area->caller); 2967 } else { 2968 area->pages = kmalloc_node(array_size, nested_gfp, node); 2969 } 2970 2971 if (!area->pages) { 2972 warn_alloc(gfp_mask, NULL, 2973 "vmalloc error: size %lu, failed to allocated page array size %lu", 2974 nr_small_pages * PAGE_SIZE, array_size); 2975 free_vm_area(area); 2976 return NULL; 2977 } 2978 2979 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); 2980 page_order = vm_area_page_order(area); 2981 2982 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, 2983 node, page_order, nr_small_pages, area->pages); 2984 2985 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 2986 if (gfp_mask & __GFP_ACCOUNT) { 2987 int i, step = 1U << page_order; 2988 2989 for (i = 0; i < area->nr_pages; i += step) 2990 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 2991 step); 2992 } 2993 2994 /* 2995 * If not enough pages were obtained to accomplish an 2996 * allocation request, free them via __vfree() if any. 
2997 */ 2998 if (area->nr_pages != nr_small_pages) { 2999 warn_alloc(gfp_mask, NULL, 3000 "vmalloc error: size %lu, page order %u, failed to allocate pages", 3001 area->nr_pages * PAGE_SIZE, page_order); 3002 goto fail; 3003 } 3004 3005 /* 3006 * page tables allocations ignore external gfp mask, enforce it 3007 * by the scope API 3008 */ 3009 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3010 flags = memalloc_nofs_save(); 3011 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3012 flags = memalloc_noio_save(); 3013 3014 do { 3015 ret = vmap_pages_range(addr, addr + size, prot, area->pages, 3016 page_shift); 3017 if (nofail && (ret < 0)) 3018 schedule_timeout_uninterruptible(1); 3019 } while (nofail && (ret < 0)); 3020 3021 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3022 memalloc_nofs_restore(flags); 3023 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3024 memalloc_noio_restore(flags); 3025 3026 if (ret < 0) { 3027 warn_alloc(gfp_mask, NULL, 3028 "vmalloc error: size %lu, failed to map pages", 3029 area->nr_pages * PAGE_SIZE); 3030 goto fail; 3031 } 3032 3033 return area->addr; 3034 3035 fail: 3036 __vfree(area->addr); 3037 return NULL; 3038 } 3039 3040 /** 3041 * __vmalloc_node_range - allocate virtually contiguous memory 3042 * @size: allocation size 3043 * @align: desired alignment 3044 * @start: vm area range start 3045 * @end: vm area range end 3046 * @gfp_mask: flags for the page level allocator 3047 * @prot: protection mask for the allocated pages 3048 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 3049 * @node: node to use for allocation or NUMA_NO_NODE 3050 * @caller: caller's return address 3051 * 3052 * Allocate enough pages to cover @size from the page level 3053 * allocator with @gfp_mask flags. Please note that the full set of gfp 3054 * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all 3055 * supported. 3056 * Zone modifiers are not supported. From the reclaim modifiers 3057 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) 3058 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and 3059 * __GFP_RETRY_MAYFAIL are not supported). 3060 * 3061 * __GFP_NOWARN can be used to suppress failures messages. 3062 * 3063 * Map them into contiguous kernel virtual space, using a pagetable 3064 * protection of @prot. 3065 * 3066 * Return: the address of the area or %NULL on failure 3067 */ 3068 void *__vmalloc_node_range(unsigned long size, unsigned long align, 3069 unsigned long start, unsigned long end, gfp_t gfp_mask, 3070 pgprot_t prot, unsigned long vm_flags, int node, 3071 const void *caller) 3072 { 3073 struct vm_struct *area; 3074 void *addr; 3075 unsigned long real_size = size; 3076 unsigned long real_align = align; 3077 unsigned int shift = PAGE_SHIFT; 3078 3079 if (WARN_ON_ONCE(!size)) 3080 return NULL; 3081 3082 if ((size >> PAGE_SHIFT) > totalram_pages()) { 3083 warn_alloc(gfp_mask, NULL, 3084 "vmalloc error: size %lu, exceeds total pages", 3085 real_size); 3086 return NULL; 3087 } 3088 3089 if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) { 3090 unsigned long size_per_node; 3091 3092 /* 3093 * Try huge pages. Only try for PAGE_KERNEL allocations, 3094 * others like modules don't yet expect huge pages in 3095 * their allocations due to apply_to_page_range not 3096 * supporting them. 
3097 */ 3098 3099 size_per_node = size; 3100 if (node == NUMA_NO_NODE) 3101 size_per_node /= num_online_nodes(); 3102 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE) 3103 shift = PMD_SHIFT; 3104 else 3105 shift = arch_vmap_pte_supported_shift(size_per_node); 3106 3107 align = max(real_align, 1UL << shift); 3108 size = ALIGN(real_size, 1UL << shift); 3109 } 3110 3111 again: 3112 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | 3113 VM_UNINITIALIZED | vm_flags, start, end, node, 3114 gfp_mask, caller); 3115 if (!area) { 3116 bool nofail = gfp_mask & __GFP_NOFAIL; 3117 warn_alloc(gfp_mask, NULL, 3118 "vmalloc error: size %lu, vm_struct allocation failed%s", 3119 real_size, (nofail) ? ". Retrying." : ""); 3120 if (nofail) { 3121 schedule_timeout_uninterruptible(1); 3122 goto again; 3123 } 3124 goto fail; 3125 } 3126 3127 addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 3128 if (!addr) 3129 goto fail; 3130 3131 /* 3132 * In this function, newly allocated vm_struct has VM_UNINITIALIZED 3133 * flag. It means that vm_struct is not fully initialized. 3134 * Now, it is fully initialized, so remove this flag here. 3135 */ 3136 clear_vm_uninitialized_flag(area); 3137 3138 size = PAGE_ALIGN(size); 3139 if (!(vm_flags & VM_DEFER_KMEMLEAK)) 3140 kmemleak_vmalloc(area, size, gfp_mask); 3141 3142 return addr; 3143 3144 fail: 3145 if (shift > PAGE_SHIFT) { 3146 shift = PAGE_SHIFT; 3147 align = real_align; 3148 size = real_size; 3149 goto again; 3150 } 3151 3152 return NULL; 3153 } 3154 3155 /** 3156 * __vmalloc_node - allocate virtually contiguous memory 3157 * @size: allocation size 3158 * @align: desired alignment 3159 * @gfp_mask: flags for the page level allocator 3160 * @node: node to use for allocation or NUMA_NO_NODE 3161 * @caller: caller's return address 3162 * 3163 * Allocate enough pages to cover @size from the page level allocator with 3164 * @gfp_mask flags. Map them into contiguous kernel virtual space. 3165 * 3166 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 3167 * and __GFP_NOFAIL are not supported 3168 * 3169 * Any use of gfp flags outside of GFP_KERNEL should be consulted 3170 * with mm people. 3171 * 3172 * Return: pointer to the allocated memory or %NULL on error 3173 */ 3174 void *__vmalloc_node(unsigned long size, unsigned long align, 3175 gfp_t gfp_mask, int node, const void *caller) 3176 { 3177 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 3178 gfp_mask, PAGE_KERNEL, 0, node, caller); 3179 } 3180 /* 3181 * This is only for performance analysis of vmalloc and stress purpose. 3182 * It is required by vmalloc test module, therefore do not use it other 3183 * than that. 3184 */ 3185 #ifdef CONFIG_TEST_VMALLOC_MODULE 3186 EXPORT_SYMBOL_GPL(__vmalloc_node); 3187 #endif 3188 3189 void *__vmalloc(unsigned long size, gfp_t gfp_mask) 3190 { 3191 return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, 3192 __builtin_return_address(0)); 3193 } 3194 EXPORT_SYMBOL(__vmalloc); 3195 3196 /** 3197 * vmalloc - allocate virtually contiguous memory 3198 * @size: allocation size 3199 * 3200 * Allocate enough pages to cover @size from the page level 3201 * allocator and map them into contiguous kernel virtual space. 3202 * 3203 * For tight control over page level allocator and protection flags 3204 * use __vmalloc() instead. 
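 *
 * A minimal sketch of the usual allocate/use/free pairing (nr_bytes is the
 * caller's size):
 *
 *	void *buf = vmalloc(nr_bytes);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	memset(buf, 0, nr_bytes);
 *	vfree(buf);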
3205 * 3206 * Return: pointer to the allocated memory or %NULL on error 3207 */ 3208 void *vmalloc(unsigned long size) 3209 { 3210 return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, 3211 __builtin_return_address(0)); 3212 } 3213 EXPORT_SYMBOL(vmalloc); 3214 3215 /** 3216 * vmalloc_no_huge - allocate virtually contiguous memory using small pages 3217 * @size: allocation size 3218 * 3219 * Allocate enough non-huge pages to cover @size from the page level 3220 * allocator and map them into contiguous kernel virtual space. 3221 * 3222 * Return: pointer to the allocated memory or %NULL on error 3223 */ 3224 void *vmalloc_no_huge(unsigned long size) 3225 { 3226 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 3227 GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP, 3228 NUMA_NO_NODE, __builtin_return_address(0)); 3229 } 3230 EXPORT_SYMBOL(vmalloc_no_huge); 3231 3232 /** 3233 * vzalloc - allocate virtually contiguous memory with zero fill 3234 * @size: allocation size 3235 * 3236 * Allocate enough pages to cover @size from the page level 3237 * allocator and map them into contiguous kernel virtual space. 3238 * The memory allocated is set to zero. 3239 * 3240 * For tight control over page level allocator and protection flags 3241 * use __vmalloc() instead. 3242 * 3243 * Return: pointer to the allocated memory or %NULL on error 3244 */ 3245 void *vzalloc(unsigned long size) 3246 { 3247 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 3248 __builtin_return_address(0)); 3249 } 3250 EXPORT_SYMBOL(vzalloc); 3251 3252 /** 3253 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 3254 * @size: allocation size 3255 * 3256 * The resulting memory area is zeroed so it can be mapped to userspace 3257 * without leaking data. 3258 * 3259 * Return: pointer to the allocated memory or %NULL on error 3260 */ 3261 void *vmalloc_user(unsigned long size) 3262 { 3263 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3264 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 3265 VM_USERMAP, NUMA_NO_NODE, 3266 __builtin_return_address(0)); 3267 } 3268 EXPORT_SYMBOL(vmalloc_user); 3269 3270 /** 3271 * vmalloc_node - allocate memory on a specific node 3272 * @size: allocation size 3273 * @node: numa node 3274 * 3275 * Allocate enough pages to cover @size from the page level 3276 * allocator and map them into contiguous kernel virtual space. 3277 * 3278 * For tight control over page level allocator and protection flags 3279 * use __vmalloc() instead. 3280 * 3281 * Return: pointer to the allocated memory or %NULL on error 3282 */ 3283 void *vmalloc_node(unsigned long size, int node) 3284 { 3285 return __vmalloc_node(size, 1, GFP_KERNEL, node, 3286 __builtin_return_address(0)); 3287 } 3288 EXPORT_SYMBOL(vmalloc_node); 3289 3290 /** 3291 * vzalloc_node - allocate memory on a specific node with zero fill 3292 * @size: allocation size 3293 * @node: numa node 3294 * 3295 * Allocate enough pages to cover @size from the page level 3296 * allocator and map them into contiguous kernel virtual space. 3297 * The memory allocated is set to zero. 
3298 * 3299 * Return: pointer to the allocated memory or %NULL on error 3300 */ 3301 void *vzalloc_node(unsigned long size, int node) 3302 { 3303 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, 3304 __builtin_return_address(0)); 3305 } 3306 EXPORT_SYMBOL(vzalloc_node); 3307 3308 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 3309 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 3310 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 3311 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 3312 #else 3313 /* 3314 * 64b systems should always have either DMA or DMA32 zones. For others 3315 * GFP_DMA32 should do the right thing and use the normal zone. 3316 */ 3317 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 3318 #endif 3319 3320 /** 3321 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 3322 * @size: allocation size 3323 * 3324 * Allocate enough 32bit PA addressable pages to cover @size from the 3325 * page level allocator and map them into contiguous kernel virtual space. 3326 * 3327 * Return: pointer to the allocated memory or %NULL on error 3328 */ 3329 void *vmalloc_32(unsigned long size) 3330 { 3331 return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 3332 __builtin_return_address(0)); 3333 } 3334 EXPORT_SYMBOL(vmalloc_32); 3335 3336 /** 3337 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 3338 * @size: allocation size 3339 * 3340 * The resulting memory area is 32bit addressable and zeroed so it can be 3341 * mapped to userspace without leaking data. 3342 * 3343 * Return: pointer to the allocated memory or %NULL on error 3344 */ 3345 void *vmalloc_32_user(unsigned long size) 3346 { 3347 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3348 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 3349 VM_USERMAP, NUMA_NO_NODE, 3350 __builtin_return_address(0)); 3351 } 3352 EXPORT_SYMBOL(vmalloc_32_user); 3353 3354 /* 3355 * small helper routine , copy contents to buf from addr. 3356 * If the page is not present, fill zero. 3357 */ 3358 3359 static int aligned_vread(char *buf, char *addr, unsigned long count) 3360 { 3361 struct page *p; 3362 int copied = 0; 3363 3364 while (count) { 3365 unsigned long offset, length; 3366 3367 offset = offset_in_page(addr); 3368 length = PAGE_SIZE - offset; 3369 if (length > count) 3370 length = count; 3371 p = vmalloc_to_page(addr); 3372 /* 3373 * To do safe access to this _mapped_ area, we need 3374 * lock. But adding lock here means that we need to add 3375 * overhead of vmalloc()/vfree() calls for this _debug_ 3376 * interface, rarely used. Instead of that, we'll use 3377 * kmap() and get small overhead in this access function. 3378 */ 3379 if (p) { 3380 /* We can expect USER0 is not used -- see vread() */ 3381 void *map = kmap_atomic(p); 3382 memcpy(buf, map + offset, length); 3383 kunmap_atomic(map); 3384 } else 3385 memset(buf, 0, length); 3386 3387 addr += length; 3388 buf += length; 3389 copied += length; 3390 count -= length; 3391 } 3392 return copied; 3393 } 3394 3395 /** 3396 * vread() - read vmalloc area in a safe way. 3397 * @buf: buffer for reading data 3398 * @addr: vm address. 3399 * @count: number of bytes to be read. 3400 * 3401 * This function checks that addr is a valid vmalloc'ed area, and 3402 * copy data from that area to a given buffer. If the given memory range 3403 * of [addr...addr+count) includes some valid address, data is copied to 3404 * proper area of @buf. If there are memory holes, they'll be zero-filled. 
3405 * IOREMAP area is treated as memory hole and no copy is done. 3406 * 3407 * If [addr...addr+count) doesn't includes any intersects with alive 3408 * vm_struct area, returns 0. @buf should be kernel's buffer. 3409 * 3410 * Note: In usual ops, vread() is never necessary because the caller 3411 * should know vmalloc() area is valid and can use memcpy(). 3412 * This is for routines which have to access vmalloc area without 3413 * any information, as /proc/kcore. 3414 * 3415 * Return: number of bytes for which addr and buf should be increased 3416 * (same number as @count) or %0 if [addr...addr+count) doesn't 3417 * include any intersection with valid vmalloc area 3418 */ 3419 long vread(char *buf, char *addr, unsigned long count) 3420 { 3421 struct vmap_area *va; 3422 struct vm_struct *vm; 3423 char *vaddr, *buf_start = buf; 3424 unsigned long buflen = count; 3425 unsigned long n; 3426 3427 /* Don't allow overflow */ 3428 if ((unsigned long) addr + count < count) 3429 count = -(unsigned long) addr; 3430 3431 spin_lock(&vmap_area_lock); 3432 va = find_vmap_area_exceed_addr((unsigned long)addr); 3433 if (!va) 3434 goto finished; 3435 3436 /* no intersects with alive vmap_area */ 3437 if ((unsigned long)addr + count <= va->va_start) 3438 goto finished; 3439 3440 list_for_each_entry_from(va, &vmap_area_list, list) { 3441 if (!count) 3442 break; 3443 3444 if (!va->vm) 3445 continue; 3446 3447 vm = va->vm; 3448 vaddr = (char *) vm->addr; 3449 if (addr >= vaddr + get_vm_area_size(vm)) 3450 continue; 3451 while (addr < vaddr) { 3452 if (count == 0) 3453 goto finished; 3454 *buf = '\0'; 3455 buf++; 3456 addr++; 3457 count--; 3458 } 3459 n = vaddr + get_vm_area_size(vm) - addr; 3460 if (n > count) 3461 n = count; 3462 if (!(vm->flags & VM_IOREMAP)) 3463 aligned_vread(buf, addr, n); 3464 else /* IOREMAP area is treated as memory hole */ 3465 memset(buf, 0, n); 3466 buf += n; 3467 addr += n; 3468 count -= n; 3469 } 3470 finished: 3471 spin_unlock(&vmap_area_lock); 3472 3473 if (buf == buf_start) 3474 return 0; 3475 /* zero-fill memory holes */ 3476 if (buf != buf_start + buflen) 3477 memset(buf, 0, buflen - (buf - buf_start)); 3478 3479 return buflen; 3480 } 3481 3482 /** 3483 * remap_vmalloc_range_partial - map vmalloc pages to userspace 3484 * @vma: vma to cover 3485 * @uaddr: target user address to start at 3486 * @kaddr: virtual address of vmalloc kernel memory 3487 * @pgoff: offset from @kaddr to start at 3488 * @size: size of map area 3489 * 3490 * Returns: 0 for success, -Exxx on failure 3491 * 3492 * This function checks that @kaddr is a valid vmalloc'ed area, 3493 * and that it is big enough to cover the range starting at 3494 * @uaddr in @vma. Will return failure if that criteria isn't 3495 * met. 
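 *
 * Sketch of use from an ->mmap() handler (this is essentially what
 * remap_vmalloc_range() below does; @buf is assumed to come from
 * vmalloc_user() so that VM_USERMAP is set):
 *
 *	return remap_vmalloc_range_partial(vma, vma->vm_start, buf,
 *					   vma->vm_pgoff,
 *					   vma->vm_end - vma->vm_start);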
3496 * 3497 * Similar to remap_pfn_range() (see mm/memory.c) 3498 */ 3499 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 3500 void *kaddr, unsigned long pgoff, 3501 unsigned long size) 3502 { 3503 struct vm_struct *area; 3504 unsigned long off; 3505 unsigned long end_index; 3506 3507 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) 3508 return -EINVAL; 3509 3510 size = PAGE_ALIGN(size); 3511 3512 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 3513 return -EINVAL; 3514 3515 area = find_vm_area(kaddr); 3516 if (!area) 3517 return -EINVAL; 3518 3519 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 3520 return -EINVAL; 3521 3522 if (check_add_overflow(size, off, &end_index) || 3523 end_index > get_vm_area_size(area)) 3524 return -EINVAL; 3525 kaddr += off; 3526 3527 do { 3528 struct page *page = vmalloc_to_page(kaddr); 3529 int ret; 3530 3531 ret = vm_insert_page(vma, uaddr, page); 3532 if (ret) 3533 return ret; 3534 3535 uaddr += PAGE_SIZE; 3536 kaddr += PAGE_SIZE; 3537 size -= PAGE_SIZE; 3538 } while (size > 0); 3539 3540 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3541 3542 return 0; 3543 } 3544 3545 /** 3546 * remap_vmalloc_range - map vmalloc pages to userspace 3547 * @vma: vma to cover (map full range of vma) 3548 * @addr: vmalloc memory 3549 * @pgoff: number of pages into addr before first page to map 3550 * 3551 * Returns: 0 for success, -Exxx on failure 3552 * 3553 * This function checks that addr is a valid vmalloc'ed area, and 3554 * that it is big enough to cover the vma. Will return failure if 3555 * that criteria isn't met. 3556 * 3557 * Similar to remap_pfn_range() (see mm/memory.c) 3558 */ 3559 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 3560 unsigned long pgoff) 3561 { 3562 return remap_vmalloc_range_partial(vma, vma->vm_start, 3563 addr, pgoff, 3564 vma->vm_end - vma->vm_start); 3565 } 3566 EXPORT_SYMBOL(remap_vmalloc_range); 3567 3568 void free_vm_area(struct vm_struct *area) 3569 { 3570 struct vm_struct *ret; 3571 ret = remove_vm_area(area->addr); 3572 BUG_ON(ret != area); 3573 kfree(area); 3574 } 3575 EXPORT_SYMBOL_GPL(free_vm_area); 3576 3577 #ifdef CONFIG_SMP 3578 static struct vmap_area *node_to_va(struct rb_node *n) 3579 { 3580 return rb_entry_safe(n, struct vmap_area, rb_node); 3581 } 3582 3583 /** 3584 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 3585 * @addr: target address 3586 * 3587 * Returns: vmap_area if it is found. If there is no such area 3588 * the first highest(reverse order) vmap_area is returned 3589 * i.e. va->va_start < addr && va->va_end < addr or NULL 3590 * if there are no any areas before @addr. 3591 */ 3592 static struct vmap_area * 3593 pvm_find_va_enclose_addr(unsigned long addr) 3594 { 3595 struct vmap_area *va, *tmp; 3596 struct rb_node *n; 3597 3598 n = free_vmap_area_root.rb_node; 3599 va = NULL; 3600 3601 while (n) { 3602 tmp = rb_entry(n, struct vmap_area, rb_node); 3603 if (tmp->va_start <= addr) { 3604 va = tmp; 3605 if (tmp->va_end >= addr) 3606 break; 3607 3608 n = n->rb_right; 3609 } else { 3610 n = n->rb_left; 3611 } 3612 } 3613 3614 return va; 3615 } 3616 3617 /** 3618 * pvm_determine_end_from_reverse - find the highest aligned address 3619 * of free block below VMALLOC_END 3620 * @va: 3621 * in - the VA we start the search(reverse order); 3622 * out - the VA with the highest aligned end address. 
3623 * @align: alignment for required highest address 3624 * 3625 * Returns: determined end address within vmap_area 3626 */ 3627 static unsigned long 3628 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 3629 { 3630 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3631 unsigned long addr; 3632 3633 if (likely(*va)) { 3634 list_for_each_entry_from_reverse((*va), 3635 &free_vmap_area_list, list) { 3636 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 3637 if ((*va)->va_start < addr) 3638 return addr; 3639 } 3640 } 3641 3642 return 0; 3643 } 3644 3645 /** 3646 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 3647 * @offsets: array containing offset of each area 3648 * @sizes: array containing size of each area 3649 * @nr_vms: the number of areas to allocate 3650 * @align: alignment, all entries in @offsets and @sizes must be aligned to this 3651 * 3652 * Returns: kmalloc'd vm_struct pointer array pointing to allocated 3653 * vm_structs on success, %NULL on failure 3654 * 3655 * Percpu allocator wants to use congruent vm areas so that it can 3656 * maintain the offsets among percpu areas. This function allocates 3657 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 3658 * be scattered pretty far, distance between two areas easily going up 3659 * to gigabytes. To avoid interacting with regular vmallocs, these 3660 * areas are allocated from top. 3661 * 3662 * Despite its complicated look, this allocator is rather simple. It 3663 * does everything top-down and scans free blocks from the end looking 3664 * for matching base. While scanning, if any of the areas do not fit the 3665 * base address is pulled down to fit the area. Scanning is repeated till 3666 * all the areas fit and then all necessary data structures are inserted 3667 * and the result is returned. 3668 */ 3669 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 3670 const size_t *sizes, int nr_vms, 3671 size_t align) 3672 { 3673 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 3674 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3675 struct vmap_area **vas, *va; 3676 struct vm_struct **vms; 3677 int area, area2, last_area, term_area; 3678 unsigned long base, start, size, end, last_end, orig_start, orig_end; 3679 bool purged = false; 3680 enum fit_type type; 3681 3682 /* verify parameters and allocate data structures */ 3683 BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 3684 for (last_area = 0, area = 0; area < nr_vms; area++) { 3685 start = offsets[area]; 3686 end = start + sizes[area]; 3687 3688 /* is everything aligned properly? 
*/ 3689 BUG_ON(!IS_ALIGNED(offsets[area], align)); 3690 BUG_ON(!IS_ALIGNED(sizes[area], align)); 3691 3692 /* detect the area with the highest address */ 3693 if (start > offsets[last_area]) 3694 last_area = area; 3695 3696 for (area2 = area + 1; area2 < nr_vms; area2++) { 3697 unsigned long start2 = offsets[area2]; 3698 unsigned long end2 = start2 + sizes[area2]; 3699 3700 BUG_ON(start2 < end && start < end2); 3701 } 3702 } 3703 last_end = offsets[last_area] + sizes[last_area]; 3704 3705 if (vmalloc_end - vmalloc_start < last_end) { 3706 WARN_ON(true); 3707 return NULL; 3708 } 3709 3710 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 3711 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 3712 if (!vas || !vms) 3713 goto err_free2; 3714 3715 for (area = 0; area < nr_vms; area++) { 3716 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 3717 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 3718 if (!vas[area] || !vms[area]) 3719 goto err_free; 3720 } 3721 retry: 3722 spin_lock(&free_vmap_area_lock); 3723 3724 /* start scanning - we scan from the top, begin with the last area */ 3725 area = term_area = last_area; 3726 start = offsets[area]; 3727 end = start + sizes[area]; 3728 3729 va = pvm_find_va_enclose_addr(vmalloc_end); 3730 base = pvm_determine_end_from_reverse(&va, align) - end; 3731 3732 while (true) { 3733 /* 3734 * base might have underflowed, add last_end before 3735 * comparing. 3736 */ 3737 if (base + last_end < vmalloc_start + last_end) 3738 goto overflow; 3739 3740 /* 3741 * Fitting base has not been found. 3742 */ 3743 if (va == NULL) 3744 goto overflow; 3745 3746 /* 3747 * If required width exceeds current VA block, move 3748 * base downwards and then recheck. 3749 */ 3750 if (base + end > va->va_end) { 3751 base = pvm_determine_end_from_reverse(&va, align) - end; 3752 term_area = area; 3753 continue; 3754 } 3755 3756 /* 3757 * If this VA does not fit, move base downwards and recheck. 3758 */ 3759 if (base + start < va->va_start) { 3760 va = node_to_va(rb_prev(&va->rb_node)); 3761 base = pvm_determine_end_from_reverse(&va, align) - end; 3762 term_area = area; 3763 continue; 3764 } 3765 3766 /* 3767 * This area fits, move on to the previous one. If 3768 * the previous one is the terminal one, we're done. 3769 */ 3770 area = (area + nr_vms - 1) % nr_vms; 3771 if (area == term_area) 3772 break; 3773 3774 start = offsets[area]; 3775 end = start + sizes[area]; 3776 va = pvm_find_va_enclose_addr(base + end); 3777 } 3778 3779 /* we've found a fitting base, insert all va's */ 3780 for (area = 0; area < nr_vms; area++) { 3781 int ret; 3782 3783 start = base + offsets[area]; 3784 size = sizes[area]; 3785 3786 va = pvm_find_va_enclose_addr(start); 3787 if (WARN_ON_ONCE(va == NULL)) 3788 /* It is a BUG(), but trigger recovery instead. */ 3789 goto recovery; 3790 3791 type = classify_va_fit_type(va, start, size); 3792 if (WARN_ON_ONCE(type == NOTHING_FIT)) 3793 /* It is a BUG(), but trigger recovery instead. */ 3794 goto recovery; 3795 3796 ret = adjust_va_to_fit_type(va, start, size, type); 3797 if (unlikely(ret)) 3798 goto recovery; 3799 3800 /* Allocated area. 
*/ 3801 va = vas[area]; 3802 va->va_start = start; 3803 va->va_end = start + size; 3804 } 3805 3806 spin_unlock(&free_vmap_area_lock); 3807 3808 /* populate the kasan shadow space */ 3809 for (area = 0; area < nr_vms; area++) { 3810 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) 3811 goto err_free_shadow; 3812 3813 kasan_unpoison_vmalloc((void *)vas[area]->va_start, 3814 sizes[area]); 3815 } 3816 3817 /* insert all vm's */ 3818 spin_lock(&vmap_area_lock); 3819 for (area = 0; area < nr_vms; area++) { 3820 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); 3821 3822 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, 3823 pcpu_get_vm_areas); 3824 } 3825 spin_unlock(&vmap_area_lock); 3826 3827 kfree(vas); 3828 return vms; 3829 3830 recovery: 3831 /* 3832 * Remove previously allocated areas. There is no 3833 * need in removing these areas from the busy tree, 3834 * because they are inserted only on the final step 3835 * and when pcpu_get_vm_areas() is success. 3836 */ 3837 while (area--) { 3838 orig_start = vas[area]->va_start; 3839 orig_end = vas[area]->va_end; 3840 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 3841 &free_vmap_area_list); 3842 if (va) 3843 kasan_release_vmalloc(orig_start, orig_end, 3844 va->va_start, va->va_end); 3845 vas[area] = NULL; 3846 } 3847 3848 overflow: 3849 spin_unlock(&free_vmap_area_lock); 3850 if (!purged) { 3851 purge_vmap_area_lazy(); 3852 purged = true; 3853 3854 /* Before "retry", check if we recover. */ 3855 for (area = 0; area < nr_vms; area++) { 3856 if (vas[area]) 3857 continue; 3858 3859 vas[area] = kmem_cache_zalloc( 3860 vmap_area_cachep, GFP_KERNEL); 3861 if (!vas[area]) 3862 goto err_free; 3863 } 3864 3865 goto retry; 3866 } 3867 3868 err_free: 3869 for (area = 0; area < nr_vms; area++) { 3870 if (vas[area]) 3871 kmem_cache_free(vmap_area_cachep, vas[area]); 3872 3873 kfree(vms[area]); 3874 } 3875 err_free2: 3876 kfree(vas); 3877 kfree(vms); 3878 return NULL; 3879 3880 err_free_shadow: 3881 spin_lock(&free_vmap_area_lock); 3882 /* 3883 * We release all the vmalloc shadows, even the ones for regions that 3884 * hadn't been successfully added. This relies on kasan_release_vmalloc 3885 * being able to tolerate this case. 3886 */ 3887 for (area = 0; area < nr_vms; area++) { 3888 orig_start = vas[area]->va_start; 3889 orig_end = vas[area]->va_end; 3890 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 3891 &free_vmap_area_list); 3892 if (va) 3893 kasan_release_vmalloc(orig_start, orig_end, 3894 va->va_start, va->va_end); 3895 vas[area] = NULL; 3896 kfree(vms[area]); 3897 } 3898 spin_unlock(&free_vmap_area_lock); 3899 kfree(vas); 3900 kfree(vms); 3901 return NULL; 3902 } 3903 3904 /** 3905 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 3906 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 3907 * @nr_vms: the number of allocated areas 3908 * 3909 * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
	struct vm_struct *vm;
	void *objp = (void *)PAGE_ALIGN((unsigned long)object);

	vm = find_vm_area(objp);
	if (!vm)
		return false;
	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
	return true;
}
#endif

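/*
 * Note (descriptive only): vmalloc_dump_obj() above is intended to be
 * reached via mem_dump_obj() in mm/util.c when the queried pointer is not
 * a slab object; its pr_cont() output continues a line already started by
 * that caller.
 */
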
4007 */ 4008 if (!va->vm) { 4009 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 4010 (void *)va->va_start, (void *)va->va_end, 4011 va->va_end - va->va_start); 4012 4013 goto final; 4014 } 4015 4016 v = va->vm; 4017 4018 seq_printf(m, "0x%pK-0x%pK %7ld", 4019 v->addr, v->addr + v->size, v->size); 4020 4021 if (v->caller) 4022 seq_printf(m, " %pS", v->caller); 4023 4024 if (v->nr_pages) 4025 seq_printf(m, " pages=%d", v->nr_pages); 4026 4027 if (v->phys_addr) 4028 seq_printf(m, " phys=%pa", &v->phys_addr); 4029 4030 if (v->flags & VM_IOREMAP) 4031 seq_puts(m, " ioremap"); 4032 4033 if (v->flags & VM_ALLOC) 4034 seq_puts(m, " vmalloc"); 4035 4036 if (v->flags & VM_MAP) 4037 seq_puts(m, " vmap"); 4038 4039 if (v->flags & VM_USERMAP) 4040 seq_puts(m, " user"); 4041 4042 if (v->flags & VM_DMA_COHERENT) 4043 seq_puts(m, " dma-coherent"); 4044 4045 if (is_vmalloc_addr(v->pages)) 4046 seq_puts(m, " vpages"); 4047 4048 show_numa_info(m, v); 4049 seq_putc(m, '\n'); 4050 4051 /* 4052 * As a final step, dump "unpurged" areas. 4053 */ 4054 final: 4055 if (list_is_last(&va->list, &vmap_area_list)) 4056 show_purge_info(m); 4057 4058 return 0; 4059 } 4060 4061 static const struct seq_operations vmalloc_op = { 4062 .start = s_start, 4063 .next = s_next, 4064 .stop = s_stop, 4065 .show = s_show, 4066 }; 4067 4068 static int __init proc_vmalloc_init(void) 4069 { 4070 if (IS_ENABLED(CONFIG_NUMA)) 4071 proc_create_seq_private("vmallocinfo", 0400, NULL, 4072 &vmalloc_op, 4073 nr_node_ids * sizeof(unsigned int), NULL); 4074 else 4075 proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); 4076 return 0; 4077 } 4078 module_init(proc_vmalloc_init); 4079 4080 #endif 4081