// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_clear_huge(p4d))
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}
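/*
 * Illustrative sketch (not part of this file's code): how the helpers
 * above are meant to be driven. The addresses and the two-page array
 * are made-up values, chosen only to show the pages[N] <-> addr +
 * N*PAGE_SIZE invariant documented for vmap_page_range_noflush():
 *
 *	struct page *pages[2] = { page0, page1 };	// hypothetical
 *	unsigned long start = vaddr;			// page-aligned kva
 *	unsigned long end   = vaddr + 2 * PAGE_SIZE;
 *
 *	// After a successful call, the pte for vaddr maps page0 and the
 *	// pte for vaddr + PAGE_SIZE maps page1; the return value is 2.
 *	int nr = vmap_page_range(start, end, PAGE_KERNEL, pages);
 */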
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node stores the maximum available free-block
 * size of its own sub-tree, left or right. Therefore it is
 * possible to find the lowest-address match for a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;
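/*
 * Illustrative sketch of the augmented-tree invariant, with made-up
 * sizes. If the free tree holds three areas of sizes 2, 4 and 8 pages,
 * laid out as below, each node stores the largest free-block size
 * found anywhere in its own subtree (shown in brackets):
 *
 *			4 [8]
 *		       /     \
 *		  2 [2]       8 [8]
 *
 * A request for 8 pages can then skip the left subtree entirely:
 * get_subtree_max_size(left) == 2 < 8, so no node under it can fit.
 */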
/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when the node is removed or rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the parent node and its left or
 * right link for further processing.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * which we call "link", where the new va->rb_node will be
	 * attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checking.
		 * Trigger the BUG() if we find a partial (left/right)
		 * or full overlap.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else
			BUG();
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}
static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * This is because we populate the tree from the bottom
		 * up to the parent levels only when the node is already
		 * in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(struct rb_node *n)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long size;
	bool found = false;

	if (n == NULL)
		return;

	va = rb_entry(n, struct vmap_area, rb_node);
	size = va->subtree_max_size;
	node = n;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) == size) {
			node = node->rb_left;
		} else {
			if (va_size(va) == size) {
				found = true;
				break;
			}

			node = node->rb_right;
		}
	}

	if (!found) {
		va = rb_entry(n, struct vmap_area, rb_node);
		pr_emerg("tree is corrupted: %lu, %lu\n",
			va_size(va), va->subtree_max_size);
	}

	augment_tree_propagate_check(n->rb_left);
	augment_tree_propagate_check(n->rb_right);
}
#endif
/*
 * This function populates subtree_max_size from the bottom to the
 * upper levels starting from the VA point. The propagation must be
 * done when VA size is modified by changing its va_start/va_end, or
 * in case of newly inserting VA into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	struct rb_node *node = &va->rb_node;
	unsigned long new_va_sub_max_size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);
		new_va_sub_max_size = compute_subtree_max_size(va);

		/*
		 * If the newly calculated maximum available size of the
		 * subtree is equal to the current one, then it means that
		 * the tree is propagated correctly. So we have to stop at
		 * this point to save cycles.
		 */
		if (va->subtree_max_size == new_va_sub_max_size)
			break;

		va->subtree_max_size = new_va_sub_max_size;
		node = rb_parent(&va->rb_node);
	}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check(free_vmap_area_root.rb_node);
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	link_va(va, root, parent, link, head);
	augment_tree_propagate_from(va);
}
/*
 * Merge de-allocated chunk of VA memory with previous
 * and next free blocks. If coalescing is not done a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			sibling->va_end = va->va_end;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			if (merged)
				unlink_va(va, root);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}
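/*
 * Worked example for the check above, with made-up numbers. Suppose
 * va spans [0x1000, 0x3000), vstart is 0x1000, align is 0x800 and
 * size is 0x1800. Then nva_start_addr = ALIGN(0x1000, 0x800) = 0x1000
 * and 0x1000 + 0x1800 = 0x2800 <= 0x3000, so the area fits. The
 * "nva_start_addr + size < nva_start_addr" test catches the case where
 * a huge size or alignment wraps the sum past the top of the address
 * space, which would otherwise look like a (bogus) successful fit.
 */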
/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request corresponding to the passed-in
 * parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal or bigger to the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * only once due to the "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
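/*
 * Worked example for the classifier above, with made-up addresses.
 * Let the free area va span [0x1000, 0x5000):
 *
 *   - request [0x1000, 0x5000) -> FL_FIT_TYPE (consumes the whole area)
 *   - request [0x1000, 0x2000) -> LE_FIT_TYPE (remainder on the right)
 *   - request [0x4000, 0x5000) -> RE_FIT_TYPE (remainder on the left)
 *   - request [0x2000, 0x3000) -> NE_FIT_TYPE (remainders on both
 *                                 sides, so the area must be split)
 *   - request [0x4000, 0x6000) -> NOTHING_FIT (overruns va->va_end)
 */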
static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In case of
			 * percpu allocations offsets and sizes are aligned to
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though, as an example it
			 * is the first allocation (early boot-up) when we have
			 * "one" big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purposes. That is rare and most of the time
			 * does not occur.
			 *
			 * If the allocation fails, an "overflow" path is
			 * triggered to purge lazily freed areas in order to
			 * free some memory, then the "retry" path is triggered
			 * to repeat one more time. See more details in
			 * alloc_vmap_area().
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise "vend" is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(size);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}
/*
 * Allocate a region of KVA of the specified size and alignment,
 * within the range [vstart, vend).
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va, *pva;
	unsigned long addr;
	int purged = 0;
	int ret;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. Please note, it
	 * does not guarantee that an allocation occurs on a CPU that
	 * is preloaded, instead we minimize the case when it is not.
	 * It can happen because of cpu migration, because there is a
	 * race until the below spinlock is taken.
	 *
	 * The preload is done in non-atomic context, thus it allows us
	 * to use more permissive allocation masks to be more stable under
	 * low memory condition and high memory pressure. In rare case,
	 * if not preloaded, GFP_NOWAIT is used.
	 *
	 * Set "pva" to NULL here, because of "retry" path.
	 */
	pva = NULL;

	if (!this_cpu_read(ne_fit_preload_node))
		/*
		 * Even if it fails we do not really care about that.
		 * Just proceed as it is. If needed "overflow" path
		 * will refill the cache we allocate from.
		 */
		pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(&free_vmap_area_lock);

	if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
		kmem_cache_free(vmap_area_cachep, pva);
	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	addr = __alloc_vmap_area(size, align, vstart, vend);
	spin_unlock(&free_vmap_area_lock);

	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;


	spin_lock(&vmap_area_lock);
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
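/*
 * Worked example of the log scale above, assuming 4 KiB pages (so
 * 32 MB is 8192 pages): with 1 online CPU, fls(1) == 1 and the lazy
 * threshold is 8192 pages (32 MB); with 4 CPUs, fls(4) == 3, giving
 * 24576 pages (96 MB); with 64 CPUs, fls(64) == 7, giving 57344 pages
 * (224 MB). A linear scale would already be at 2 GB for 64 CPUs.
 */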
static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct llist_node *valist;
	struct vmap_area *va;
	struct vmap_area *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	valist = llist_del_all(&vmap_purge_list);
	if (unlikely(valist == NULL))
		return false;

	/*
	 * First make sure the mappings are removed from all page-tables
	 * before they are freed.
	 */
	vmalloc_sync_all();

	/*
	 * TODO: to calculate a flush range without looping.
	 * The list can be up to lazy_max_pages() elements.
	 */
	llist_for_each_entry(va, valist, purge_list) {
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
	}

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&free_vmap_area_lock);
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		va = merge_or_add_vmap_area(va, &free_vmap_area_root,
					    &free_vmap_area_list);

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&free_vmap_area_lock);
	}
	spin_unlock(&free_vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}
/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap has been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy;

	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/* After this point, we may free va at any time */
	llist_add(&va->purge_list, &vmap_purge_list);

	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_vmap_area(va);
	if (debug_pagealloc_enabled())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
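/*
 * Worked example of the sizing above, assuming 4 KiB pages, a 64-bit
 * build (VMALLOC_SPACE guessed as 128 GB, i.e. 32M pages) and a
 * hypothetical NR_CPUS of 64: 32M / 64 / 16 = 32768 candidate bits per
 * block, which VMAP_MIN() clamps down to VMAP_BBMAP_BITS_MAX == 1024,
 * so VMAP_BLOCK_SIZE ends up as 1024 * 4 KiB = 4 MB. On a 32-bit build
 * (128 MB space, NR_CPUS == 4) the candidate is 32768 / 4 / 16 = 512
 * bits, inside the [64, 1024] clamp, for a 2 MB block size.
 */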
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}
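/*
 * Worked example for addr_to_vb_idx(), with made-up numbers: assume a
 * 4 MB VMAP_BLOCK_SIZE and a VMALLOC_START of 0xffffc90000000000,
 * which is already 4 MB aligned, so the subtraction removes exactly
 * VMALLOC_START. An address of VMALLOC_START + 9 MB then yields index
 * 9 MB / 4 MB = 2, i.e. the third block. All addresses within one
 * aligned 4 MB window share an index, which is what lets vb_free()
 * find the owning vmap_block in the radix tree from an address alone.
 */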
/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Note that the number of pages cannot exceed
 *                  VMAP_BBMAP_BITS.
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
	offset >>= PAGE_SHIFT;

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	if (debug_pagealloc_enabled())
		flush_tlb_kernel_range((unsigned long)addr,
					(unsigned long)addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	might_sleep();

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	kasan_poison_vmalloc(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(mem, size);
		return;
	}

	va = find_vmap_area(addr);
	BUG_ON(!va);
	debug_check_no_locks_freed((void *)va->va_start,
				    (va->va_end - va->va_start));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good. But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine). You could see failures in
 * the end. Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}

	kasan_unpoison_vmalloc(mem, size);

	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
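/*
 * Illustrative usage sketch for the pair above (not part of this file's
 * code; "pages", "nr", "data", "len" and the error label are all
 * hypothetical caller state). The same count must be passed to both
 * calls, since partial unmap is not supported:
 *
 *	void *buf = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (!buf)
 *		goto err;
 *	memcpy(buf, data, len);	// access the pages through the linear kva
 *	vm_unmap_ram(buf, nr);
 */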
static struct vm_struct *vmlist __initdata;

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called. @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero. On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

static void vmap_init_free_space(void)
{
	unsigned long vmap_start = 1;
	const unsigned long vmap_end = ULONG_MAX;
	struct vmap_area *busy, *free;

	/*
	 *     B     F     B     B     B     F
	 * -|-----|.....|-----|-----|-----|.....|-
	 *  |           The KVA space           |
	 *  |<--------------------------------->|
	 */
	list_for_each_entry(busy, &vmap_area_list, list) {
		if (busy->va_start - vmap_start > 0) {
			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
			if (!WARN_ON_ONCE(!free)) {
				free->va_start = vmap_start;
				free->va_end = busy->va_start;

				insert_vmap_area_augment(free, NULL,
					&free_vmap_area_root,
						&free_vmap_area_list);
			}
		}

		vmap_start = busy->va_end;
	}

	if (vmap_end - vmap_start > 0) {
		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (!WARN_ON_ONCE(!free)) {
			free->va_start = vmap_start;
			free->va_end = vmap_end;

			insert_vmap_area_augment(free, NULL,
				&free_vmap_area_root,
					&free_vmap_area_list);
		}
	}
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	/*
	 * Create the cache for vmap_area objects.
	 */
	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (WARN_ON_ONCE(!va))
			continue;

		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	/*
	 * Now we can initialize a free vmap space.
	 */
	vmap_init_free_space();
	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr. The VM area specified by @addr and
 * @size should have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
1999  */
2000 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
2001 			     pgprot_t prot, struct page **pages)
2002 {
2003 	return vmap_page_range_noflush(addr, addr + size, prot, pages);
2004 }
2005 
2006 /**
2007  * unmap_kernel_range_noflush - unmap kernel VM area
2008  * @addr: start of the VM area to unmap
2009  * @size: size of the VM area to unmap
2010  *
2011  * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
2012  * specify should have been allocated using get_vm_area() and its
2013  * friends.
2014  *
2015  * NOTE:
2016  * This function does NOT do any cache flushing. The caller is
2017  * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
2018  * before calling this function and flush_tlb_kernel_range() after.
2019  */
2020 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
2021 {
2022 	vunmap_page_range(addr, addr + size);
2023 }
2024 EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
2025 
2026 /**
2027  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
2028  * @addr: start of the VM area to unmap
2029  * @size: size of the VM area to unmap
2030  *
2031  * Similar to unmap_kernel_range_noflush() but flushes the virtual cache
2032  * before the unmapping and the TLB after.
2033  */
2034 void unmap_kernel_range(unsigned long addr, unsigned long size)
2035 {
2036 	unsigned long end = addr + size;
2037 
2038 	flush_cache_vunmap(addr, end);
2039 	vunmap_page_range(addr, end);
2040 	flush_tlb_kernel_range(addr, end);
2041 }
2042 EXPORT_SYMBOL_GPL(unmap_kernel_range);
2043 
2044 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
2045 {
2046 	unsigned long addr = (unsigned long)area->addr;
2047 	unsigned long end = addr + get_vm_area_size(area);
2048 	int err;
2049 
2050 	err = vmap_page_range(addr, end, prot, pages);
2051 
2052 	return err > 0 ? 0 : err;
2053 }
2054 EXPORT_SYMBOL_GPL(map_vm_area);
2055 
2056 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2057 	struct vmap_area *va, unsigned long flags, const void *caller)
2058 {
2059 	vm->flags = flags;
2060 	vm->addr = (void *)va->va_start;
2061 	vm->size = va->va_end - va->va_start;
2062 	vm->caller = caller;
2063 	va->vm = vm;
2064 }
2065 
2066 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2067 			      unsigned long flags, const void *caller)
2068 {
2069 	spin_lock(&vmap_area_lock);
2070 	setup_vmalloc_vm_locked(vm, va, flags, caller);
2071 	spin_unlock(&vmap_area_lock);
2072 }
2073 
2074 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2075 {
2076 	/*
2077 	 * Before removing VM_UNINITIALIZED,
2078 	 * we should make sure that vm has proper values.
2079 	 * Pair with smp_rmb() in show_numa_info().
2080 	 */
2081 	smp_wmb();
2082 	vm->flags &= ~VM_UNINITIALIZED;
2083 }
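/*
 * Example of the barrier pairing above (sketch): the writer publishes a
 * fully initialized vm_struct,
 *
 *	setup_vmalloc_vm(area, va, flags, caller);
 *	...
 *	smp_wmb();
 *	vm->flags &= ~VM_UNINITIALIZED;
 *
 * while a reader such as show_numa_info() tests VM_UNINITIALIZED first and
 * then issues smp_rmb() before trusting the other fields.
 */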
2084 
2085 static struct vm_struct *__get_vm_area_node(unsigned long size,
2086 		unsigned long align, unsigned long flags, unsigned long start,
2087 		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2088 {
2089 	struct vmap_area *va;
2090 	struct vm_struct *area;
2091 	unsigned long requested_size = size;
2092 
2093 	BUG_ON(in_interrupt());
2094 	size = PAGE_ALIGN(size);
2095 	if (unlikely(!size))
2096 		return NULL;
2097 
2098 	if (flags & VM_IOREMAP)
2099 		align = 1ul << clamp_t(int, get_count_order_long(size),
2100 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2101 
2102 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2103 	if (unlikely(!area))
2104 		return NULL;
2105 
2106 	if (!(flags & VM_NO_GUARD))
2107 		size += PAGE_SIZE;
2108 
2109 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2110 	if (IS_ERR(va)) {
2111 		kfree(area);
2112 		return NULL;
2113 	}
2114 
2115 	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2116 
2117 	setup_vmalloc_vm(area, va, flags, caller);
2118 
2119 	return area;
2120 }
2121 
2122 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
2123 				unsigned long start, unsigned long end)
2124 {
2125 	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2126 				  GFP_KERNEL, __builtin_return_address(0));
2127 }
2128 EXPORT_SYMBOL_GPL(__get_vm_area);
2129 
2130 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2131 				       unsigned long start, unsigned long end,
2132 				       const void *caller)
2133 {
2134 	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2135 				  GFP_KERNEL, caller);
2136 }
2137 
2138 /**
2139  * get_vm_area - reserve a contiguous kernel virtual area
2140  * @size: size of the area
2141  * @flags: %VM_IOREMAP for I/O mappings or %VM_ALLOC
2142  *
2143  * Search an area of @size in the kernel virtual mapping area,
2144  * and reserve it for our purposes. Returns the area descriptor
2145  * on success or %NULL on failure.
2146  *
2147  * Return: the area descriptor on success or %NULL on failure.
2148  */
2149 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2150 {
2151 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2152 				  NUMA_NO_NODE, GFP_KERNEL,
2153 				  __builtin_return_address(0));
2154 }
2155 
2156 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2157 				const void *caller)
2158 {
2159 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2160 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2161 }
2162 
2163 /**
2164  * find_vm_area - find a continuous kernel virtual area
2165  * @addr: base address
2166  *
2167  * Search for the kernel VM area starting at @addr, and return it.
2168  * It is up to the caller to do all required locking to keep the returned
2169  * pointer valid.
2170  *
2171  * Return: pointer to the found area or %NULL on failure
2172  */
2173 struct vm_struct *find_vm_area(const void *addr)
2174 {
2175 	struct vmap_area *va;
2176 
2177 	va = find_vmap_area((unsigned long)addr);
2178 	if (!va)
2179 		return NULL;
2180 
2181 	return va->vm;
2182 }
2183 
2184 /**
2185  * remove_vm_area - find and remove a continuous kernel virtual area
2186  * @addr: base address
2187  *
2188  * Search for the kernel VM area starting at @addr, and remove it.
2189  * This function returns the found VM area, but using it is NOT safe
2190  * on SMP machines, except for its size or flags.
2191 * 2192 * Return: pointer to the found area or %NULL on faulure 2193 */ 2194 struct vm_struct *remove_vm_area(const void *addr) 2195 { 2196 struct vmap_area *va; 2197 2198 might_sleep(); 2199 2200 spin_lock(&vmap_area_lock); 2201 va = __find_vmap_area((unsigned long)addr); 2202 if (va && va->vm) { 2203 struct vm_struct *vm = va->vm; 2204 2205 va->vm = NULL; 2206 spin_unlock(&vmap_area_lock); 2207 2208 kasan_free_shadow(vm); 2209 free_unmap_vmap_area(va); 2210 2211 return vm; 2212 } 2213 2214 spin_unlock(&vmap_area_lock); 2215 return NULL; 2216 } 2217 2218 static inline void set_area_direct_map(const struct vm_struct *area, 2219 int (*set_direct_map)(struct page *page)) 2220 { 2221 int i; 2222 2223 for (i = 0; i < area->nr_pages; i++) 2224 if (page_address(area->pages[i])) 2225 set_direct_map(area->pages[i]); 2226 } 2227 2228 /* Handle removing and resetting vm mappings related to the vm_struct. */ 2229 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2230 { 2231 unsigned long start = ULONG_MAX, end = 0; 2232 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 2233 int flush_dmap = 0; 2234 int i; 2235 2236 remove_vm_area(area->addr); 2237 2238 /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */ 2239 if (!flush_reset) 2240 return; 2241 2242 /* 2243 * If not deallocating pages, just do the flush of the VM area and 2244 * return. 2245 */ 2246 if (!deallocate_pages) { 2247 vm_unmap_aliases(); 2248 return; 2249 } 2250 2251 /* 2252 * If execution gets here, flush the vm mapping and reset the direct 2253 * map. Find the start and end range of the direct mappings to make sure 2254 * the vm_unmap_aliases() flush includes the direct map. 2255 */ 2256 for (i = 0; i < area->nr_pages; i++) { 2257 unsigned long addr = (unsigned long)page_address(area->pages[i]); 2258 if (addr) { 2259 start = min(addr, start); 2260 end = max(addr + PAGE_SIZE, end); 2261 flush_dmap = 1; 2262 } 2263 } 2264 2265 /* 2266 * Set direct map to something invalid so that it won't be cached if 2267 * there are any accesses after the TLB flush, then flush the TLB and 2268 * reset the direct map permissions to the default. 2269 */ 2270 set_area_direct_map(area, set_direct_map_invalid_noflush); 2271 _vm_unmap_aliases(start, end, flush_dmap); 2272 set_area_direct_map(area, set_direct_map_default_noflush); 2273 } 2274 2275 static void __vunmap(const void *addr, int deallocate_pages) 2276 { 2277 struct vm_struct *area; 2278 2279 if (!addr) 2280 return; 2281 2282 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 2283 addr)) 2284 return; 2285 2286 area = find_vm_area(addr); 2287 if (unlikely(!area)) { 2288 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 2289 addr); 2290 return; 2291 } 2292 2293 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 2294 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 2295 2296 kasan_poison_vmalloc(area->addr, area->size); 2297 2298 vm_remove_mappings(area, deallocate_pages); 2299 2300 if (deallocate_pages) { 2301 int i; 2302 2303 for (i = 0; i < area->nr_pages; i++) { 2304 struct page *page = area->pages[i]; 2305 2306 BUG_ON(!page); 2307 __free_pages(page, 0); 2308 } 2309 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); 2310 2311 kvfree(area->pages); 2312 } 2313 2314 kfree(area); 2315 return; 2316 } 2317 2318 static inline void __vfree_deferred(const void *addr) 2319 { 2320 /* 2321 * Use raw_cpu_ptr() because this can be called from preemptible 2322 * context. 
Preemption is absolutely fine here, because the llist_add()
2323 	 * implementation is lockless, so it works even if we are adding to
2324 	 * another cpu's list. schedule_work() should be fine with this too.
2325 	 */
2326 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2327 
2328 	if (llist_add((struct llist_node *)addr, &p->list))
2329 		schedule_work(&p->wq);
2330 }
2331 
2332 /**
2333  * vfree_atomic - release memory allocated by vmalloc()
2334  * @addr: memory base address
2335  *
2336  * This one is just like vfree() but can be called in any atomic context
2337  * except NMIs.
2338  */
2339 void vfree_atomic(const void *addr)
2340 {
2341 	BUG_ON(in_nmi());
2342 
2343 	kmemleak_free(addr);
2344 
2345 	if (!addr)
2346 		return;
2347 	__vfree_deferred(addr);
2348 }
2349 
2350 static void __vfree(const void *addr)
2351 {
2352 	if (unlikely(in_interrupt()))
2353 		__vfree_deferred(addr);
2354 	else
2355 		__vunmap(addr, 1);
2356 }
2357 
2358 /**
2359  * vfree - release memory allocated by vmalloc()
2360  * @addr: memory base address
2361  *
2362  * Free the virtually contiguous memory area starting at @addr, as
2363  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2364  * NULL, no operation is performed.
2365  *
2366  * Must not be called in NMI context (strictly speaking, only if we don't
2367  * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2368  * conventions for vfree() arch-dependent would be a really bad idea)
2369  *
2370  * May sleep if called *not* from interrupt context.
2371  *
2372  * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
2373  */
2374 void vfree(const void *addr)
2375 {
2376 	BUG_ON(in_nmi());
2377 
2378 	kmemleak_free(addr);
2379 
2380 	might_sleep_if(!in_interrupt());
2381 
2382 	if (!addr)
2383 		return;
2384 
2385 	__vfree(addr);
2386 }
2387 EXPORT_SYMBOL(vfree);
2388 
2389 /**
2390  * vunmap - release virtual mapping obtained by vmap()
2391  * @addr: memory base address
2392  *
2393  * Free the virtually contiguous memory area starting at @addr,
2394  * which was created from the page array passed to vmap().
2395  *
2396  * Must not be called in interrupt context.
2397  */
2398 void vunmap(const void *addr)
2399 {
2400 	BUG_ON(in_interrupt());
2401 	might_sleep();
2402 	if (addr)
2403 		__vunmap(addr, 0);
2404 }
2405 EXPORT_SYMBOL(vunmap);
2406 
2407 /**
2408  * vmap - map an array of pages into virtually contiguous space
2409  * @pages: array of page pointers
2410  * @count: number of pages to map
2411  * @flags: vm_area->flags
2412  * @prot: page protection for the mapping
2413  *
2414  * Maps @count pages from @pages into contiguous kernel virtual
2415  * space.
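 *
 * A minimal usage sketch (assumes the caller already allocated @count
 * pages into a pages[] array; error handling elided):
 *
 *	void *va = vmap(pages, count, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		memset(va, 0, count * PAGE_SIZE);
 *		vunmap(va);
 *	}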
2416 * 2417 * Return: the address of the area or %NULL on failure 2418 */ 2419 void *vmap(struct page **pages, unsigned int count, 2420 unsigned long flags, pgprot_t prot) 2421 { 2422 struct vm_struct *area; 2423 unsigned long size; /* In bytes */ 2424 2425 might_sleep(); 2426 2427 if (count > totalram_pages()) 2428 return NULL; 2429 2430 size = (unsigned long)count << PAGE_SHIFT; 2431 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 2432 if (!area) 2433 return NULL; 2434 2435 if (map_vm_area(area, prot, pages)) { 2436 vunmap(area->addr); 2437 return NULL; 2438 } 2439 2440 return area->addr; 2441 } 2442 EXPORT_SYMBOL(vmap); 2443 2444 static void *__vmalloc_node(unsigned long size, unsigned long align, 2445 gfp_t gfp_mask, pgprot_t prot, 2446 int node, const void *caller); 2447 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 2448 pgprot_t prot, int node) 2449 { 2450 struct page **pages; 2451 unsigned int nr_pages, array_size, i; 2452 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 2453 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; 2454 const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ? 2455 0 : 2456 __GFP_HIGHMEM; 2457 2458 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; 2459 array_size = (nr_pages * sizeof(struct page *)); 2460 2461 /* Please note that the recursion is strictly bounded. */ 2462 if (array_size > PAGE_SIZE) { 2463 pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, 2464 PAGE_KERNEL, node, area->caller); 2465 } else { 2466 pages = kmalloc_node(array_size, nested_gfp, node); 2467 } 2468 2469 if (!pages) { 2470 remove_vm_area(area->addr); 2471 kfree(area); 2472 return NULL; 2473 } 2474 2475 area->pages = pages; 2476 area->nr_pages = nr_pages; 2477 2478 for (i = 0; i < area->nr_pages; i++) { 2479 struct page *page; 2480 2481 if (node == NUMA_NO_NODE) 2482 page = alloc_page(alloc_mask|highmem_mask); 2483 else 2484 page = alloc_pages_node(node, alloc_mask|highmem_mask, 0); 2485 2486 if (unlikely(!page)) { 2487 /* Successfully allocated i pages, free them in __vunmap() */ 2488 area->nr_pages = i; 2489 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 2490 goto fail; 2491 } 2492 area->pages[i] = page; 2493 if (gfpflags_allow_blocking(gfp_mask)) 2494 cond_resched(); 2495 } 2496 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 2497 2498 if (map_vm_area(area, prot, pages)) 2499 goto fail; 2500 return area->addr; 2501 2502 fail: 2503 warn_alloc(gfp_mask, NULL, 2504 "vmalloc: allocation failure, allocated %ld of %ld bytes", 2505 (area->nr_pages*PAGE_SIZE), area->size); 2506 __vfree(area->addr); 2507 return NULL; 2508 } 2509 2510 /** 2511 * __vmalloc_node_range - allocate virtually contiguous memory 2512 * @size: allocation size 2513 * @align: desired alignment 2514 * @start: vm area range start 2515 * @end: vm area range end 2516 * @gfp_mask: flags for the page level allocator 2517 * @prot: protection mask for the allocated pages 2518 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 2519 * @node: node to use for allocation or NUMA_NO_NODE 2520 * @caller: caller's return address 2521 * 2522 * Allocate enough pages to cover @size from the page level 2523 * allocator with @gfp_mask flags. Map them into contiguous 2524 * kernel virtual space, using a pagetable protection of @prot. 
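 *
 * As a concrete instance, vmalloc_exec() below is implemented as:
 *
 *	__vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 *			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
 *			NUMA_NO_NODE, __builtin_return_address(0));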
2525 * 2526 * Return: the address of the area or %NULL on failure 2527 */ 2528 void *__vmalloc_node_range(unsigned long size, unsigned long align, 2529 unsigned long start, unsigned long end, gfp_t gfp_mask, 2530 pgprot_t prot, unsigned long vm_flags, int node, 2531 const void *caller) 2532 { 2533 struct vm_struct *area; 2534 void *addr; 2535 unsigned long real_size = size; 2536 2537 size = PAGE_ALIGN(size); 2538 if (!size || (size >> PAGE_SHIFT) > totalram_pages()) 2539 goto fail; 2540 2541 area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED | 2542 vm_flags, start, end, node, gfp_mask, caller); 2543 if (!area) 2544 goto fail; 2545 2546 addr = __vmalloc_area_node(area, gfp_mask, prot, node); 2547 if (!addr) 2548 return NULL; 2549 2550 /* 2551 * In this function, newly allocated vm_struct has VM_UNINITIALIZED 2552 * flag. It means that vm_struct is not fully initialized. 2553 * Now, it is fully initialized, so remove this flag here. 2554 */ 2555 clear_vm_uninitialized_flag(area); 2556 2557 kmemleak_vmalloc(area, size, gfp_mask); 2558 2559 return addr; 2560 2561 fail: 2562 warn_alloc(gfp_mask, NULL, 2563 "vmalloc: allocation failure: %lu bytes", real_size); 2564 return NULL; 2565 } 2566 2567 /* 2568 * This is only for performance analysis of vmalloc and stress purpose. 2569 * It is required by vmalloc test module, therefore do not use it other 2570 * than that. 2571 */ 2572 #ifdef CONFIG_TEST_VMALLOC_MODULE 2573 EXPORT_SYMBOL_GPL(__vmalloc_node_range); 2574 #endif 2575 2576 /** 2577 * __vmalloc_node - allocate virtually contiguous memory 2578 * @size: allocation size 2579 * @align: desired alignment 2580 * @gfp_mask: flags for the page level allocator 2581 * @prot: protection mask for the allocated pages 2582 * @node: node to use for allocation or NUMA_NO_NODE 2583 * @caller: caller's return address 2584 * 2585 * Allocate enough pages to cover @size from the page level 2586 * allocator with @gfp_mask flags. Map them into contiguous 2587 * kernel virtual space, using a pagetable protection of @prot. 2588 * 2589 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 2590 * and __GFP_NOFAIL are not supported 2591 * 2592 * Any use of gfp flags outside of GFP_KERNEL should be consulted 2593 * with mm people. 2594 * 2595 * Return: pointer to the allocated memory or %NULL on error 2596 */ 2597 static void *__vmalloc_node(unsigned long size, unsigned long align, 2598 gfp_t gfp_mask, pgprot_t prot, 2599 int node, const void *caller) 2600 { 2601 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 2602 gfp_mask, prot, 0, node, caller); 2603 } 2604 2605 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 2606 { 2607 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, 2608 __builtin_return_address(0)); 2609 } 2610 EXPORT_SYMBOL(__vmalloc); 2611 2612 static inline void *__vmalloc_node_flags(unsigned long size, 2613 int node, gfp_t flags) 2614 { 2615 return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 2616 node, __builtin_return_address(0)); 2617 } 2618 2619 2620 void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, 2621 void *caller) 2622 { 2623 return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller); 2624 } 2625 2626 /** 2627 * vmalloc - allocate virtually contiguous memory 2628 * @size: allocation size 2629 * 2630 * Allocate enough pages to cover @size from the page level 2631 * allocator and map them into contiguous kernel virtual space. 
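 *
 * Typical usage (struct foo and nents are placeholders for the caller's
 * own types and sizes):
 *
 *	struct foo *tbl = vmalloc(array_size(nents, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);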
2632 * 2633 * For tight control over page level allocator and protection flags 2634 * use __vmalloc() instead. 2635 * 2636 * Return: pointer to the allocated memory or %NULL on error 2637 */ 2638 void *vmalloc(unsigned long size) 2639 { 2640 return __vmalloc_node_flags(size, NUMA_NO_NODE, 2641 GFP_KERNEL); 2642 } 2643 EXPORT_SYMBOL(vmalloc); 2644 2645 /** 2646 * vzalloc - allocate virtually contiguous memory with zero fill 2647 * @size: allocation size 2648 * 2649 * Allocate enough pages to cover @size from the page level 2650 * allocator and map them into contiguous kernel virtual space. 2651 * The memory allocated is set to zero. 2652 * 2653 * For tight control over page level allocator and protection flags 2654 * use __vmalloc() instead. 2655 * 2656 * Return: pointer to the allocated memory or %NULL on error 2657 */ 2658 void *vzalloc(unsigned long size) 2659 { 2660 return __vmalloc_node_flags(size, NUMA_NO_NODE, 2661 GFP_KERNEL | __GFP_ZERO); 2662 } 2663 EXPORT_SYMBOL(vzalloc); 2664 2665 /** 2666 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 2667 * @size: allocation size 2668 * 2669 * The resulting memory area is zeroed so it can be mapped to userspace 2670 * without leaking data. 2671 * 2672 * Return: pointer to the allocated memory or %NULL on error 2673 */ 2674 void *vmalloc_user(unsigned long size) 2675 { 2676 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2677 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 2678 VM_USERMAP, NUMA_NO_NODE, 2679 __builtin_return_address(0)); 2680 } 2681 EXPORT_SYMBOL(vmalloc_user); 2682 2683 /** 2684 * vmalloc_node - allocate memory on a specific node 2685 * @size: allocation size 2686 * @node: numa node 2687 * 2688 * Allocate enough pages to cover @size from the page level 2689 * allocator and map them into contiguous kernel virtual space. 2690 * 2691 * For tight control over page level allocator and protection flags 2692 * use __vmalloc() instead. 2693 * 2694 * Return: pointer to the allocated memory or %NULL on error 2695 */ 2696 void *vmalloc_node(unsigned long size, int node) 2697 { 2698 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL, 2699 node, __builtin_return_address(0)); 2700 } 2701 EXPORT_SYMBOL(vmalloc_node); 2702 2703 /** 2704 * vzalloc_node - allocate memory on a specific node with zero fill 2705 * @size: allocation size 2706 * @node: numa node 2707 * 2708 * Allocate enough pages to cover @size from the page level 2709 * allocator and map them into contiguous kernel virtual space. 2710 * The memory allocated is set to zero. 2711 * 2712 * For tight control over page level allocator and protection flags 2713 * use __vmalloc_node() instead. 2714 * 2715 * Return: pointer to the allocated memory or %NULL on error 2716 */ 2717 void *vzalloc_node(unsigned long size, int node) 2718 { 2719 return __vmalloc_node_flags(size, node, 2720 GFP_KERNEL | __GFP_ZERO); 2721 } 2722 EXPORT_SYMBOL(vzalloc_node); 2723 2724 /** 2725 * vmalloc_user_node_flags - allocate memory for userspace on a specific node 2726 * @size: allocation size 2727 * @node: numa node 2728 * @flags: flags for the page level allocator 2729 * 2730 * The resulting memory area is zeroed so it can be mapped to userspace 2731 * without leaking data. 
2732  *
2733  * Return: pointer to the allocated memory or %NULL on error
2734  */
2735 void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags)
2736 {
2737 	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2738 				    flags | __GFP_ZERO, PAGE_KERNEL,
2739 				    VM_USERMAP, node,
2740 				    __builtin_return_address(0));
2741 }
2742 EXPORT_SYMBOL(vmalloc_user_node_flags);
2743 
2744 /**
2745  * vmalloc_exec - allocate virtually contiguous, executable memory
2746  * @size: allocation size
2747  *
2748  * Kernel-internal function to allocate enough pages to cover @size
2749  * from the page level allocator and map them into contiguous and
2750  * executable kernel virtual space.
2751  *
2752  * For tight control over page level allocator and protection flags
2753  * use __vmalloc() instead.
2754  *
2755  * Return: pointer to the allocated memory or %NULL on error
2756  */
2757 void *vmalloc_exec(unsigned long size)
2758 {
2759 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2760 			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2761 			NUMA_NO_NODE, __builtin_return_address(0));
2762 }
2763 
2764 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2765 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2766 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2767 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2768 #else
2769 /*
2770  * 64-bit systems should always have either DMA or DMA32 zones. For others
2771  * GFP_DMA32 should do the right thing and use the normal zone.
2772  */
2773 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2774 #endif
2775 
2776 /**
2777  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2778  * @size: allocation size
2779  *
2780  * Allocate enough 32bit PA addressable pages to cover @size from the
2781  * page level allocator and map them into contiguous kernel virtual space.
2782  *
2783  * Return: pointer to the allocated memory or %NULL on error
2784  */
2785 void *vmalloc_32(unsigned long size)
2786 {
2787 	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
2788 			      NUMA_NO_NODE, __builtin_return_address(0));
2789 }
2790 EXPORT_SYMBOL(vmalloc_32);
2791 
2792 /**
2793  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2794  * @size: allocation size
2795  *
2796  * The resulting memory area is 32bit addressable and zeroed so it can be
2797  * mapped to userspace without leaking data.
2798  *
2799  * Return: pointer to the allocated memory or %NULL on error
2800  */
2801 void *vmalloc_32_user(unsigned long size)
2802 {
2803 	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2804 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2805 				    VM_USERMAP, NUMA_NO_NODE,
2806 				    __builtin_return_address(0));
2807 }
2808 EXPORT_SYMBOL(vmalloc_32_user);
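/*
 * Example: pairing vmalloc_32_user() with remap_vmalloc_range() (defined
 * later in this file) to hand a zeroed, 32-bit addressable buffer to
 * userspace (sketch, error handling elided):
 *
 *	void *buf = vmalloc_32_user(buf_size);
 *
 *	...
 *	err = remap_vmalloc_range(vma, buf, 0);
 */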
2809 
2810 /*
2811  * Small helper routine: copy contents from addr to buf.
2812  * If a page is not present, fill with zeroes.
2813  */
2814 
2815 static int aligned_vread(char *buf, char *addr, unsigned long count)
2816 {
2817 	struct page *p;
2818 	int copied = 0;
2819 
2820 	while (count) {
2821 		unsigned long offset, length;
2822 
2823 		offset = offset_in_page(addr);
2824 		length = PAGE_SIZE - offset;
2825 		if (length > count)
2826 			length = count;
2827 		p = vmalloc_to_page(addr);
2828 		/*
2829 		 * To do safe access to this _mapped_ area, we need a
2830 		 * lock. But adding a lock here means adding the overhead
2831 		 * of vmalloc()/vfree() calls for this rarely used _debug_
2832 		 * interface. Instead of that, we'll use kmap() and accept
2833 		 * a small overhead in this access function.
2834 		 */
2835 		if (p) {
2836 			/*
2837 			 * we can expect USER0 is not used (see vread/vwrite's
2838 			 * function description)
2839 			 */
2840 			void *map = kmap_atomic(p);
2841 			memcpy(buf, map + offset, length);
2842 			kunmap_atomic(map);
2843 		} else
2844 			memset(buf, 0, length);
2845 
2846 		addr += length;
2847 		buf += length;
2848 		copied += length;
2849 		count -= length;
2850 	}
2851 	return copied;
2852 }
2853 
2854 static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2855 {
2856 	struct page *p;
2857 	int copied = 0;
2858 
2859 	while (count) {
2860 		unsigned long offset, length;
2861 
2862 		offset = offset_in_page(addr);
2863 		length = PAGE_SIZE - offset;
2864 		if (length > count)
2865 			length = count;
2866 		p = vmalloc_to_page(addr);
2867 		/*
2868 		 * To do safe access to this _mapped_ area, we need a
2869 		 * lock. But adding a lock here means adding the overhead
2870 		 * of vmalloc()/vfree() calls for this rarely used _debug_
2871 		 * interface. Instead of that, we'll use kmap() and accept
2872 		 * a small overhead in this access function.
2873 		 */
2874 		if (p) {
2875 			/*
2876 			 * we can expect USER0 is not used (see vread/vwrite's
2877 			 * function description)
2878 			 */
2879 			void *map = kmap_atomic(p);
2880 			memcpy(map + offset, buf, length);
2881 			kunmap_atomic(map);
2882 		}
2883 		addr += length;
2884 		buf += length;
2885 		copied += length;
2886 		count -= length;
2887 	}
2888 	return copied;
2889 }
2890 
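/*
 * Worked example of the copy loops above, assuming 4K pages: for
 * offset_in_page(addr) == 0xff0 and count == 64, the first iteration
 * copies PAGE_SIZE - 0xff0 = 16 bytes from the first page and the second
 * iteration copies the remaining 48 bytes from the next page, with
 * aligned_vread() zero-filling any page that vmalloc_to_page() cannot
 * resolve.
 */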
2891 /**
2892  * vread() - read vmalloc area in a safe way.
2893  * @buf: buffer for reading data
2894  * @addr: vm address.
2895  * @count: number of bytes to be read.
2896  *
2897  * This function checks that addr is a valid vmalloc'ed area, and
2898  * copies data from that area to a given buffer. If the given memory range
2899  * of [addr...addr+count) includes some valid address, data is copied to
2900  * the proper area of @buf. If there are memory holes, they'll be zero-filled.
2901  * IOREMAP area is treated as memory hole and no copy is done.
2902  *
2903  * If [addr...addr+count) doesn't include any intersection with a live
2904  * vm_struct area, this function returns 0. @buf should be a kernel buffer.
2905  *
2906  * Note: In usual ops, vread() is never necessary because the caller
2907  * should know the vmalloc() area is valid and can use memcpy().
2908  * This is for routines which have to access vmalloc area without
2909  * any information, such as /dev/kmem.
2910  *
2911  * Return: number of bytes for which addr and buf should be increased
2912  *	(same number as @count) or %0 if [addr...addr+count) doesn't
2913  *	include any intersection with valid vmalloc area
2914  */
2915 long vread(char *buf, char *addr, unsigned long count)
2916 {
2917 	struct vmap_area *va;
2918 	struct vm_struct *vm;
2919 	char *vaddr, *buf_start = buf;
2920 	unsigned long buflen = count;
2921 	unsigned long n;
2922 
2923 	/* Don't allow overflow */
2924 	if ((unsigned long) addr + count < count)
2925 		count = -(unsigned long) addr;
2926 
2927 	spin_lock(&vmap_area_lock);
2928 	list_for_each_entry(va, &vmap_area_list, list) {
2929 		if (!count)
2930 			break;
2931 
2932 		if (!va->vm)
2933 			continue;
2934 
2935 		vm = va->vm;
2936 		vaddr = (char *) vm->addr;
2937 		if (addr >= vaddr + get_vm_area_size(vm))
2938 			continue;
2939 		while (addr < vaddr) {
2940 			if (count == 0)
2941 				goto finished;
2942 			*buf = '\0';
2943 			buf++;
2944 			addr++;
2945 			count--;
2946 		}
2947 		n = vaddr + get_vm_area_size(vm) - addr;
2948 		if (n > count)
2949 			n = count;
2950 		if (!(vm->flags & VM_IOREMAP))
2951 			aligned_vread(buf, addr, n);
2952 		else /* IOREMAP area is treated as memory hole */
2953 			memset(buf, 0, n);
2954 		buf += n;
2955 		addr += n;
2956 		count -= n;
2957 	}
2958 finished:
2959 	spin_unlock(&vmap_area_lock);
2960 
2961 	if (buf == buf_start)
2962 		return 0;
2963 	/* zero-fill memory holes */
2964 	if (buf != buf_start + buflen)
2965 		memset(buf, 0, buflen - (buf - buf_start));
2966 
2967 	return buflen;
2968 }
2969 
2970 /**
2971  * vwrite() - write vmalloc area in a safe way.
2972  * @buf: buffer for source data
2973  * @addr: vm address.
2974  * @count: number of bytes to be written.
2975  *
2976  * This function checks that addr is a valid vmalloc'ed area, and
2977  * copies data from a buffer to the given addr. If the specified range of
2978  * [addr...addr+count) includes some valid address, data is copied from
2979  * the proper area of @buf. If there are memory holes, nothing is copied
2980  * to them. IOREMAP area is treated as memory hole and no copy is done.
2981  *
2982  * If [addr...addr+count) doesn't include any intersection with a live
2983  * vm_struct area, this function returns 0. @buf should be a kernel buffer.
2984  *
2985  * Note: In usual ops, vwrite() is never necessary because the caller
2986  * should know the vmalloc() area is valid and can use memcpy().
2987  * This is for routines which have to access vmalloc area without
2988  * any information, such as /dev/kmem.
2989 * 2990 * Return: number of bytes for which addr and buf should be 2991 * increased (same number as @count) or %0 if [addr...addr+count) 2992 * doesn't include any intersection with valid vmalloc area 2993 */ 2994 long vwrite(char *buf, char *addr, unsigned long count) 2995 { 2996 struct vmap_area *va; 2997 struct vm_struct *vm; 2998 char *vaddr; 2999 unsigned long n, buflen; 3000 int copied = 0; 3001 3002 /* Don't allow overflow */ 3003 if ((unsigned long) addr + count < count) 3004 count = -(unsigned long) addr; 3005 buflen = count; 3006 3007 spin_lock(&vmap_area_lock); 3008 list_for_each_entry(va, &vmap_area_list, list) { 3009 if (!count) 3010 break; 3011 3012 if (!va->vm) 3013 continue; 3014 3015 vm = va->vm; 3016 vaddr = (char *) vm->addr; 3017 if (addr >= vaddr + get_vm_area_size(vm)) 3018 continue; 3019 while (addr < vaddr) { 3020 if (count == 0) 3021 goto finished; 3022 buf++; 3023 addr++; 3024 count--; 3025 } 3026 n = vaddr + get_vm_area_size(vm) - addr; 3027 if (n > count) 3028 n = count; 3029 if (!(vm->flags & VM_IOREMAP)) { 3030 aligned_vwrite(buf, addr, n); 3031 copied++; 3032 } 3033 buf += n; 3034 addr += n; 3035 count -= n; 3036 } 3037 finished: 3038 spin_unlock(&vmap_area_lock); 3039 if (!copied) 3040 return 0; 3041 return buflen; 3042 } 3043 3044 /** 3045 * remap_vmalloc_range_partial - map vmalloc pages to userspace 3046 * @vma: vma to cover 3047 * @uaddr: target user address to start at 3048 * @kaddr: virtual address of vmalloc kernel memory 3049 * @size: size of map area 3050 * 3051 * Returns: 0 for success, -Exxx on failure 3052 * 3053 * This function checks that @kaddr is a valid vmalloc'ed area, 3054 * and that it is big enough to cover the range starting at 3055 * @uaddr in @vma. Will return failure if that criteria isn't 3056 * met. 3057 * 3058 * Similar to remap_pfn_range() (see mm/memory.c) 3059 */ 3060 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 3061 void *kaddr, unsigned long size) 3062 { 3063 struct vm_struct *area; 3064 3065 size = PAGE_ALIGN(size); 3066 3067 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 3068 return -EINVAL; 3069 3070 area = find_vm_area(kaddr); 3071 if (!area) 3072 return -EINVAL; 3073 3074 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 3075 return -EINVAL; 3076 3077 if (kaddr + size > area->addr + get_vm_area_size(area)) 3078 return -EINVAL; 3079 3080 do { 3081 struct page *page = vmalloc_to_page(kaddr); 3082 int ret; 3083 3084 ret = vm_insert_page(vma, uaddr, page); 3085 if (ret) 3086 return ret; 3087 3088 uaddr += PAGE_SIZE; 3089 kaddr += PAGE_SIZE; 3090 size -= PAGE_SIZE; 3091 } while (size > 0); 3092 3093 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3094 3095 return 0; 3096 } 3097 EXPORT_SYMBOL(remap_vmalloc_range_partial); 3098 3099 /** 3100 * remap_vmalloc_range - map vmalloc pages to userspace 3101 * @vma: vma to cover (map full range of vma) 3102 * @addr: vmalloc memory 3103 * @pgoff: number of pages into addr before first page to map 3104 * 3105 * Returns: 0 for success, -Exxx on failure 3106 * 3107 * This function checks that addr is a valid vmalloc'ed area, and 3108 * that it is big enough to cover the vma. Will return failure if 3109 * that criteria isn't met. 
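 *
 * Typical use is from a driver's ->mmap() handler (sketch; priv->buf is
 * assumed to be a vmalloc_user() allocation sized for the vma):
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct example_priv *priv = file->private_data;
 *
 *		return remap_vmalloc_range(vma, priv->buf, 0);
 *	}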
3110 * 3111 * Similar to remap_pfn_range() (see mm/memory.c) 3112 */ 3113 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 3114 unsigned long pgoff) 3115 { 3116 return remap_vmalloc_range_partial(vma, vma->vm_start, 3117 addr + (pgoff << PAGE_SHIFT), 3118 vma->vm_end - vma->vm_start); 3119 } 3120 EXPORT_SYMBOL(remap_vmalloc_range); 3121 3122 /* 3123 * Implement a stub for vmalloc_sync_all() if the architecture chose not to 3124 * have one. 3125 * 3126 * The purpose of this function is to make sure the vmalloc area 3127 * mappings are identical in all page-tables in the system. 3128 */ 3129 void __weak vmalloc_sync_all(void) 3130 { 3131 } 3132 3133 3134 static int f(pte_t *pte, unsigned long addr, void *data) 3135 { 3136 pte_t ***p = data; 3137 3138 if (p) { 3139 *(*p) = pte; 3140 (*p)++; 3141 } 3142 return 0; 3143 } 3144 3145 /** 3146 * alloc_vm_area - allocate a range of kernel address space 3147 * @size: size of the area 3148 * @ptes: returns the PTEs for the address space 3149 * 3150 * Returns: NULL on failure, vm_struct on success 3151 * 3152 * This function reserves a range of kernel address space, and 3153 * allocates pagetables to map that range. No actual mappings 3154 * are created. 3155 * 3156 * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 3157 * allocated for the VM area are returned. 3158 */ 3159 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 3160 { 3161 struct vm_struct *area; 3162 3163 area = get_vm_area_caller(size, VM_IOREMAP, 3164 __builtin_return_address(0)); 3165 if (area == NULL) 3166 return NULL; 3167 3168 /* 3169 * This ensures that page tables are constructed for this region 3170 * of kernel virtual address space and mapped into init_mm. 3171 */ 3172 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 3173 size, f, ptes ? &ptes : NULL)) { 3174 free_vm_area(area); 3175 return NULL; 3176 } 3177 3178 return area; 3179 } 3180 EXPORT_SYMBOL_GPL(alloc_vm_area); 3181 3182 void free_vm_area(struct vm_struct *area) 3183 { 3184 struct vm_struct *ret; 3185 ret = remove_vm_area(area->addr); 3186 BUG_ON(ret != area); 3187 kfree(area); 3188 } 3189 EXPORT_SYMBOL_GPL(free_vm_area); 3190 3191 #ifdef CONFIG_SMP 3192 static struct vmap_area *node_to_va(struct rb_node *n) 3193 { 3194 return rb_entry_safe(n, struct vmap_area, rb_node); 3195 } 3196 3197 /** 3198 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 3199 * @addr: target address 3200 * 3201 * Returns: vmap_area if it is found. If there is no such area 3202 * the first highest(reverse order) vmap_area is returned 3203 * i.e. va->va_start < addr && va->va_end < addr or NULL 3204 * if there are no any areas before @addr. 3205 */ 3206 static struct vmap_area * 3207 pvm_find_va_enclose_addr(unsigned long addr) 3208 { 3209 struct vmap_area *va, *tmp; 3210 struct rb_node *n; 3211 3212 n = free_vmap_area_root.rb_node; 3213 va = NULL; 3214 3215 while (n) { 3216 tmp = rb_entry(n, struct vmap_area, rb_node); 3217 if (tmp->va_start <= addr) { 3218 va = tmp; 3219 if (tmp->va_end >= addr) 3220 break; 3221 3222 n = n->rb_right; 3223 } else { 3224 n = n->rb_left; 3225 } 3226 } 3227 3228 return va; 3229 } 3230 3231 /** 3232 * pvm_determine_end_from_reverse - find the highest aligned address 3233 * of free block below VMALLOC_END 3234 * @va: 3235 * in - the VA we start the search(reverse order); 3236 * out - the VA with the highest aligned end address. 
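 *
 * For example (illustrative numbers), with align == 0x10000 a free block
 * whose va_end is 0x12345678 yields the candidate end
 * 0x12345678 & ~(align - 1) == 0x12340000, clamped to VMALLOC_END.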
3237 * 3238 * Returns: determined end address within vmap_area 3239 */ 3240 static unsigned long 3241 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 3242 { 3243 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3244 unsigned long addr; 3245 3246 if (likely(*va)) { 3247 list_for_each_entry_from_reverse((*va), 3248 &free_vmap_area_list, list) { 3249 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 3250 if ((*va)->va_start < addr) 3251 return addr; 3252 } 3253 } 3254 3255 return 0; 3256 } 3257 3258 /** 3259 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 3260 * @offsets: array containing offset of each area 3261 * @sizes: array containing size of each area 3262 * @nr_vms: the number of areas to allocate 3263 * @align: alignment, all entries in @offsets and @sizes must be aligned to this 3264 * 3265 * Returns: kmalloc'd vm_struct pointer array pointing to allocated 3266 * vm_structs on success, %NULL on failure 3267 * 3268 * Percpu allocator wants to use congruent vm areas so that it can 3269 * maintain the offsets among percpu areas. This function allocates 3270 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 3271 * be scattered pretty far, distance between two areas easily going up 3272 * to gigabytes. To avoid interacting with regular vmallocs, these 3273 * areas are allocated from top. 3274 * 3275 * Despite its complicated look, this allocator is rather simple. It 3276 * does everything top-down and scans free blocks from the end looking 3277 * for matching base. While scanning, if any of the areas do not fit the 3278 * base address is pulled down to fit the area. Scanning is repeated till 3279 * all the areas fit and then all necessary data structures are inserted 3280 * and the result is returned. 3281 */ 3282 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 3283 const size_t *sizes, int nr_vms, 3284 size_t align) 3285 { 3286 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 3287 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3288 struct vmap_area **vas, *va; 3289 struct vm_struct **vms; 3290 int area, area2, last_area, term_area; 3291 unsigned long base, start, size, end, last_end, orig_start, orig_end; 3292 bool purged = false; 3293 enum fit_type type; 3294 3295 /* verify parameters and allocate data structures */ 3296 BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 3297 for (last_area = 0, area = 0; area < nr_vms; area++) { 3298 start = offsets[area]; 3299 end = start + sizes[area]; 3300 3301 /* is everything aligned properly? 
*/ 3302 BUG_ON(!IS_ALIGNED(offsets[area], align)); 3303 BUG_ON(!IS_ALIGNED(sizes[area], align)); 3304 3305 /* detect the area with the highest address */ 3306 if (start > offsets[last_area]) 3307 last_area = area; 3308 3309 for (area2 = area + 1; area2 < nr_vms; area2++) { 3310 unsigned long start2 = offsets[area2]; 3311 unsigned long end2 = start2 + sizes[area2]; 3312 3313 BUG_ON(start2 < end && start < end2); 3314 } 3315 } 3316 last_end = offsets[last_area] + sizes[last_area]; 3317 3318 if (vmalloc_end - vmalloc_start < last_end) { 3319 WARN_ON(true); 3320 return NULL; 3321 } 3322 3323 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 3324 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 3325 if (!vas || !vms) 3326 goto err_free2; 3327 3328 for (area = 0; area < nr_vms; area++) { 3329 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 3330 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 3331 if (!vas[area] || !vms[area]) 3332 goto err_free; 3333 } 3334 retry: 3335 spin_lock(&free_vmap_area_lock); 3336 3337 /* start scanning - we scan from the top, begin with the last area */ 3338 area = term_area = last_area; 3339 start = offsets[area]; 3340 end = start + sizes[area]; 3341 3342 va = pvm_find_va_enclose_addr(vmalloc_end); 3343 base = pvm_determine_end_from_reverse(&va, align) - end; 3344 3345 while (true) { 3346 /* 3347 * base might have underflowed, add last_end before 3348 * comparing. 3349 */ 3350 if (base + last_end < vmalloc_start + last_end) 3351 goto overflow; 3352 3353 /* 3354 * Fitting base has not been found. 3355 */ 3356 if (va == NULL) 3357 goto overflow; 3358 3359 /* 3360 * If required width exeeds current VA block, move 3361 * base downwards and then recheck. 3362 */ 3363 if (base + end > va->va_end) { 3364 base = pvm_determine_end_from_reverse(&va, align) - end; 3365 term_area = area; 3366 continue; 3367 } 3368 3369 /* 3370 * If this VA does not fit, move base downwards and recheck. 3371 */ 3372 if (base + start < va->va_start) { 3373 va = node_to_va(rb_prev(&va->rb_node)); 3374 base = pvm_determine_end_from_reverse(&va, align) - end; 3375 term_area = area; 3376 continue; 3377 } 3378 3379 /* 3380 * This area fits, move on to the previous one. If 3381 * the previous one is the terminal one, we're done. 3382 */ 3383 area = (area + nr_vms - 1) % nr_vms; 3384 if (area == term_area) 3385 break; 3386 3387 start = offsets[area]; 3388 end = start + sizes[area]; 3389 va = pvm_find_va_enclose_addr(base + end); 3390 } 3391 3392 /* we've found a fitting base, insert all va's */ 3393 for (area = 0; area < nr_vms; area++) { 3394 int ret; 3395 3396 start = base + offsets[area]; 3397 size = sizes[area]; 3398 3399 va = pvm_find_va_enclose_addr(start); 3400 if (WARN_ON_ONCE(va == NULL)) 3401 /* It is a BUG(), but trigger recovery instead. */ 3402 goto recovery; 3403 3404 type = classify_va_fit_type(va, start, size); 3405 if (WARN_ON_ONCE(type == NOTHING_FIT)) 3406 /* It is a BUG(), but trigger recovery instead. */ 3407 goto recovery; 3408 3409 ret = adjust_va_to_fit_type(va, start, size, type); 3410 if (unlikely(ret)) 3411 goto recovery; 3412 3413 /* Allocated area. 
*/ 3414 va = vas[area]; 3415 va->va_start = start; 3416 va->va_end = start + size; 3417 } 3418 3419 spin_unlock(&free_vmap_area_lock); 3420 3421 /* populate the kasan shadow space */ 3422 for (area = 0; area < nr_vms; area++) { 3423 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) 3424 goto err_free_shadow; 3425 3426 kasan_unpoison_vmalloc((void *)vas[area]->va_start, 3427 sizes[area]); 3428 } 3429 3430 /* insert all vm's */ 3431 spin_lock(&vmap_area_lock); 3432 for (area = 0; area < nr_vms; area++) { 3433 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); 3434 3435 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, 3436 pcpu_get_vm_areas); 3437 } 3438 spin_unlock(&vmap_area_lock); 3439 3440 kfree(vas); 3441 return vms; 3442 3443 recovery: 3444 /* 3445 * Remove previously allocated areas. There is no 3446 * need in removing these areas from the busy tree, 3447 * because they are inserted only on the final step 3448 * and when pcpu_get_vm_areas() is success. 3449 */ 3450 while (area--) { 3451 orig_start = vas[area]->va_start; 3452 orig_end = vas[area]->va_end; 3453 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, 3454 &free_vmap_area_list); 3455 kasan_release_vmalloc(orig_start, orig_end, 3456 va->va_start, va->va_end); 3457 vas[area] = NULL; 3458 } 3459 3460 overflow: 3461 spin_unlock(&free_vmap_area_lock); 3462 if (!purged) { 3463 purge_vmap_area_lazy(); 3464 purged = true; 3465 3466 /* Before "retry", check if we recover. */ 3467 for (area = 0; area < nr_vms; area++) { 3468 if (vas[area]) 3469 continue; 3470 3471 vas[area] = kmem_cache_zalloc( 3472 vmap_area_cachep, GFP_KERNEL); 3473 if (!vas[area]) 3474 goto err_free; 3475 } 3476 3477 goto retry; 3478 } 3479 3480 err_free: 3481 for (area = 0; area < nr_vms; area++) { 3482 if (vas[area]) 3483 kmem_cache_free(vmap_area_cachep, vas[area]); 3484 3485 kfree(vms[area]); 3486 } 3487 err_free2: 3488 kfree(vas); 3489 kfree(vms); 3490 return NULL; 3491 3492 err_free_shadow: 3493 spin_lock(&free_vmap_area_lock); 3494 /* 3495 * We release all the vmalloc shadows, even the ones for regions that 3496 * hadn't been successfully added. This relies on kasan_release_vmalloc 3497 * being able to tolerate this case. 3498 */ 3499 for (area = 0; area < nr_vms; area++) { 3500 orig_start = vas[area]->va_start; 3501 orig_end = vas[area]->va_end; 3502 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, 3503 &free_vmap_area_list); 3504 kasan_release_vmalloc(orig_start, orig_end, 3505 va->va_start, va->va_end); 3506 vas[area] = NULL; 3507 kfree(vms[area]); 3508 } 3509 spin_unlock(&free_vmap_area_lock); 3510 kfree(vas); 3511 kfree(vms); 3512 return NULL; 3513 } 3514 3515 /** 3516 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 3517 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 3518 * @nr_vms: the number of allocated areas 3519 * 3520 * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
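 *
 * The matching allocation/free pattern (sketch; error handling elided):
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, nr_vms);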
3521 */ 3522 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 3523 { 3524 int i; 3525 3526 for (i = 0; i < nr_vms; i++) 3527 free_vm_area(vms[i]); 3528 kfree(vms); 3529 } 3530 #endif /* CONFIG_SMP */ 3531 3532 #ifdef CONFIG_PROC_FS 3533 static void *s_start(struct seq_file *m, loff_t *pos) 3534 __acquires(&vmap_purge_lock) 3535 __acquires(&vmap_area_lock) 3536 { 3537 mutex_lock(&vmap_purge_lock); 3538 spin_lock(&vmap_area_lock); 3539 3540 return seq_list_start(&vmap_area_list, *pos); 3541 } 3542 3543 static void *s_next(struct seq_file *m, void *p, loff_t *pos) 3544 { 3545 return seq_list_next(p, &vmap_area_list, pos); 3546 } 3547 3548 static void s_stop(struct seq_file *m, void *p) 3549 __releases(&vmap_purge_lock) 3550 __releases(&vmap_area_lock) 3551 { 3552 mutex_unlock(&vmap_purge_lock); 3553 spin_unlock(&vmap_area_lock); 3554 } 3555 3556 static void show_numa_info(struct seq_file *m, struct vm_struct *v) 3557 { 3558 if (IS_ENABLED(CONFIG_NUMA)) { 3559 unsigned int nr, *counters = m->private; 3560 3561 if (!counters) 3562 return; 3563 3564 if (v->flags & VM_UNINITIALIZED) 3565 return; 3566 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 3567 smp_rmb(); 3568 3569 memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 3570 3571 for (nr = 0; nr < v->nr_pages; nr++) 3572 counters[page_to_nid(v->pages[nr])]++; 3573 3574 for_each_node_state(nr, N_HIGH_MEMORY) 3575 if (counters[nr]) 3576 seq_printf(m, " N%u=%u", nr, counters[nr]); 3577 } 3578 } 3579 3580 static void show_purge_info(struct seq_file *m) 3581 { 3582 struct llist_node *head; 3583 struct vmap_area *va; 3584 3585 head = READ_ONCE(vmap_purge_list.first); 3586 if (head == NULL) 3587 return; 3588 3589 llist_for_each_entry(va, head, purge_list) { 3590 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 3591 (void *)va->va_start, (void *)va->va_end, 3592 va->va_end - va->va_start); 3593 } 3594 } 3595 3596 static int s_show(struct seq_file *m, void *p) 3597 { 3598 struct vmap_area *va; 3599 struct vm_struct *v; 3600 3601 va = list_entry(p, struct vmap_area, list); 3602 3603 /* 3604 * s_show can encounter race with remove_vm_area, !vm on behalf 3605 * of vmap area is being tear down or vm_map_ram allocation. 3606 */ 3607 if (!va->vm) { 3608 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 3609 (void *)va->va_start, (void *)va->va_end, 3610 va->va_end - va->va_start); 3611 3612 return 0; 3613 } 3614 3615 v = va->vm; 3616 3617 seq_printf(m, "0x%pK-0x%pK %7ld", 3618 v->addr, v->addr + v->size, v->size); 3619 3620 if (v->caller) 3621 seq_printf(m, " %pS", v->caller); 3622 3623 if (v->nr_pages) 3624 seq_printf(m, " pages=%d", v->nr_pages); 3625 3626 if (v->phys_addr) 3627 seq_printf(m, " phys=%pa", &v->phys_addr); 3628 3629 if (v->flags & VM_IOREMAP) 3630 seq_puts(m, " ioremap"); 3631 3632 if (v->flags & VM_ALLOC) 3633 seq_puts(m, " vmalloc"); 3634 3635 if (v->flags & VM_MAP) 3636 seq_puts(m, " vmap"); 3637 3638 if (v->flags & VM_USERMAP) 3639 seq_puts(m, " user"); 3640 3641 if (v->flags & VM_DMA_COHERENT) 3642 seq_puts(m, " dma-coherent"); 3643 3644 if (is_vmalloc_addr(v->pages)) 3645 seq_puts(m, " vpages"); 3646 3647 show_numa_info(m, v); 3648 seq_putc(m, '\n'); 3649 3650 /* 3651 * As a final step, dump "unpurged" areas. Note, 3652 * that entire "/proc/vmallocinfo" output will not 3653 * be address sorted, because the purge list is not 3654 * sorted. 
3655 */ 3656 if (list_is_last(&va->list, &vmap_area_list)) 3657 show_purge_info(m); 3658 3659 return 0; 3660 } 3661 3662 static const struct seq_operations vmalloc_op = { 3663 .start = s_start, 3664 .next = s_next, 3665 .stop = s_stop, 3666 .show = s_show, 3667 }; 3668 3669 static int __init proc_vmalloc_init(void) 3670 { 3671 if (IS_ENABLED(CONFIG_NUMA)) 3672 proc_create_seq_private("vmallocinfo", 0400, NULL, 3673 &vmalloc_op, 3674 nr_node_ids * sizeof(unsigned int), NULL); 3675 else 3676 proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); 3677 return 0; 3678 } 3679 module_init(proc_vmalloc_init); 3680 3681 #endif 3682