Lines Matching full:va
870 * All vmap_area objects in this tree are sorted by va->va_start
1029 va_size(struct vmap_area *va) in va_size() argument
1031 return (va->va_end - va->va_start); in va_size()
1037 struct vmap_area *va; in get_subtree_max_size() local
1039 va = rb_entry_safe(node, struct vmap_area, rb_node); in get_subtree_max_size()
1040 return va ? va->subtree_max_size : 0; in get_subtree_max_size()
1066 struct vmap_area *va; in __find_vmap_area() local
1068 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
1069 if (addr < va->va_start) in __find_vmap_area()
1071 else if (addr >= va->va_end) in __find_vmap_area()
1074 return va; in __find_vmap_area()
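The __find_vmap_area() lines above (1066-1074) walk the busy tree: descend left while addr is below the node's va_start, descend right while it is at or beyond va_end, and stop once the node's range contains addr. Below is a minimal userspace sketch of the same decision logic, using a sorted array of non-overlapping half-open ranges instead of an rb-tree; struct range and find_range() are invented for illustration and are not kernel code.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct vmap_area: a half-open range [start, end). */
struct range { unsigned long start, end; };

/*
 * Same per-node decision as __find_vmap_area(), but over a sorted array:
 * addr below the range -> go left, addr at or past the end -> go right,
 * otherwise this range contains addr.
 */
static struct range *find_range(struct range *r, size_t n, unsigned long addr)
{
        size_t lo = 0, hi = n;

        while (lo < hi) {
                size_t mid = lo + (hi - lo) / 2;

                if (addr < r[mid].start)
                        hi = mid;               /* "left subtree" */
                else if (addr >= r[mid].end)
                        lo = mid + 1;           /* "right subtree" */
                else
                        return &r[mid];         /* addr lies in [start, end) */
        }
        return NULL;
}

int main(void)
{
        struct range busy[] = { {0x1000, 0x3000}, {0x5000, 0x6000}, {0x9000, 0xb000} };
        struct range *hit = find_range(busy, 3, 0x5800);

        printf("0x5800 covered: %s\n", hit ? "yes" : "no");
        return 0;
}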
1080 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
1084 struct vmap_area *va = NULL; in __find_vmap_area_exceed_addr() local
1094 va = tmp; in __find_vmap_area_exceed_addr()
1103 return va; in __find_vmap_area_exceed_addr()
1107 * Returns the node where the first VA that satisfies addr < va_end resides.
1109 * VA no longer needs to be accessed.
1114 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va) in find_vmap_area_exceed_addr_lock() argument
1124 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); in find_vmap_area_exceed_addr_lock()
1126 if (*va) in find_vmap_area_exceed_addr_lock()
1127 if (!va_start_lowest || (*va)->va_start < va_start_lowest) in find_vmap_area_exceed_addr_lock()
1128 va_start_lowest = (*va)->va_start; in find_vmap_area_exceed_addr_lock()
1133 * Check if the found VA still exists; it might have gone away. In this case we in find_vmap_area_exceed_addr_lock()
1134 * repeat the search because a VA has been removed concurrently and we in find_vmap_area_exceed_addr_lock()
1141 *va = __find_vmap_area(va_start_lowest, &vn->busy.root); in find_vmap_area_exceed_addr_lock()
1143 if (*va) in find_vmap_area_exceed_addr_lock()
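find_vmap_area_exceed_addr_lock() (1114-1143) uses a two-pass scheme: scan every node's busy tree for the first VA satisfying addr < va_end while remembering the lowest va_start, then re-take the winning node's lock and confirm that VA still exists, retrying if it was removed concurrently. The sketch below models only that pattern; the types are invented, pthread mutexes stand in for the per-node locks, and each toy node holds a single range, so treat it as an illustration rather than the kernel implementation.

#include <pthread.h>
#include <stdbool.h>

#define NR_NODES 4

struct range { unsigned long start, end; bool present; };

/* One toy "node": a lock plus a single busy range.  Assume every
 * nodes[i].lock has been set up with pthread_mutex_init() at start-up. */
struct node {
        pthread_mutex_t lock;
        struct range busy;
} nodes[NR_NODES];

/*
 * Find the range with the lowest start that still satisfies addr < end.
 * Pass 1 scans all nodes and records the best candidate; pass 2 re-locks
 * that node and re-checks, because the candidate may have disappeared in
 * between.  On success the node is returned with its lock held.
 */
static struct node *find_exceed_addr_lock(unsigned long addr, struct range **out)
{
        unsigned long lowest;
        int i, best;

retry:
        lowest = 0;
        best = -1;

        for (i = 0; i < NR_NODES; i++) {
                pthread_mutex_lock(&nodes[i].lock);
                if (nodes[i].busy.present && addr < nodes[i].busy.end &&
                    (best < 0 || nodes[i].busy.start < lowest)) {
                        best = i;
                        lowest = nodes[i].busy.start;
                }
                pthread_mutex_unlock(&nodes[i].lock);
        }

        if (best < 0)
                return NULL;

        pthread_mutex_lock(&nodes[best].lock);
        if (nodes[best].busy.present && nodes[best].busy.start == lowest) {
                *out = &nodes[best].busy;
                return &nodes[best];
        }
        pthread_mutex_unlock(&nodes[best].lock);
        goto retry;             /* it went away concurrently; search again */
}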
1162 find_va_links(struct vmap_area *va, in find_va_links() argument
1182 * its link, where the new va->rb_node will be attached. in find_va_links()
1192 if (va->va_end <= tmp_va->va_start) in find_va_links()
1194 else if (va->va_start >= tmp_va->va_end) in find_va_links()
1198 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); in find_va_links()
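find_va_links() (1162-1198) descends the tree with a three-way test: the new VA goes left if it ends at or before the current node's start, right if it begins at or after the node's end, and anything else means the two ranges intersect, which the kernel reports as a bug. A tiny standalone model of just that test; the names are invented.

#include <stdio.h>

struct range { unsigned long start, end; };     /* half-open [start, end) */

enum side { GO_LEFT, GO_RIGHT, OVERLAP };

/* The three-way comparison applied at every level of the descent. */
static enum side classify(const struct range *new, const struct range *node)
{
        if (new->end <= node->start)
                return GO_LEFT;
        if (new->start >= node->end)
                return GO_RIGHT;
        return OVERLAP;         /* the kernel would WARN: ranges intersect */
}

int main(void)
{
        struct range node = { 0x4000, 0x6000 };
        struct range a = { 0x1000, 0x4000 };    /* touches on the left: GO_LEFT */
        struct range b = { 0x5000, 0x7000 };    /* intersects: OVERLAP */

        printf("%d %d\n", classify(&a, &node), classify(&b, &node));
        return 0;
}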
1215 * The red-black tree where we try to find VA neighbors in get_va_next_sibling()
1227 __link_va(struct vmap_area *va, struct rb_root *root, in __link_va() argument
1232 * VA is still not in the list, but we can in __link_va()
1242 rb_link_node(&va->rb_node, parent, link); in __link_va()
1246 * to the tree. We do not set va->subtree_max_size to in __link_va()
1255 rb_insert_augmented(&va->rb_node, in __link_va()
1257 va->subtree_max_size = 0; in __link_va()
1259 rb_insert_color(&va->rb_node, root); in __link_va()
1263 list_add(&va->list, head); in __link_va()
1267 link_va(struct vmap_area *va, struct rb_root *root, in link_va() argument
1271 __link_va(va, root, parent, link, head, false); in link_va()
1275 link_va_augment(struct vmap_area *va, struct rb_root *root, in link_va_augment() argument
1279 __link_va(va, root, parent, link, head, true); in link_va_augment()
1283 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) in __unlink_va() argument
1285 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) in __unlink_va()
1289 rb_erase_augmented(&va->rb_node, in __unlink_va()
1292 rb_erase(&va->rb_node, root); in __unlink_va()
1294 list_del_init(&va->list); in __unlink_va()
1295 RB_CLEAR_NODE(&va->rb_node); in __unlink_va()
1299 unlink_va(struct vmap_area *va, struct rb_root *root) in unlink_va() argument
1301 __unlink_va(va, root, false); in unlink_va()
1305 unlink_va_augment(struct vmap_area *va, struct rb_root *root) in unlink_va_augment() argument
1307 __unlink_va(va, root, true); in unlink_va_augment()
1315 compute_subtree_max_size(struct vmap_area *va) in compute_subtree_max_size() argument
1317 return max3(va_size(va), in compute_subtree_max_size()
1318 get_subtree_max_size(va->rb_node.rb_left), in compute_subtree_max_size()
1319 get_subtree_max_size(va->rb_node.rb_right)); in compute_subtree_max_size()
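compute_subtree_max_size() (1315-1319) yields the augmented value stored in every free-tree node: the largest free-range size anywhere in that node's subtree, i.e. the maximum of its own size and the two children's cached values. A hedged userspace model over a plain binary tree; struct fnode is invented.

struct fnode {
        unsigned long start, end;       /* this free range */
        unsigned long subtree_max;      /* largest range size in this subtree */
        struct fnode *left, *right;
};

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
        unsigned long m = a > b ? a : b;

        return m > c ? m : c;
}

/* Own size vs. the children's cached maxima, as in compute_subtree_max_size(). */
static unsigned long subtree_max(const struct fnode *n)
{
        if (!n)
                return 0;       /* like get_subtree_max_size() on a missing child */
        return max3(n->end - n->start,
                    n->left  ? n->left->subtree_max  : 0,
                    n->right ? n->right->subtree_max : 0);
}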
1325 struct vmap_area *va; in augment_tree_propagate_check() local
1328 list_for_each_entry(va, &free_vmap_area_list, list) { in augment_tree_propagate_check()
1329 computed_size = compute_subtree_max_size(va); in augment_tree_propagate_check()
1330 if (computed_size != va->subtree_max_size) in augment_tree_propagate_check()
1332 va_size(va), va->subtree_max_size); in augment_tree_propagate_check()
1339 * levels starting from the VA point. The propagation must be done
1340 * when the VA size is modified by changing its va_start/va_end, or
1341 * when a new VA is inserted into the tree.
1344 * - After a VA has been inserted into the tree (free path);
1345 * - After a VA has been shrunk (allocation path);
1346 * - After a VA has been increased (merging path).
1365 augment_tree_propagate_from(struct vmap_area *va) in augment_tree_propagate_from() argument
1372 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); in augment_tree_propagate_from()
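augment_tree_propagate_from() (1365-1372) pushes refreshed subtree_max_size values from the modified node up toward the root through the rb-tree augment callback, which can stop early once a recomputed value matches the cached one. The sketch below assumes an explicit parent pointer and recomputes at each level, which simplifies what the callback machinery actually does; struct pnode is invented.

/* Like the free-tree node above, but with a parent link for the upward walk. */
struct pnode {
        unsigned long start, end;
        unsigned long subtree_max;
        struct pnode *left, *right, *parent;
};

static unsigned long pnode_max(const struct pnode *n)
{
        unsigned long own = n->end - n->start;
        unsigned long l = n->left  ? n->left->subtree_max  : 0;
        unsigned long r = n->right ? n->right->subtree_max : 0;
        unsigned long m = own > l ? own : l;

        return m > r ? m : r;
}

/*
 * After n's range changed (shrunk, grown or freshly linked), refresh the
 * cached maxima toward the root.  Stop as soon as a value is unchanged:
 * nothing above it can change either.
 */
static void propagate_from(struct pnode *n)
{
        while (n) {
                unsigned long new_max = pnode_max(n);

                if (new_max == n->subtree_max)
                        break;
                n->subtree_max = new_max;
                n = n->parent;
        }
}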
1380 insert_vmap_area(struct vmap_area *va, in insert_vmap_area() argument
1386 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area()
1388 link_va(va, root, parent, link, head); in insert_vmap_area()
1392 insert_vmap_area_augment(struct vmap_area *va, in insert_vmap_area_augment() argument
1400 link = find_va_links(va, NULL, from, &parent); in insert_vmap_area_augment()
1402 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area_augment()
1405 link_va_augment(va, root, parent, link, head); in insert_vmap_area_augment()
1406 augment_tree_propagate_from(va); in insert_vmap_area_augment()
1411 * Merge de-allocated chunk of VA memory with previous
1413 * free area is inserted. If VA has been merged, it is
1422 __merge_or_add_vmap_area(struct vmap_area *va, in __merge_or_add_vmap_area() argument
1432 * Find a place in the tree where VA potentially will be in __merge_or_add_vmap_area()
1435 link = find_va_links(va, root, NULL, &parent); in __merge_or_add_vmap_area()
1440 * Get next node of VA to check if merging can be done. in __merge_or_add_vmap_area()
1449 * |<------VA------>|<-----Next----->| in __merge_or_add_vmap_area()
1455 if (sibling->va_start == va->va_end) { in __merge_or_add_vmap_area()
1456 sibling->va_start = va->va_start; in __merge_or_add_vmap_area()
1459 kmem_cache_free(vmap_area_cachep, va); in __merge_or_add_vmap_area()
1462 va = sibling; in __merge_or_add_vmap_area()
1470 * |<-----Prev----->|<------VA------>| in __merge_or_add_vmap_area()
1476 if (sibling->va_end == va->va_start) { in __merge_or_add_vmap_area()
1485 __unlink_va(va, root, augment); in __merge_or_add_vmap_area()
1487 sibling->va_end = va->va_end; in __merge_or_add_vmap_area()
1490 kmem_cache_free(vmap_area_cachep, va); in __merge_or_add_vmap_area()
1493 va = sibling; in __merge_or_add_vmap_area()
1500 __link_va(va, root, parent, link, head, augment); in __merge_or_add_vmap_area()
1502 return va; in __merge_or_add_vmap_area()
1506 merge_or_add_vmap_area(struct vmap_area *va, in merge_or_add_vmap_area() argument
1509 return __merge_or_add_vmap_area(va, root, head, false); in merge_or_add_vmap_area()
1513 merge_or_add_vmap_area_augment(struct vmap_area *va, in merge_or_add_vmap_area_augment() argument
1516 va = __merge_or_add_vmap_area(va, root, head, true); in merge_or_add_vmap_area_augment()
1517 if (va) in merge_or_add_vmap_area_augment()
1518 augment_tree_propagate_from(va); in merge_or_add_vmap_area_augment()
1520 return va; in merge_or_add_vmap_area_augment()
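__merge_or_add_vmap_area() (1422-1502) returns a freed range to the free structures and, as the two diagrams above show, coalesces it with the next neighbour when sibling->va_start == va->va_end and with the previous one when sibling->va_end == va->va_start, freeing the now-redundant vmap_area. A hedged model over a sorted array of free ranges; free_and_merge() and its fixed-size array are inventions for illustration.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct range { unsigned long start, end; };

/*
 * Insert the freed range [start, end) into a sorted, non-overlapping array
 * of free ranges, coalescing with the previous and/or next neighbour when
 * they touch, the same two cases the kernel helper handles.  Returns the
 * new number of entries; the caller must provide spare capacity.
 */
static size_t free_and_merge(struct range *r, size_t n,
                             unsigned long start, unsigned long end)
{
        size_t i = 0;

        while (i < n && r[i].start < start)
                i++;                                    /* i = the "next" neighbour */

        if (i < n && r[i].start == end) {               /* merge with next */
                r[i].start = start;
        } else if (i > 0 && r[i - 1].end == start) {    /* merge with prev only */
                r[i - 1].end = end;
                return n;
        } else {                                        /* nothing touches: insert */
                memmove(&r[i + 1], &r[i], (n - i) * sizeof(*r));
                r[i].start = start;
                r[i].end = end;
                return n + 1;
        }

        /* Merged with next; the previous entry may now touch as well. */
        if (i > 0 && r[i - 1].end == r[i].start) {
                r[i - 1].end = r[i].end;
                memmove(&r[i], &r[i + 1], (n - i - 1) * sizeof(*r));
                return n - 1;
        }
        return n;
}

int main(void)
{
        struct range freelist[8] = { {0x1000, 0x2000}, {0x3000, 0x4000} };
        size_t n = free_and_merge(freelist, 2, 0x2000, 0x3000); /* bridges both */

        printf("%zu: [%#lx, %#lx)\n", n, freelist[0].start, freelist[0].end);
        return 0;
}

Freeing [0x2000, 0x3000) above bridges both neighbours, so the run prints a single coalesced entry [0x1000, 0x4000).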
1524 is_within_this_va(struct vmap_area *va, unsigned long size, in is_within_this_va() argument
1529 if (va->va_start > vstart) in is_within_this_va()
1530 nva_start_addr = ALIGN(va->va_start, align); in is_within_this_va()
1539 return (nva_start_addr + size <= va->va_end); in is_within_this_va()
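is_within_this_va() (1524-1539) asks whether size bytes, aligned to align and starting no lower than vstart, can be carved from this free VA: round the larger of va_start and vstart up to the alignment and check that the result plus size still ends at or before va_end, guarding against overflow of the sum. A standalone restatement; align_up() is written out here because the kernel's ALIGN() macro is not available in userspace, and align is assumed to be a power of two.

#include <stdbool.h>

struct range { unsigned long start, end; };

/* Round addr up to the next multiple of the power-of-two 'align'. */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
        return (addr + align - 1) & ~(align - 1);
}

/* Same shape of test as is_within_this_va(). */
static bool fits_in(const struct range *va, unsigned long size,
                    unsigned long align, unsigned long vstart)
{
        unsigned long nva_start = va->start > vstart ?
                align_up(va->start, align) : align_up(vstart, align);

        if (nva_start + size < nva_start)       /* addition overflowed */
                return false;

        return nva_start + size <= va->end;
}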
1553 struct vmap_area *va; in find_vmap_lowest_match() local
1564 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
1567 vstart < va->va_start) { in find_vmap_lowest_match()
1570 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
1571 return va; in find_vmap_lowest_match()
1590 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
1591 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
1592 return va; in find_vmap_lowest_match()
1595 vstart <= va->va_start) { in find_vmap_lowest_match()
1602 vstart = va->va_start + 1; in find_vmap_lowest_match()
1620 struct vmap_area *va; in find_vmap_lowest_linear_match() local
1622 list_for_each_entry(va, head, list) { in find_vmap_lowest_linear_match()
1623 if (!is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_linear_match()
1626 return va; in find_vmap_lowest_linear_match()
1661 classify_va_fit_type(struct vmap_area *va, in classify_va_fit_type() argument
1666 /* Check if it is within VA. */ in classify_va_fit_type()
1667 if (nva_start_addr < va->va_start || in classify_va_fit_type()
1668 nva_start_addr + size > va->va_end) in classify_va_fit_type()
1672 if (va->va_start == nva_start_addr) { in classify_va_fit_type()
1673 if (va->va_end == nva_start_addr + size) in classify_va_fit_type()
1677 } else if (va->va_end == nva_start_addr + size) { in classify_va_fit_type()
1688 struct vmap_area *va, unsigned long nva_start_addr, in va_clip() argument
1692 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); in va_clip()
1696 * No need to split the VA; it fully fits. in va_clip()
1702 unlink_va_augment(va, root); in va_clip()
1703 kmem_cache_free(vmap_area_cachep, va); in va_clip()
1706 * Split left edge of fit VA. in va_clip()
1712 va->va_start += size; in va_clip()
1715 * Split right edge of fit VA. in va_clip()
1721 va->va_end = nva_start_addr; in va_clip()
1724 * Split no edge of fit VA. in va_clip()
1765 lva->va_start = va->va_start; in va_clip()
1769 * Shrink this VA to remaining size. in va_clip()
1771 va->va_start = nva_start_addr + size; in va_clip()
1777 augment_tree_propagate_from(va); in va_clip()
1780 insert_vmap_area_augment(lva, &va->rb_node, root, head); in va_clip()
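classify_va_fit_type() and va_clip() (1661-1780) distinguish four outcomes when [nva_start, nva_start + size) is carved out of a free VA: the whole VA is consumed (FL_FIT), the request hugs the left edge (LE_FIT), it hugs the right edge (RE_FIT), or it sits in the middle (NE_FIT) and the left remainder needs a node of its own. A hedged model that classifies and clips in one step; the enum values mirror the kernel's names, but the NE case writes the remainder into a caller-supplied spare instead of allocating one.

#include <stdbool.h>

struct range { unsigned long start, end; };

enum fit { NOTHING_FIT, FL_FIT, LE_FIT, RE_FIT, NE_FIT };

static enum fit classify_fit(const struct range *va, unsigned long s, unsigned long size)
{
        if (s < va->start || s + size > va->end)
                return NOTHING_FIT;

        if (s == va->start)
                return (s + size == va->end) ? FL_FIT : LE_FIT;
        return (s + size == va->end) ? RE_FIT : NE_FIT;
}

/*
 * Clip [s, s + size) out of *va.  For FL_FIT the caller should drop *va
 * entirely; for NE_FIT the left remainder is written into *spare.
 * Returns false if the request does not fit.
 */
static bool clip(struct range *va, struct range *spare, unsigned long s, unsigned long size)
{
        switch (classify_fit(va, s, size)) {
        case FL_FIT:                            /* whole VA consumed */
                va->start = va->end = 0;
                return true;
        case LE_FIT:                            /* keep the right remainder */
                va->start = s + size;
                return true;
        case RE_FIT:                            /* keep the left remainder */
                va->end = s;
                return true;
        case NE_FIT:                            /* split into two remainders */
                spare->start = va->start;
                spare->end = s;
                va->start = s + size;
                return true;
        default:
                return false;
        }
}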
1787 va_alloc(struct vmap_area *va, in va_alloc() argument
1795 if (va->va_start > vstart) in va_alloc()
1796 nva_start_addr = ALIGN(va->va_start, align); in va_alloc()
1805 ret = va_clip(root, head, va, nva_start_addr, size); in va_alloc()
1823 struct vmap_area *va; in __alloc_vmap_area() local
1837 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); in __alloc_vmap_area()
1838 if (unlikely(!va)) in __alloc_vmap_area()
1841 nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); in __alloc_vmap_area()
1854 static void free_vmap_area(struct vmap_area *va) in free_vmap_area() argument
1856 struct vmap_node *vn = addr_to_node(va->va_start); in free_vmap_area()
1862 unlink_va(va, &vn->busy.root); in free_vmap_area()
1869 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); in free_vmap_area()
1876 struct vmap_area *va = NULL, *tmp; in preload_this_cpu_lock() local
1888 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in preload_this_cpu_lock()
1893 if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va)) in preload_this_cpu_lock()
1894 kmem_cache_free(vmap_area_cachep, va); in preload_this_cpu_lock()
1909 node_pool_add_va(struct vmap_node *n, struct vmap_area *va) in node_pool_add_va() argument
1913 vp = size_to_va_pool(n, va_size(va)); in node_pool_add_va()
1918 list_add(&va->list, &vp->head); in node_pool_add_va()
1930 struct vmap_area *va = NULL; in node_pool_del_va() local
1940 va = list_first_entry(&vp->head, struct vmap_area, list); in node_pool_del_va()
1942 if (IS_ALIGNED(va->va_start, align)) { in node_pool_del_va()
1947 err |= (va_size(va) != size); in node_pool_del_va()
1948 err |= (va->va_start < vstart); in node_pool_del_va()
1949 err |= (va->va_end > vend); in node_pool_del_va()
1952 list_del_init(&va->list); in node_pool_del_va()
1955 va = NULL; in node_pool_del_va()
1958 list_move_tail(&va->list, &vp->head); in node_pool_del_va()
1959 va = NULL; in node_pool_del_va()
1964 return va; in node_pool_del_va()
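node_pool_add_va() and node_pool_del_va() (1909-1964) maintain small per-node caches of freed VAs bucketed by size so that common requests can bypass the global free tree; a cached VA is handed out only if its start is suitably aligned and it lies inside [vstart, vend]. A hedged model of a single bucket as a bounded LIFO; the capacity, the names and the omitted size check are simplifications.

#include <stdbool.h>
#include <stddef.h>

#define POOL_CAP 16

struct range { unsigned long start, end; };

/* One size-class bucket: a small LIFO stack of cached free ranges. */
struct pool {
        struct range slot[POOL_CAP];
        size_t len;
};

/* Cache a freed range if there is room; otherwise report a miss so the
 * caller can fall back to its slow path. */
static bool pool_put(struct pool *p, struct range r)
{
        if (p->len == POOL_CAP)
                return false;
        p->slot[p->len++] = r;
        return true;
}

/* Hand out a cached range only if it satisfies the caller's alignment
 * (assumed power of two) and [vstart, vend] constraints. */
static bool pool_get(struct pool *p, unsigned long align,
                     unsigned long vstart, unsigned long vend, struct range *out)
{
        struct range *r;

        if (!p->len)
                return false;

        r = &p->slot[p->len - 1];
        if ((r->start & (align - 1)) || r->start < vstart || r->end > vend)
                return false;           /* cached entry does not fit the request */

        *out = *r;
        p->len--;
        return true;
}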
1972 struct vmap_area *va; in node_alloc() local
1986 va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend); in node_alloc()
1989 if (va) in node_alloc()
1990 *addr = va->va_start; in node_alloc()
1992 return va; in node_alloc()
1996 struct vmap_area *va, unsigned long flags, const void *caller) in setup_vmalloc_vm() argument
1999 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
2000 vm->size = vm->requested_size = va_size(va); in setup_vmalloc_vm()
2002 va->vm = vm; in setup_vmalloc_vm()
2016 struct vmap_area *va; in alloc_vmap_area() local
2034 * If a VA is obtained from a global heap (if it fails here) in alloc_vmap_area()
2039 * On success a ready-to-go VA is returned. in alloc_vmap_area()
2041 va = node_alloc(size, align, vstart, vend, &addr, &vn_id); in alloc_vmap_area()
2042 if (!va) { in alloc_vmap_area()
2043 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in alloc_vmap_area()
2044 if (unlikely(!va)) in alloc_vmap_area()
2051 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); in alloc_vmap_area()
2077 va->va_start = addr; in alloc_vmap_area()
2078 va->va_end = addr + size; in alloc_vmap_area()
2079 va->vm = NULL; in alloc_vmap_area()
2080 va->flags = (va_flags | vn_id); in alloc_vmap_area()
2083 vm->addr = (void *)va->va_start; in alloc_vmap_area()
2084 vm->size = va_size(va); in alloc_vmap_area()
2085 va->vm = vm; in alloc_vmap_area()
2088 vn = addr_to_node(va->va_start); in alloc_vmap_area()
2091 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); in alloc_vmap_area()
2094 BUG_ON(!IS_ALIGNED(va->va_start, align)); in alloc_vmap_area()
2095 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
2096 BUG_ON(va->va_end > vend); in alloc_vmap_area()
2100 free_vmap_area(va); in alloc_vmap_area()
2104 return va; in alloc_vmap_area()
2125 kmem_cache_free(vmap_area_cachep, va); in alloc_vmap_area()
2179 struct vmap_area *va, *n; in reclaim_list_global() local
2185 list_for_each_entry_safe(va, n, head, list) in reclaim_list_global()
2186 merge_or_add_vmap_area_augment(va, in reclaim_list_global()
2196 struct vmap_area *va, *nva; in decay_va_pool_node() local
2219 list_for_each_entry_safe(va, nva, &tmp_list, list) { in decay_va_pool_node()
2223 list_del_init(&va->list); in decay_va_pool_node()
2224 merge_or_add_vmap_area(va, &decay_root, &decay_list); in decay_va_pool_node()
2247 struct vmap_area *va; in kasan_release_vmalloc_node() local
2253 list_for_each_entry(va, &vn->purge_list, list) { in kasan_release_vmalloc_node()
2254 if (is_vmalloc_or_module_addr((void *) va->va_start)) in kasan_release_vmalloc_node()
2255 kasan_release_vmalloc(va->va_start, va->va_end, in kasan_release_vmalloc_node()
2256 va->va_start, va->va_end, in kasan_release_vmalloc_node()
2268 struct vmap_area *va, *n_va; in purge_vmap_node() local
2276 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { in purge_vmap_node()
2277 unsigned long nr = va_size(va) >> PAGE_SHIFT; in purge_vmap_node()
2278 unsigned int vn_id = decode_vn_id(va->flags); in purge_vmap_node()
2280 list_del_init(&va->list); in purge_vmap_node()
2286 if (node_pool_add_va(vn, va)) in purge_vmap_node()
2290 list_add(&va->list, &local_list); in purge_vmap_node()
2405 static void free_vmap_area_noflush(struct vmap_area *va) in free_vmap_area_noflush() argument
2408 unsigned long va_start = va->va_start; in free_vmap_area_noflush()
2409 unsigned int vn_id = decode_vn_id(va->flags); in free_vmap_area_noflush()
2413 if (WARN_ON_ONCE(!list_empty(&va->list))) in free_vmap_area_noflush()
2416 nr_lazy = atomic_long_add_return_relaxed(va_size(va) >> PAGE_SHIFT, in free_vmap_area_noflush()
2424 id_to_node(vn_id):addr_to_node(va->va_start); in free_vmap_area_noflush()
2427 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); in free_vmap_area_noflush()
2432 /* After this point, we may free va at any time */ in free_vmap_area_noflush()
2440 static void free_unmap_vmap_area(struct vmap_area *va) in free_unmap_vmap_area() argument
2442 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
2443 vunmap_range_noflush(va->va_start, va->va_end); in free_unmap_vmap_area()
2445 flush_tlb_kernel_range(va->va_start, va->va_end); in free_unmap_vmap_area()
2447 free_vmap_area_noflush(va); in free_unmap_vmap_area()
2453 struct vmap_area *va; in find_vmap_area() local
2461 * where a VA is located. If the VA spans several zones and the passed in find_vmap_area()
2462 * addr is not the same as va->va_start, which is not common, we in find_vmap_area()
2465 * <----va----> in find_vmap_area()
2469 * VA resides in node 1 whereas it spans nodes 1, 2 and 0. If the passed in find_vmap_area()
2477 va = __find_vmap_area(addr, &vn->busy.root); in find_vmap_area()
2480 if (va) in find_vmap_area()
2481 return va; in find_vmap_area()
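The comment above (2461-2469) explains why find_vmap_area() may have to look beyond the node the address maps to: a VA spanning several node zones is stored only in the node of its va_start. The sketch below models just that iteration order; addr_to_node_id(), the toy per-node storage and NR_NODES are invented stand-ins for the kernel's addr_to_node() and per-node busy trees.

#include <stddef.h>

#define NR_NODES 8

struct range { unsigned long start, end; };

/* Toy per-node storage: at most one busy range per node. */
static struct range node_busy[NR_NODES];

/* Which node an address maps to (stand-in for addr_to_node()). */
static unsigned int addr_to_node_id(unsigned long addr)
{
        return (addr >> 12) % NR_NODES;
}

static struct range *node_lookup(unsigned int node, unsigned long addr)
{
        struct range *r = &node_busy[node];

        return (r->start <= addr && addr < r->end) ? r : NULL;
}

/*
 * Start at the node the address maps to, then try every other node once,
 * since the owning node may differ when the VA spans several zones.
 */
static struct range *lookup_all_nodes(unsigned long addr)
{
        unsigned int first = addr_to_node_id(addr);
        unsigned int i;

        for (i = 0; i < NR_NODES; i++) {
                struct range *r = node_lookup((first + i) % NR_NODES, addr);

                if (r)
                        return r;
        }
        return NULL;
}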
2490 struct vmap_area *va; in find_unlink_vmap_area() local
2501 va = __find_vmap_area(addr, &vn->busy.root); in find_unlink_vmap_area()
2502 if (va) in find_unlink_vmap_area()
2503 unlink_va(va, &vn->busy.root); in find_unlink_vmap_area()
2506 if (va) in find_unlink_vmap_area()
2507 return va; in find_unlink_vmap_area()
2567 struct vmap_area *va; member
2667 struct vmap_area *va; in new_vmap_block() local
2680 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, in new_vmap_block()
2684 if (IS_ERR(va)) { in new_vmap_block()
2686 return ERR_CAST(va); in new_vmap_block()
2689 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
2691 vb->va = va; in new_vmap_block()
2703 xa = addr_to_vb_xa(va->va_start); in new_vmap_block()
2704 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
2708 free_vmap_area(va); in new_vmap_block()
2732 xa = addr_to_vb_xa(vb->va->va_start); in free_vmap_block()
2733 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); in free_vmap_block()
2736 vn = addr_to_node(vb->va->va_start); in free_vmap_block()
2738 unlink_va(vb->va, &vn->busy.root); in free_vmap_block()
2741 free_vmap_area_noflush(vb->va); in free_vmap_block()
2846 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
2936 unsigned long va_start = vb->va->va_start; in _vm_unmap_aliases()
2990 struct vmap_area *va; in vm_unmap_ram() local
3006 va = find_unlink_vmap_area(addr); in vm_unmap_ram()
3007 if (WARN_ON_ONCE(!va)) in vm_unmap_ram()
3010 debug_check_no_locks_freed((void *)va->va_start, va_size(va)); in vm_unmap_ram()
3011 free_unmap_vmap_area(va); in vm_unmap_ram()
3041 struct vmap_area *va; in vm_map_ram() local
3042 va = alloc_vmap_area(size, PAGE_SIZE, in vm_map_ram()
3046 if (IS_ERR(va)) in vm_map_ram()
3049 addr = va->va_start; in vm_map_ram()
3169 struct vmap_area *va; in __get_vm_area_node() local
3193 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); in __get_vm_area_node()
3194 if (IS_ERR(va)) { in __get_vm_area_node()
3261 struct vmap_area *va; in find_vm_area() local
3263 va = find_vmap_area((unsigned long)addr); in find_vm_area()
3264 if (!va) in find_vm_area()
3267 return va->vm; in find_vm_area()
3282 struct vmap_area *va; in remove_vm_area() local
3291 va = find_unlink_vmap_area((unsigned long)addr); in remove_vm_area()
3292 if (!va || !va->vm) in remove_vm_area()
3294 vm = va->vm; in remove_vm_area()
3301 free_unmap_vmap_area(va); in remove_vm_area()
4364 start = vmap_block_vaddr(vb->va->va_start, rs); in vmap_ram_vread_iter()
4430 struct vmap_area *va; in vread_iter() local
4444 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); in vread_iter()
4449 if ((unsigned long)addr + remains <= va->va_start) in vread_iter()
4458 vm = va->vm; in vread_iter()
4459 flags = va->flags & VMAP_FLAGS_MASK; in vread_iter()
4475 vaddr = (char *) va->va_start; in vread_iter()
4476 size = vm ? get_vm_area_size(vm) : va_size(va); in vread_iter()
4510 next = va->va_end; in vread_iter()
4512 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); in vread_iter()
4635 * i.e. va->va_start < addr && va->va_end < addr or NULL
4641 struct vmap_area *va, *tmp; in pvm_find_va_enclose_addr() local
4645 va = NULL; in pvm_find_va_enclose_addr()
4650 va = tmp; in pvm_find_va_enclose_addr()
4660 return va; in pvm_find_va_enclose_addr()
4666 * @va:
4667 * in - the VA we start the search from (reverse order); in pvm_determine_end_from_reverse()
4668 * out - the VA with the highest aligned end address.
4674 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) in pvm_determine_end_from_reverse() argument
4679 if (likely(*va)) { in pvm_determine_end_from_reverse()
4680 list_for_each_entry_from_reverse((*va), in pvm_determine_end_from_reverse()
4682 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
4683 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
4721 struct vmap_area **vas, *va; in pcpu_get_vm_areas() local
4774 va = pvm_find_va_enclose_addr(vmalloc_end); in pcpu_get_vm_areas()
4775 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4788 if (va == NULL) in pcpu_get_vm_areas()
4792 * If required width exceeds current VA block, move in pcpu_get_vm_areas()
4795 if (base + end > va->va_end) { in pcpu_get_vm_areas()
4796 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4802 * If this VA does not fit, move base downwards and recheck. in pcpu_get_vm_areas()
4804 if (base + start < va->va_start) { in pcpu_get_vm_areas()
4805 va = node_to_va(rb_prev(&va->rb_node)); in pcpu_get_vm_areas()
4806 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4821 va = pvm_find_va_enclose_addr(base + end); in pcpu_get_vm_areas()
4824 /* we've found a fitting base, insert all va's */ in pcpu_get_vm_areas()
4831 va = pvm_find_va_enclose_addr(start); in pcpu_get_vm_areas()
4832 if (WARN_ON_ONCE(va == NULL)) in pcpu_get_vm_areas()
4837 &free_vmap_area_list, va, start, size); in pcpu_get_vm_areas()
4843 va = vas[area]; in pcpu_get_vm_areas()
4844 va->va_start = start; in pcpu_get_vm_areas()
4845 va->va_end = start + size; in pcpu_get_vm_areas()
4890 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4892 if (va) in pcpu_get_vm_areas()
4894 va->va_start, va->va_end, in pcpu_get_vm_areas()
4941 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4943 if (va) in pcpu_get_vm_areas()
4945 va->va_start, va->va_end, in pcpu_get_vm_areas()
4978 struct vmap_area *va; in vmalloc_dump_obj() local
4989 va = __find_vmap_area(addr, &vn->busy.root); in vmalloc_dump_obj()
4990 if (!va || !va->vm) { in vmalloc_dump_obj()
4995 vm = va->vm; in vmalloc_dump_obj()
5037 struct vmap_area *va; in show_purge_info() local
5041 list_for_each_entry(va, &vn->lazy.head, list) { in show_purge_info()
5043 (void *)va->va_start, (void *)va->va_end, in show_purge_info()
5044 va_size(va)); in show_purge_info()
5053 struct vmap_area *va; in vmalloc_info_show() local
5062 list_for_each_entry(va, &vn->busy.head, list) { in vmalloc_info_show()
5063 if (!va->vm) { in vmalloc_info_show()
5064 if (va->flags & VMAP_RAM) in vmalloc_info_show()
5066 (void *)va->va_start, (void *)va->va_end, in vmalloc_info_show()
5067 va_size(va)); in vmalloc_info_show()
5072 v = va->vm; in vmalloc_info_show()
5262 struct vmap_area *va; in vmalloc_init() local
5292 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); in vmalloc_init()
5293 if (WARN_ON_ONCE(!va)) in vmalloc_init()
5296 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
5297 va->va_end = va->va_start + tmp->size; in vmalloc_init()
5298 va->vm = tmp; in vmalloc_init()
5300 vn = addr_to_node(va->va_start); in vmalloc_init()
5301 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); in vmalloc_init()