Lines Matching refs:vn (mm/vmalloc.c)

959 #define for_each_vmap_node(vn)	\	argument
960 	for ((vn) = &vmap_nodes[0];	\
961 	     (vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)
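
for_each_vmap_node() is nothing more than a pointer walk over the global
vmap_nodes[] array. A minimal user-space sketch of the same idiom, assuming
stand-in definitions (the struct body, node count and busy_count field are
illustrative, not the kernel's):

#include <stdio.h>

struct vmap_node { int busy_count; };	/* stand-in; the real struct is larger */

static struct vmap_node vmap_nodes[4];	/* the kernel sizes this at boot */
static int nr_vmap_nodes = 4;

#define for_each_vmap_node(vn) \
	for ((vn) = &vmap_nodes[0]; \
	     (vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)

int main(void)
{
	struct vmap_node *vn;

	/* Visit every node exactly once, as the kernel macro does. */
	for_each_vmap_node(vn)
		vn->busy_count = 0;

	printf("walked %d nodes\n", nr_vmap_nodes);
	return 0;
}
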
1132 struct vmap_node *vn; in find_vmap_area_exceed_addr_lock() local
1137 for_each_vmap_node(vn) { in find_vmap_area_exceed_addr_lock()
1138 spin_lock(&vn->busy.lock); in find_vmap_area_exceed_addr_lock()
1139 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); in find_vmap_area_exceed_addr_lock()
1144 spin_unlock(&vn->busy.lock); in find_vmap_area_exceed_addr_lock()
1153 vn = addr_to_node(va_start_lowest); in find_vmap_area_exceed_addr_lock()
1155 spin_lock(&vn->busy.lock); in find_vmap_area_exceed_addr_lock()
1156 *va = __find_vmap_area(va_start_lowest, &vn->busy.root); in find_vmap_area_exceed_addr_lock()
1159 return vn; in find_vmap_area_exceed_addr_lock()
1161 spin_unlock(&vn->busy.lock); in find_vmap_area_exceed_addr_lock()
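
find_vmap_area_exceed_addr_lock() returns the node owning the lowest busy area
at or above addr, with that node's busy lock still held. A sketch of the
two-pass shape, using a toy one-value-per-node layout and pthread mutexes in
place of the kernel's spinlocks and rb-trees (all names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct node {
	pthread_mutex_t lock;
	unsigned long lowest_start;	/* 0 means "no area on this node" */
};

#define NR_NODES 4
static struct node nodes[NR_NODES];

struct node *find_exceed_addr_lock(unsigned long addr, unsigned long *start)
{
	unsigned long best = ~0UL;
	struct node *best_vn = NULL;

	/* Pass 1: scan every node under its own lock, remember the winner. */
	for (int i = 0; i < NR_NODES; i++) {
		struct node *vn = &nodes[i];

		pthread_mutex_lock(&vn->lock);
		if (vn->lowest_start != 0 && vn->lowest_start >= addr &&
		    vn->lowest_start < best) {
			best = vn->lowest_start;
			best_vn = vn;
		}
		pthread_mutex_unlock(&vn->lock);
	}

	if (!best_vn)
		return NULL;

	/* Pass 2: retake only the winner's lock and re-check, since the
	 * area may have been freed while no lock was held. */
	pthread_mutex_lock(&best_vn->lock);
	if (best_vn->lowest_start == best) {
		*start = best;
		return best_vn;		/* returned with the lock held */
	}
	pthread_mutex_unlock(&best_vn->lock);
	return NULL;
}
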
1871 struct vmap_node *vn = addr_to_node(va->va_start); in free_vmap_area() local
1876 spin_lock(&vn->busy.lock); in free_vmap_area()
1877 unlink_va(va, &vn->busy.root); in free_vmap_area()
1878 spin_unlock(&vn->busy.lock); in free_vmap_area()
1913 size_to_va_pool(struct vmap_node *vn, unsigned long size) in size_to_va_pool() argument
1918 return &vn->pool[idx]; in size_to_va_pool()
1941 node_pool_del_va(struct vmap_node *vn, unsigned long size, in node_pool_del_va() argument
1949 vp = size_to_va_pool(vn, size); in node_pool_del_va()
1953 spin_lock(&vn->pool_lock); in node_pool_del_va()
1977 spin_unlock(&vn->pool_lock); in node_pool_del_va()
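
size_to_va_pool() maps an allocation size onto one of the node's per-size-class
pools. A sketch of the bucketing, with PAGE_SIZE and MAX_POOLS as assumed
stand-ins for the kernel's page size and MAX_VA_SIZE_PAGES:

#include <stddef.h>

#define PAGE_SIZE	4096UL
#define MAX_POOLS	256		/* stand-in for MAX_VA_SIZE_PAGES */

struct vmap_pool { int len; };

struct vmap_node {
	struct vmap_pool pool[MAX_POOLS];
};

/* One pool per page-rounded size class; a size beyond the largest
 * class yields NULL and falls back to the regular allocation path. */
struct vmap_pool *size_to_va_pool(struct vmap_node *vn, unsigned long size)
{
	unsigned long idx = (size - 1) / PAGE_SIZE;

	if (idx < MAX_POOLS)
		return &vn->pool[idx];

	return NULL;
}
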
2030 struct vmap_node *vn; in alloc_vmap_area() local
2114 vn = addr_to_node(va->va_start); in alloc_vmap_area()
2116 spin_lock(&vn->busy.lock); in alloc_vmap_area()
2117 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); in alloc_vmap_area()
2118 spin_unlock(&vn->busy.lock); in alloc_vmap_area()
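
In alloc_vmap_area(), the owning node is derived from the new area's start
address, and the insert touches only that node's busy lock. A sketch of the
address-to-node hashing and the insertion shape; ZONE_SIZE and the mutexes are
assumptions standing in for the kernel's zone size and spinlocks:

#include <pthread.h>

#define NR_NODES	4
#define ZONE_SIZE	(16UL << 20)	/* per-node zone size, assumed value */

struct vmap_node { pthread_mutex_t busy_lock; /* busy tree omitted */ };
static struct vmap_node vmap_nodes[NR_NODES];

/* Consecutive ZONE_SIZE slices of the address space round-robin across
 * the nodes, so an area's start address alone names its owner. */
unsigned int addr_to_node_id(unsigned long addr)
{
	return (addr / ZONE_SIZE) % NR_NODES;
}

void insert_busy(unsigned long va_start)
{
	struct vmap_node *vn = &vmap_nodes[addr_to_node_id(va_start)];

	pthread_mutex_lock(&vn->busy_lock);
	/* insert_vmap_area(va, &vn->busy.root, &vn->busy.head); */
	pthread_mutex_unlock(&vn->busy_lock);
}
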
2219 decay_va_pool_node(struct vmap_node *vn, bool full_decay) in decay_va_pool_node() argument
2230 if (list_empty(&vn->pool[i].head)) in decay_va_pool_node()
2234 spin_lock(&vn->pool_lock); in decay_va_pool_node()
2235 list_replace_init(&vn->pool[i].head, &tmp_list); in decay_va_pool_node()
2236 spin_unlock(&vn->pool_lock); in decay_va_pool_node()
2238 pool_len = n_decay = vn->pool[i].len; in decay_va_pool_node()
2239 WRITE_ONCE(vn->pool[i].len, 0); in decay_va_pool_node()
2261 spin_lock(&vn->pool_lock); in decay_va_pool_node()
2262 list_replace_init(&tmp_list, &vn->pool[i].head); in decay_va_pool_node()
2263 WRITE_ONCE(vn->pool[i].len, pool_len); in decay_va_pool_node()
2264 spin_unlock(&vn->pool_lock); in decay_va_pool_node()
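
decay_va_pool_node() drains each pool in three steps: detach the whole list
under pool_lock, trim entries with no lock held, then splice the survivors
back. A sketch of that shape over a toy singly linked list; the kernel does
this per size class and pairs WRITE_ONCE on pool[i].len with the shrinker's
lockless reads:

#include <pthread.h>
#include <stddef.h>

struct area { struct area *next; };

struct pool {
	pthread_mutex_t lock;
	struct area *head;
	int len;
};

/* Shrink a pool by half, or entirely on full decay. */
void decay_pool(struct pool *p, int full_decay,
		void (*free_area)(struct area *))
{
	struct area *list, *a;
	int len, n_decay;

	/* Step 1: detach everything while holding the lock only briefly. */
	pthread_mutex_lock(&p->lock);
	list = p->head;
	len = p->len;
	p->head = NULL;
	p->len = 0;
	pthread_mutex_unlock(&p->lock);

	/* Step 2: free n_decay entries with no lock held. */
	n_decay = full_decay ? len : len / 2;
	while (n_decay-- > 0 && list) {
		a = list;
		list = list->next;
		free_area(a);
		len--;
	}

	/* Step 3: put the survivors back. */
	pthread_mutex_lock(&p->lock);
	p->head = list;
	p->len = len;
	pthread_mutex_unlock(&p->lock);
}
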
2272 kasan_release_vmalloc_node(struct vmap_node *vn) in kasan_release_vmalloc_node() argument
2277 start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start; in kasan_release_vmalloc_node()
2278 end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end; in kasan_release_vmalloc_node()
2280 list_for_each_entry(va, &vn->purge_list, list) { in kasan_release_vmalloc_node()
2292 struct vmap_node *vn = container_of(work, in purge_vmap_node() local
2299 kasan_release_vmalloc_node(vn); in purge_vmap_node()
2301 vn->nr_purged = 0; in purge_vmap_node()
2303 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { in purge_vmap_node()
2310 vn->nr_purged++; in purge_vmap_node()
2312 if (is_vn_id_valid(vn_id) && !vn->skip_populate) in purge_vmap_node()
2313 if (node_pool_add_va(vn, va)) in purge_vmap_node()
2335 struct vmap_node *vn; in __purge_vmap_area_lazy() local
2345 for_each_vmap_node(vn) { in __purge_vmap_area_lazy()
2346 INIT_LIST_HEAD(&vn->purge_list); in __purge_vmap_area_lazy()
2347 vn->skip_populate = full_pool_decay; in __purge_vmap_area_lazy()
2348 decay_va_pool_node(vn, full_pool_decay); in __purge_vmap_area_lazy()
2350 if (RB_EMPTY_ROOT(&vn->lazy.root)) in __purge_vmap_area_lazy()
2353 spin_lock(&vn->lazy.lock); in __purge_vmap_area_lazy()
2354 WRITE_ONCE(vn->lazy.root.rb_node, NULL); in __purge_vmap_area_lazy()
2355 list_replace_init(&vn->lazy.head, &vn->purge_list); in __purge_vmap_area_lazy()
2356 spin_unlock(&vn->lazy.lock); in __purge_vmap_area_lazy()
2358 start = min(start, list_first_entry(&vn->purge_list, in __purge_vmap_area_lazy()
2361 end = max(end, list_last_entry(&vn->purge_list, in __purge_vmap_area_lazy()
2364 cpumask_set_cpu(node_to_id(vn), &purge_nodes); in __purge_vmap_area_lazy()
2376 vn = &vmap_nodes[i]; in __purge_vmap_area_lazy()
2379 INIT_WORK(&vn->purge_work, purge_vmap_node); in __purge_vmap_area_lazy()
2382 schedule_work_on(i, &vn->purge_work); in __purge_vmap_area_lazy()
2384 schedule_work(&vn->purge_work); in __purge_vmap_area_lazy()
2388 vn->purge_work.func = NULL; in __purge_vmap_area_lazy()
2389 purge_vmap_node(&vn->purge_work); in __purge_vmap_area_lazy()
2390 nr_purged_areas += vn->nr_purged; in __purge_vmap_area_lazy()
2395 vn = &vmap_nodes[i]; in __purge_vmap_area_lazy()
2397 if (vn->purge_work.func) { in __purge_vmap_area_lazy()
2398 flush_work(&vn->purge_work); in __purge_vmap_area_lazy()
2399 nr_purged_areas += vn->nr_purged; in __purge_vmap_area_lazy()
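
__purge_vmap_area_lazy() fans the purge out: each node with lazy areas gets its
own work item, and the caller then flushes every scheduled item and totals
vn->nr_purged. A sketch of the fan-out/fan-in shape using raw threads where the
kernel uses a per-node work_struct with schedule_work_on()/flush_work():

#include <pthread.h>
#include <stdio.h>

#define NR_NODES 4

struct node {
	pthread_t worker;
	int has_work;
	int nr_purged;
};

static struct node nodes[NR_NODES];

static void *purge_node(void *arg)
{
	struct node *vn = arg;

	vn->nr_purged = 1;	/* pretend one lazy area was purged */
	return NULL;
}

int purge_all(void)
{
	int i, total = 0;

	/* Fan out: one worker per node that has lazy areas. */
	for (i = 0; i < NR_NODES; i++) {
		nodes[i].has_work = 1;	/* assume every node has work */
		pthread_create(&nodes[i].worker, NULL, purge_node, &nodes[i]);
	}

	/* Fan in: wait for each worker, then total the purged counts. */
	for (i = 0; i < NR_NODES; i++) {
		if (nodes[i].has_work) {
			pthread_join(nodes[i].worker, NULL);
			total += nodes[i].nr_purged;
		}
	}
	return total;
}

int main(void)
{
	printf("purged %d areas\n", purge_all());
	return 0;
}
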
2437 struct vmap_node *vn; in free_vmap_area_noflush() local
2450 vn = is_vn_id_valid(vn_id) ? in free_vmap_area_noflush()
2453 spin_lock(&vn->lazy.lock); in free_vmap_area_noflush()
2454 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); in free_vmap_area_noflush()
2455 spin_unlock(&vn->lazy.lock); in free_vmap_area_noflush()
2479 struct vmap_node *vn; in find_vmap_area() local
2501 vn = &vmap_nodes[i]; in find_vmap_area()
2503 spin_lock(&vn->busy.lock); in find_vmap_area()
2504 va = __find_vmap_area(addr, &vn->busy.root); in find_vmap_area()
2505 spin_unlock(&vn->busy.lock); in find_vmap_area()
2516 struct vmap_node *vn; in find_unlink_vmap_area() local
2525 vn = &vmap_nodes[i]; in find_unlink_vmap_area()
2527 spin_lock(&vn->busy.lock); in find_unlink_vmap_area()
2528 va = __find_vmap_area(addr, &vn->busy.root); in find_unlink_vmap_area()
2530 unlink_va(va, &vn->busy.root); in find_unlink_vmap_area()
2531 spin_unlock(&vn->busy.lock); in find_unlink_vmap_area()
2754 struct vmap_node *vn; in free_vmap_block() local
2762 vn = addr_to_node(vb->va->va_start); in free_vmap_block()
2763 spin_lock(&vn->busy.lock); in free_vmap_block()
2764 unlink_va(vb->va, &vn->busy.root); in free_vmap_block()
2765 spin_unlock(&vn->busy.lock); in free_vmap_block()
4586 struct vmap_node *vn; in vread_iter() local
4601 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); in vread_iter()
4602 if (!vn) in vread_iter()
4668 spin_unlock(&vn->busy.lock); in vread_iter()
4669 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); in vread_iter()
4672 if (vn) in vread_iter()
4673 spin_unlock(&vn->busy.lock); in vread_iter()
4679 if (vn) in vread_iter()
4680 spin_unlock(&vn->busy.lock); in vread_iter()
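
vread_iter() never holds more than one node's busy lock: it copies under the
current lock, drops it, then re-locks whichever node owns the next address. A
self-contained sketch of that handoff, with a toy one-area-per-node layout and
pthread mutexes standing in for the kernel's spinlocks and rb-trees:

#include <pthread.h>
#include <stddef.h>

struct area { unsigned long start, end; };

struct node {
	pthread_mutex_t lock;
	struct area area;	/* toy: a single area per node */
};

#define NR_NODES 2
static struct node nodes[NR_NODES] = {
	{ PTHREAD_MUTEX_INITIALIZER, { 0x1000, 0x2000 } },
	{ PTHREAD_MUTEX_INITIALIZER, { 0x2000, 0x3000 } },
};

/* Return the node whose area ends past addr, with its lock held. */
static struct node *find_area_lock(unsigned long addr, struct area **va)
{
	for (int i = 0; i < NR_NODES; i++) {
		pthread_mutex_lock(&nodes[i].lock);
		if (nodes[i].area.end > addr) {
			*va = &nodes[i].area;
			return &nodes[i];
		}
		pthread_mutex_unlock(&nodes[i].lock);
	}
	return NULL;
}

void read_walk(unsigned long addr)
{
	struct area *va;
	struct node *vn = find_area_lock(addr, &va);

	while (vn) {
		unsigned long next = va->end;	/* "copy" the area, advance */

		pthread_mutex_unlock(&vn->lock);	/* drop before re-locking */
		vn = find_area_lock(next, &va);
	}
}

int main(void)
{
	read_walk(0x1000);
	return 0;
}
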
5015 struct vmap_node *vn = addr_to_node(vas[area]->va_start); in pcpu_get_vm_areas() local
5017 spin_lock(&vn->busy.lock); in pcpu_get_vm_areas()
5018 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); in pcpu_get_vm_areas()
5021 spin_unlock(&vn->busy.lock); in pcpu_get_vm_areas()
5134 struct vmap_node *vn; in vmalloc_dump_obj() local
5139 vn = addr_to_node(addr); in vmalloc_dump_obj()
5141 if (!spin_trylock(&vn->busy.lock)) in vmalloc_dump_obj()
5144 va = __find_vmap_area(addr, &vn->busy.root); in vmalloc_dump_obj()
5146 spin_unlock(&vn->busy.lock); in vmalloc_dump_obj()
5154 spin_unlock(&vn->busy.lock); in vmalloc_dump_obj()
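
vmalloc_dump_obj() takes the busy lock with a trylock: a debug dump can run in
a context that already holds that lock, and blocking there would deadlock. A
sketch of the bail-out-instead-of-wait shape:

#include <pthread.h>
#include <stdbool.h>

struct vmap_node { pthread_mutex_t busy_lock; };

bool dump_obj(struct vmap_node *vn)
{
	/* Give up rather than wait: a missed dump beats a deadlock. */
	if (pthread_mutex_trylock(&vn->busy_lock) != 0)
		return false;

	/* ... the __find_vmap_area() lookup and printing go here ... */

	pthread_mutex_unlock(&vn->busy_lock);
	return true;
}
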
5191 struct vmap_node *vn; in show_purge_info() local
5194 for_each_vmap_node(vn) { in show_purge_info()
5195 spin_lock(&vn->lazy.lock); in show_purge_info()
5196 list_for_each_entry(va, &vn->lazy.head, list) { in show_purge_info()
5201 spin_unlock(&vn->lazy.lock); in show_purge_info()
5207 struct vmap_node *vn; in vmalloc_info_show() local
5215 for_each_vmap_node(vn) { in vmalloc_info_show()
5216 spin_lock(&vn->busy.lock); in vmalloc_info_show()
5217 list_for_each_entry(va, &vn->busy.head, list) { in vmalloc_info_show()
5272 spin_unlock(&vn->busy.lock); in vmalloc_info_show()
5337 struct vmap_node *vn; in vmap_init_nodes() local
5358 vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT); in vmap_init_nodes()
5359 if (vn) { in vmap_init_nodes()
5363 vmap_nodes = vn; in vmap_init_nodes()
5370 for_each_vmap_node(vn) { in vmap_init_nodes()
5371 vn->busy.root = RB_ROOT; in vmap_init_nodes()
5372 INIT_LIST_HEAD(&vn->busy.head); in vmap_init_nodes()
5373 spin_lock_init(&vn->busy.lock); in vmap_init_nodes()
5375 vn->lazy.root = RB_ROOT; in vmap_init_nodes()
5376 INIT_LIST_HEAD(&vn->lazy.head); in vmap_init_nodes()
5377 spin_lock_init(&vn->lazy.lock); in vmap_init_nodes()
5380 INIT_LIST_HEAD(&vn->pool[i].head); in vmap_init_nodes()
5381 WRITE_ONCE(vn->pool[i].len, 0); in vmap_init_nodes()
5384 spin_lock_init(&vn->pool_lock); in vmap_init_nodes()
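
vmap_init_nodes() runs early in boot: it tries to allocate a multi-node array
and, if that fails, simply keeps the preset single static node. A sketch of
the fallback shape; n and calloc() are stand-ins for the kernel's CPU-derived
node count and kmalloc_array():

#include <stdlib.h>

struct vmap_node { int placeholder; /* busy/lazy trees, pools, locks omitted */ };

static struct vmap_node single;			/* always-available fallback */
static struct vmap_node *vmap_nodes = &single;
static int nr_vmap_nodes = 1;

void init_nodes(int n)
{
	if (n > 1) {
		struct vmap_node *vn = calloc(n, sizeof(*vn));

		/* On failure, stay on the single static node. */
		if (vn) {
			vmap_nodes = vn;
			nr_vmap_nodes = n;
		}
	}
	/* Per-node init of trees, pool lists and locks follows either way. */
}
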
5392 struct vmap_node *vn; in vmap_node_shrink_count() local
5395 for_each_vmap_node(vn) { in vmap_node_shrink_count()
5397 count += READ_ONCE(vn->pool[i].len); in vmap_node_shrink_count()
5406 struct vmap_node *vn; in vmap_node_shrink_scan() local
5408 for_each_vmap_node(vn) in vmap_node_shrink_scan()
5409 decay_va_pool_node(vn, true); in vmap_node_shrink_scan()
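
The shrinker pair splits cheap accounting from real work: count sums the pool
lengths without taking pool_lock (which is why pool[i].len is written with
WRITE_ONCE and read with READ_ONCE above), while scan performs a full decay of
every pool. A sketch of the lockless count, with assumed bounds:

#define MAX_POOLS	256
#define NR_NODES	4

struct vmap_node { int pool_len[MAX_POOLS]; };
static struct vmap_node vmap_nodes[NR_NODES];

/* The count is only a hint for the shrinker core, so a slightly stale
 * lockless sum is fine. */
unsigned long shrink_count(void)
{
	unsigned long count = 0;

	for (int n = 0; n < NR_NODES; n++)
		for (int i = 0; i < MAX_POOLS; i++)
			count += vmap_nodes[n].pool_len[i];

	return count;
}
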
5418 struct vmap_node *vn; in vmalloc_init() local
5455 vn = addr_to_node(va->va_start); in vmalloc_init()
5456 insert_vmap_area(va, &vn->busy.root, &vn->busy.head); in vmalloc_init()